diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47530b9c8dfd5ee5215f07f03fe81d1ab65cc1dd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f701ee722b57693d210a2f9e9ed8e7c6d6d7a1e6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79887e635603aa856f3849f76e55518e81cebb7e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ff0e31b4ec56e5f6ffd3f9cf5cc81f5fbac4bf9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7da0418ebc89b2590b18b23d9403e4d87f330935
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..364e8480e3c049da0d97e70420741ed0a8a00c01
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c73c08b99c2ad333b6b1994ed8c80361162db51
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4c931c8657c78d85f6cef80edbf5a8505ec2f13
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba411d93a7d84c830080a33cd25b57e95a4d0ff5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da481ffb7c41e28e00b118f52f45638c61c41974
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7fbbf3c346feccd5d901757173b9372f0a7f8c5c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..254b7a48b0c4f259c0325ab383e457c63616bfbe
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40a33594ea9b3f628653841c13f26d08b064790c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_sym_dispatch_mode.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..568ac29884a783ec4311c4c728539bea15241974
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/accelerator_partitioner.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6233fd2f91c01d40634a69d6cead3483cf3e8ef1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/debug.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d3e3bae489426fb659930d643152129df828e8e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/merge_matmul.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d2eb2664436170a530e6e7d3e44225fcc440f1f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/meta_tracer.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a40a40a9f8c76600a9556f8f66f2a7c3d7c7f69e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2fb39c6d2aa79b66293e4e9a6dc93691040c393
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/recording.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..adce4e8ccf2b7cc07463d9fffcd86486521d6991
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/sym_node.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_config.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f2bffa475f489ee1a57ccda24ce8a9610ad0ea5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_config.py
@@ -0,0 +1,42 @@
+import os
+import sys
+
+# [@compile_ignored: debug] Uses Z3 for validating the guard optimization transformations.
+translation_validation = (
+    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION", "0") == "1"
+)
+# Timeout (in milliseconds) for Z3 to find a solution.
+# [@compile_ignored: debug]
+translation_validation_timeout = int(
+    os.environ.get("TORCHDYNAMO_TRANSLATION_VALIDATION_TIMEOUT", "600000")
+)
+# Disables bisection for translation validation.
+#
+# Translation validation bisection is enabled by default, if translation validation
+# is also enabled. This should help find guard simplification issues. However,
+# since validation uses Z3 for bisecting, it might take a lot of time.
+#
+# Set this configuration option to avoid bisecting.
+# [@compile_ignored: debug]
+translation_validation_no_bisect = (
+    os.environ.get("TORCHDYNAMO_TRANSLATION_NO_BISECT", "0") == "1"
+)
+# Checks whether replaying ShapeEnv events on a freshly constructed one yields
+# a ShapeEnv with the same state. This should be used only in testing.
+check_shape_env_recorded_events = False
+
+
+# [@compile_ignored: debug] Show a warning for every specialization
+print_specializations = False
+
+# Wraps (un)equalities with a 'Not' class after recording the correct expression
+# in the FX graph. This should incorrectly construct the divisible and replacement
+# lists, and incorrectly issue guards.
+inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY = False
+
+# [@compile_ignored: debug] Validate that ShapeEnv's version key is updated correctly
+validate_shape_env_verison_key = False
+
+from torch.utils._config_module import install_config_module
+
+install_config_module(sys.modules[__name__])
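These flags are plain module globals wired through `install_config_module`, so they can be toggled either through the environment before import or directly on the module. A minimal sketch of both routes (the specific flag values are illustrative only):

```python
import os

# Environment variables are read at import time, so they must be set
# before the config module is first imported.
os.environ["TORCHDYNAMO_TRANSLATION_VALIDATION"] = "1"

import torch.fx.experimental._config as fx_config

assert fx_config.translation_validation      # picked up from the environment
fx_config.print_specializations = True       # or flip a flag directly on the module
```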
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py
new file mode 100644
index 0000000000000000000000000000000000000000..52fa6221c3de81de72e6871e03e28ebb40925943
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py
@@ -0,0 +1,58 @@
+from typing import List, Type
+
+__all__ = ["SymDispatchMode", "handle_sym_dispatch", "sym_function_mode"]
+
+SYM_FUNCTION_MODE = None
+
+
+# SymDispatchMode gets invoked whenever an operation is processed on
+# a PySymInt. When this occurs, you get called at __sym_dispatch__
+# with the operation in question. This is symmetric to TorchDispatchMode
+# but with some caveats:
+#
+# - In TorchDispatchMode, you get the same arguments as what a user
+#   invoked your API with; e.g., if you call torch.ops.aten.foo(a, b),
+#   you get (a, b) as args to your call. In SymDispatchMode, if
+#   you call a + b (where a and b are SymInts), you will get
+#   (a.node, b.node) as your args (these are PySymInts)
+#
+# - SymInt/PySymInt don't have FX proxy support (unlike, e.g., Tensor).
+#   So you have to manually call Tracer/create_node to write into
+#   the graph. See ProxySymDispatchMode for an example
+#
+class SymDispatchMode:
+    def __sym_dispatch__(self, func, types, args, kwargs):
+        raise NotImplementedError()
+
+    def __enter__(self):
+        global SYM_FUNCTION_MODE
+        old = SYM_FUNCTION_MODE
+        if hasattr(self, "inner"):
+            raise RuntimeError(
+                f"{self} has already been used as a mode. Please use a fresh version"
+            )
+        else:
+            self.inner = old
+        SYM_FUNCTION_MODE = self
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        global SYM_FUNCTION_MODE
+        SYM_FUNCTION_MODE = self.inner
+
+
+def handle_sym_dispatch(func, args, kwargs):
+    global SYM_FUNCTION_MODE
+    mode = sym_function_mode()
+    assert mode
+    SYM_FUNCTION_MODE = mode.inner
+    try:
+        # TODO: properly compute types
+        types: List[Type] = []
+        return mode.__sym_dispatch__(func, types, args, kwargs)
+    finally:
+        SYM_FUNCTION_MODE = mode
+
+
+def sym_function_mode():
+    return SYM_FUNCTION_MODE
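As a concrete illustration, a subclass only needs to override `__sym_dispatch__`. The sketch below (hypothetical, not part of this diff) logs every symbolic op it intercepts and defers to the default behavior by re-invoking the function on the PySymNode args:

```python
from torch.fx.experimental._sym_dispatch_mode import SymDispatchMode

class LoggingSymDispatchMode(SymDispatchMode):
    def __sym_dispatch__(self, func, types, args, kwargs):
        # args here are PySymNodes, not SymInts
        print(f"sym op: {getattr(func, '__name__', func)}")
        return func(*args, **kwargs)

# Hypothetical usage inside code that manipulates SymInts
# (constructing SymInts requires a ShapeEnv, omitted here):
# with LoggingSymDispatchMode():
#     out = some_symint_a + some_symint_b
```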
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/accelerator_partitioner.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/accelerator_partitioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..a09269eff510f56274cd99ec14b958091519c7ac
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/accelerator_partitioner.py
@@ -0,0 +1,1078 @@
+import operator
+from collections import deque
+from typing import Dict, List, Set, NamedTuple, Tuple, Deque
+
+import torch
+from torch.fx.passes.graph_manipulation import get_size_of_all_nodes
+from torch.fx.experimental.partitioner_utils import (
+    Partition,
+    Device,
+    PartitionerConfig,
+    get_partition_to_latency_mapping,
+    get_latency_of_partitioned_graph,
+    NodeLatency,
+    get_extra_size_of,
+    PartitionMode,
+)
+from torch.fx.graph_module import GraphModule
+from torch.fx.node import Node, map_arg
+from torch.fx.passes.split_module import split_module
+
+
+class DAGNode:
+    """DAGNode class maintains useful information for a partition (submodule)
+    and its input and output submodules.
+    """
+
+    def __init__(
+        self,
+        submodule_node: Node,
+        input_nodes: List[Node],
+        output_nodes: List[Node],
+        logical_device_ids: List[int],
+        size_bytes: int,
+    ) -> None:
+        self.submodule_node: Node = submodule_node
+        self.input_nodes: List[Node] = input_nodes
+        self.output_nodes: List[Node] = output_nodes
+        self.logical_device_ids: List[int] = logical_device_ids
+        self.size_bytes = size_bytes
+
+    def __str__(self) -> str:
+        return str(self.submodule_node)
+
+
+class DAG:
+    """DAG class contains all the DAG nodes"""
+
+    def __init__(self) -> None:
+        self.nodes: List[DAGNode] = []
+
+    def create_node(
+        self,
+        submodule_node: Node,
+        input_nodes: List[Node],
+        output_nodes: List[Node],
+        logical_devices: List[int],
+        size_bytes: int,
+    ) -> None:
+        node = DAGNode(
+            submodule_node, input_nodes, output_nodes, logical_devices, size_bytes
+        )
+        self.nodes.append(node)
+
+
+class PartitionResult(NamedTuple):
+    """NamedTuple used for returning the DAG and a new fx module"""
+
+    dag: DAG
+    module_with_submodules: GraphModule
+
+
+"""The following are helper functions for partition manipulation"""
+
+
+def reset_partition_device(partitions):
+    for partition in partitions:
+        partition.logical_device_ids = []
+
+
+def combine_two_partitions(
+    partition_0: Partition, partition_1: Partition, partitions: List[Partition]
+) -> None:
+    """Given a list of partitions and two of its partitions,
+    combine these two partitions into a new one, append it to the partitions,
+    and remove the original two from the list of partitions
+    """
+    partition = Partition(len(partitions))
+    partition.nodes = partition_0.nodes.union(partition_1.nodes)
+    partition.recalculate_mem_size()
+    partitions.append(partition)
+    partitions.remove(partition_0)
+    partitions.remove(partition_1)
+    reorganize_partitions(partitions)
+    return
+
+
+def set_parents_and_children(partitions: List[Partition]) -> None:
+    """Given a list of partitions, mark parents and children for each partition"""
+    # Go through all nodes in a partition.
+    # If a node's user is in another partition,
+    # then the other partition is one of this partition's children,
+    # and this partition is the other partition's parent.
+    for partition in partitions:
+        partition.children = set()
+        partition.parents = set()
+    for partition in partitions:
+        for node in partition.nodes:
+            # For each node in the current partition, find its users
+            users = node.users
+            for n in users:
+                # Find which partition the user node belongs to.
+                # Note that if the node itself also belongs to that partition,
+                # that partition is not the child of the current partition
+                for p in partitions:
+                    if p != partition and n in p.nodes and node not in p.nodes:
+                        partition.children.add(p)
+                        p.parents.add(partition)
+    return
+
+
+def reorganize_partitions(partitions: List[Partition]) -> None:
+    """Given a list of partitions, reorganize the partition id,
+    the parents, and the children of each partition
+    """
+    # Rearrange partition ids
+    for i, partition in enumerate(partitions):
+        partition.partition_id = i
+    set_parents_and_children(partitions)
+    return
+
+
+def get_bfs_level_partition(partitions: List[Partition]) -> None:
+    """Given a list of partitions,
+    mark the BFS level for each partition
+    """
+    current_level: Set[Partition] = set()
+    visited: Set[Partition] = set()
+    for partition in partitions:
+        # If a partition has no parent, it should be in the root level
+        if len(partition.parents) == 0:
+            current_level.add(partition)
+    next_level: Set[Partition] = set()
+    level = 0
+    # bfs
+    while current_level:
+        partition = current_level.pop()
+        partition.bfs_level = level
+        visited.add(partition)
+        children = partition.children
+        for child in children:
+            if child not in next_level:
+                next_level.add(child)
+        if not current_level:
+            current_level = next_level.copy()
+            next_level = set()
+            level += 1
+    return
+
+
+def get_node_to_partition_mapping(partitions: List[Partition]) -> Dict[Node, int]:
+    """Given a list of partitions, return a node-to-partition mapping"""
+    node_to_partition: Dict[Node, int] = {}
+    for partition in partitions:
+        for node in partition.nodes:
+            node_to_partition[node] = partition.partition_id
+    return node_to_partition
+
+
+def get_logical_id_to_device(devices: List[Device]) -> Dict[int, Device]:
+    """Get a mapping from device logical ID to Device object."""
+    logical_id_to_device: Dict[int, Device] = {}
+    for d in devices:
+        logical_id_to_device[d.logical_id] = d
+    return logical_id_to_device
+
+
+def get_device_partition_stats(
+    partitions: List[Partition], devices: List[Device]
+) -> Tuple[Dict[Device, List[Partition]], Dict[Device, int], List[Partition]]:
+    """Given a list of partitions and a list of devices, returns:
+    1. A mapping from device to partitions on it;
+    2. A mapping from device to its remaining memory size;
+    3. A list of partitions that do not have a device.
+ """ + # logical id to device + logical_id_to_device = get_logical_id_to_device(devices) + # Track partitions on device + device_to_partitions: Dict[Device, List[Partition]] = {} + # Track device's left mem size + device_to_left_mem_bytes: Dict[Device, int] = {} + for d in devices: + device_to_partitions[d] = [] + device_to_left_mem_bytes[d] = d.available_mem_bytes + + # Deal with the partitions that already have a device + # and also collect all partitions without a device (no_device_partitions) + no_device_partitions = [] + for partition in partitions: + if partition.logical_device_ids != []: + for logical_id in partition.logical_device_ids: + device = logical_id_to_device[logical_id] + device_to_partitions[device].append(partition) + device_to_left_mem_bytes[device] -= partition.used_mem_bytes + else: + no_device_partitions.append(partition) + + return ( + device_to_partitions, + device_to_left_mem_bytes, + no_device_partitions, + ) + + +def get_device_to_partitions_mapping( + partitions: List[Partition], devices: List[Device] +): + """Given a list of partitions and a list of devices, + map each partition into a device. + """ + + def calculate_extra_mem_bytes_needed_for( + partition: Partition, partitions: List[Partition] + ): + all_nodes: Set[Node] = set() + for p in partitions: + all_nodes = all_nodes.union(p.nodes) + if len(all_nodes) == 0: + return partition.used_mem_bytes + all_nodes = all_nodes.union(partition.nodes) + extra_size_needed = 0 + for node in partition.nodes: + extra_size_needed += get_extra_size_of(node, all_nodes) + return extra_size_needed + + def find_device_for(partition: Partition): + """Given a partition, find a logical device for the partition + The algorithm is to put the partition on the device + that has just enough mem left for that partition. + device_to_left_mem_bytes is a dictionary between device and its left mem size + sorted by its left mem size + """ + for d in device_to_left_mem_bytes: + extra_size_needed = calculate_extra_mem_bytes_needed_for( + partition, device_to_partitions[d] + ) + if extra_size_needed < device_to_left_mem_bytes[d]: + device_to_partitions[d].append(partition) + partition.logical_device_ids.append(d.logical_id) + device_to_left_mem_bytes[d] -= extra_size_needed + return True + return False + + ( + device_to_partitions, + device_to_left_mem_bytes, + no_device_partitions, + ) = get_device_partition_stats(partitions, devices) + + # Find devices for all the partitions without a device + found_device = True + for partition in no_device_partitions: + device_to_left_mem_bytes = dict(sorted(device_to_left_mem_bytes.items(), key=lambda item: item[1])) + found_device = find_device_for(partition) + if not found_device: + break + return found_device + + +def check_dependency(partition): + """Given a partition,check if there is a circular dependency on + this partition using bfs + """ + visited: Set[Partition] = {partition} + queue: Deque[Partition] = deque([partition]) + while queue: + p = queue.popleft() + for child in p.children: + if child == partition: + return True + else: + if child not in visited: + visited.add(child) + queue.append(child) + return False + + +class Partitioner: + """A fx module may not fit into one device. + Partitioner class helps partition one fx module into submodules (partitions), + so that the submodules can be executed crossing different accelerators. + The main function of this class is self.partition_graph. 
+
+
+class Partitioner:
+    """An fx module may not fit into one device.
+    The Partitioner class helps partition one fx module into submodules (partitions),
+    so that the submodules can be executed across different accelerators.
+    The main function of this class is self.partition_graph.
+    It partitions the fx module based on the scheme specified in partitioner_config.
+    A DAG structure is returned
+    along with a new fx module with submodule nodes.
+    """
+
+    def __init__(self) -> None:
+        self.partitions: List[Partition] = []
+        self.node_to_partition: Dict[Node, int] = {}
+        self.devices: List[Device] = []
+
+    def partition_graph(
+        self,
+        fx_module: GraphModule,
+        torch_module: torch.nn.Module,
+        partitioner_config: PartitionerConfig,
+    ) -> PartitionResult:
+        """Given the fx module, torch module and partitioner_config,
+        find the partitions, do the partitioning,
+        and then return a DAG and a new fx module with submodule nodes (partitions)
+        """
+        self.graph_module = fx_module
+        self.torch_module = torch_module
+        self.devices = partitioner_config.devices
+        if len(self.devices) == 0:
+            raise RuntimeError("No devices")
+        # Tag the size in bytes onto all nodes in the graph_module.
+        get_size_of_all_nodes(self.graph_module)
+        # Check if there are op nodes in the fx module
+        nodes = self.graph_module.graph.nodes
+        if all(node.op in {"placeholder", "get_attr", "output"} for node in nodes):
+            raise RuntimeError("No partition since there are no operations in the module")
+        # Calculate the total size of the fx module
+        total_size_of_graph = 0
+        for node in nodes:
+            if node.op == "output":
+                break
+            total_size_of_graph += node.size_bytes.total_size
+        # Find the device with the max mem size
+        device_with_max_mem = max(self.devices, key=lambda d: d.available_mem_bytes)
+        # AOT based partition
+        if partitioner_config.mode == PartitionMode.aot_based:
+            self.aot_based_partition(
+                partitioner_config.node_to_partition_mapping,
+                partitioner_config.partition_to_logical_device_mapping,
+            )
+        # Single partition if the whole module can fit into one device
+        elif total_size_of_graph <= device_with_max_mem.available_mem_bytes:
+            self.find_single_partition(
+                total_size_of_graph, logical_device_id=device_with_max_mem.logical_id
+            )
+        elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
+            raise RuntimeError("The devices do not have enough memory for the module")
+        else:
+            # Sparse nn based partition
+            if partitioner_config.mode == PartitionMode.sparse_nn:
+                available_mem_bytes = self.devices[0].available_mem_bytes
+                if not all(
+                    device.available_mem_bytes == available_mem_bytes
+                    for device in self.devices
+                ):
+                    raise RuntimeError("All devices must have same memory size!")
+                # sparse_nn_partition only supports the same memory size
+                # TODO: add different size support for sparse_nn_partition
+                self.sparse_nn_partition(available_mem_bytes)
+            # Cost aware partition
+            elif partitioner_config.mode == PartitionMode.cost_aware:
+                self.cost_aware_partition(
+                    partitioner_config.transfer_rate_bytes_per_sec,
+                    partitioner_config.node_to_latency_mapping,
+                )
+            # KL based partition
+            elif partitioner_config.mode == PartitionMode.kl_based:
+                self.kl_based_partition(
+                    partitioner_config.transfer_rate_bytes_per_sec,
+                    partitioner_config.node_to_latency_mapping,
+                )
+            else:
+                self.size_based_partition()
+
+        # Saturate host if possible.
+        if partitioner_config.saturate_host:
+            self.saturate_host()
+
+        # Partition the graph module based on the partition assignment.
+        module_with_submodules = self.do_partition()
+
+        # The DAG contains DAGNodes with info on each partition's input nodes,
+        # output nodes, and how partitions are connected.
+        dag = self.dump_dag(module_with_submodules)
+        ret = PartitionResult(dag, module_with_submodules)
+        return ret
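Tying the pieces together, a plausible end-to-end invocation looks like the sketch below (the toy module and memory numbers are invented; `get_size_of_all_nodes` fills in the `size_bytes` metadata the partitioner reads):

```python
import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.partitioner_utils import Device, PartitionerConfig

class TwoLayer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.a = torch.nn.Linear(4, 4)
        self.b = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.b(self.a(x))

m = TwoLayer()
traced = symbolic_trace(m)
devices = [Device("dev_0", 125_000, 0), Device("dev_1", 125_000, 1)]
result = Partitioner().partition_graph(traced, m, PartitionerConfig(devices))
print(result.dag.nodes)                     # one DAGNode per submodule call
print(result.module_with_submodules.graph)  # graph calling submod_0, submod_1, ...
```

With the default config this small module fits on one device, so `find_single_partition` handles it; the other modes are selected via `PartitionerConfig.mode` as shown in the later sketches.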
+
+    def find_single_partition(
+        self, total_size_of_graph, logical_device_id: int = 0
+    ) -> None:
+        """Fit the whole fx module into one device"""
+        partition_0 = self.create_partition()
+        for node in self.graph_module.graph.nodes:
+            if node.op == "output":
+                # Skip the output node, but there can
+                # be nodes after the output in certain cases.
+                continue
+            partition_0.nodes.add(node)
+        partition_0.used_mem_bytes = total_size_of_graph
+        partition_0.logical_device_ids = [logical_device_id]
+        # Get the node to partition mapping
+        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
+        return
+
+    def size_based_partition(self) -> None:
+        """This method partitions the fx module based on memory size.
+        It uses a greedy approach. The result may not be the best.
+        The basic idea is:
+        Step 1:
+        Find a device which has enough memory to fit the current node, and create an
+        empty partition with the size of that device.
+        Then keep adding the following nodes into the partition until the partition is full.
+        Step 2:
+        Repeat Step 1 until no device is left.
+        Step 3:
+        If some nodes are left, create a partition for each remaining node (single node
+        partitions), and then try to map those partitions onto logical devices with
+        enough memory left.
+        """
+
+        def find_device_based_on_size(node) -> Device:
+            """Given a node, this function finds a logical device
+            that could fit the node.
+            """
+            mem_size_needed = get_extra_size_of(node, set())
+            device = Device("", -1, -1)
+            for d in self.devices:
+                if (
+                    d not in occupied_devices
+                    and d.available_mem_bytes >= mem_size_needed
+                ):
+                    device = d
+                    break
+            if device.available_mem_bytes < 0:
+                raise RuntimeError(str(node) + " is too large to fit any device")
+            occupied_devices.append(device)
+            return device
+
+        # Track each partition and its remaining mem size
+        partition_to_left_mem_bytes: Dict[Partition, int] = {}
+        # Track all the devices that have been used
+        occupied_devices: List[Device] = []
+        partition = self.create_partition()
+        for node in self.graph_module.graph.nodes:
+            if node.op in {"call_module", "call_method", "call_function"}:
+                # Check if there are devices left
+                if len(self.partitions) <= len(self.devices):
+                    total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
+                    # Check if the current partition is the very first partition
+                    if partition.used_mem_bytes == 0:
+                        # Find a device to fit the first node
+                        device = find_device_based_on_size(node)
+                        occupied_devices.append(device)
+                        # Update the partition and its remaining mem size
+                        partition_to_left_mem_bytes[
+                            partition
+                        ] = device.available_mem_bytes
+                        # Update available mem for the current partition
+                        partition.logical_device_ids.append(device.logical_id)
+                    else:
+                        # The current partition is not the first partition
+                        # Check if the current node can fit into the current partition
+                        if (
+                            partition_to_left_mem_bytes[partition]
+                            < total_size_of_input_nodes
+                        ):
+                            # Check if no device is left
+                            if len(self.partitions) == len(self.devices):
+                                # No device is left
+                                # Put the previous partitions into a list (non_single_node_partitions)
+                                non_single_node_partitions = self.partitions[:]
+                                # Create the first single node partition for the current node
+                                self.create_single_node_partition(node)
+                                continue
+                            # Some devices are still left
+                            # Create a new partition with a mem size that is enough for the current node
+                            device = find_device_based_on_size(node)
+                            partition = self.create_partition()
+                            total_size_of_input_nodes = get_extra_size_of(
+                                node, partition.nodes
+                            )
+                            partition_to_left_mem_bytes[
+                                partition
+                            ] = device.available_mem_bytes
+                            partition.logical_device_ids.append(device.logical_id)
+                    partition.add_node(node)
+                    partition_to_left_mem_bytes[partition] -= total_size_of_input_nodes
+                # Create single node partitions if no device is left
+                else:
+                    self.create_single_node_partition(node)
+        reorganize_partitions(self.partitions)
+        # Get the node to partition mapping
+        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
+        # Map all partitions onto devices
+        found_partition_to_device_mapping = get_device_to_partitions_mapping(
+            self.partitions, self.devices
+        )
+        if not found_partition_to_device_mapping:
+            raise RuntimeError("Cannot get a valid partition to logical device mapping")
+        return
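The greedy loop above is easier to follow on toy numbers. A stand-alone sketch of steps 1-3 (hypothetical node sizes and device capacities, no FX objects):

```python
def greedy_size_partition(node_sizes, device_caps):
    """Fill one partition per device in order; leftovers become
    single-node partitions, as in steps 1-3 above."""
    partitions, current, left = [], [], None
    caps = list(device_caps)
    for name, size in node_sizes:
        if left is None or size > left:        # open a new partition
            if current:
                partitions.append(current)
            if caps:                           # a device is still free
                left, current = caps.pop(0), []
            else:                              # step 3: single-node partition
                partitions.append([name])
                left, current = None, []
                continue
        current.append(name)
        left -= size
    if current:
        partitions.append(current)
    return partitions

print(greedy_size_partition([("a", 30), ("b", 30), ("c", 30), ("d", 30)], [70, 40]))
# [['a', 'b'], ['c'], ['d']] -- devices hold [a, b] and [c]; d gets its own partition
```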
+
+    def saturate_host(self) -> None:
+        """Saturate the host by assigning replicates to unused devices with enough memory.
+        It uses a greedy approach to find the next available set of devices to place all split
+        partitions: For each used device, it searches for an idle device with minimal memory
+        size that can hold all the partitions located on that device; if the search is successful
+        for all used devices, it then assigns the new devices' logical IDs to the corresponding
+        partitions.
+        """
+        (
+            device_to_partitions,
+            device_to_left_mem_bytes,
+            no_device_partitions,
+        ) = get_device_partition_stats(self.partitions, self.devices)
+
+        assert (
+            len(no_device_partitions) == 0
+        ), f"Expected no_device_partitions to be empty, but got {len(no_device_partitions)}"
+
+        # Devices that hold partitions
+        used_devices = [d for d in self.devices if len(device_to_partitions[d]) > 0]
+        # Track replicates of the assigned devices
+        replicated_device_to_used_device: Dict[Device, Device] = {}
+
+        while len(used_devices) * 2 + len(replicated_device_to_used_device) <= len(
+            self.devices
+        ):
+            # Success flag for this round
+            success = True
+            # Devices that have not been assigned
+            idle_devices = [
+                d
+                for d in self.devices
+                if d not in used_devices and d not in replicated_device_to_used_device
+            ]
+            # Temporary mapping from replicated device to original device
+            temp_replicate_mapping = {}
+
+            # Find a new device to replicate all partitions on a used device
+            for used_device in used_devices:
+                # Idle devices that have enough memory
+                available_devices = [
+                    d
+                    for d in idle_devices
+                    if d.available_mem_bytes
+                    >= used_device.available_mem_bytes
+                    - device_to_left_mem_bytes[used_device]
+                ]
+                if len(available_devices) == 0:
+                    success = False
+                    break
+                new_device = min(available_devices, key=lambda d: d.available_mem_bytes)
+                idle_devices.remove(new_device)
+                temp_replicate_mapping[new_device] = used_device
+
+            if not success:
+                break
+            replicated_device_to_used_device.update(temp_replicate_mapping)
+
+        # Update logical device IDs assigned to the partitions
+        for (
+            replicate_device,
+            original_device,
+        ) in replicated_device_to_used_device.items():
+            logical_id = replicate_device.logical_id
+            for partition in device_to_partitions[original_device]:
+                partition.logical_device_ids.append(logical_id)
+        for p in self.partitions:
+            print(p.logical_device_ids)
+
+    def do_partition(self) -> GraphModule:
+        """Return a new fx module with submodule nodes (partitions)."""
+        module_with_submodules = split_module(
+            self.graph_module,
+            self.torch_module,
+            lambda node: self.node_to_partition[node],
+        )
+        return module_with_submodules
+
+    def dump_dag(self, module_with_submodules: GraphModule) -> DAG:
+        """Build and return the DAG structure from the new fx module with submodules."""
+        dag = DAG()
+        for node in module_with_submodules.graph.nodes:
+            if node.op == "output":
+                break
+            if node.op in {"placeholder", "get_attr"}:
+                continue
+            if node.target == operator.__getitem__:
+                continue
+            input_nodes: Dict[Node, None] = {}
+            map_arg(node.args, input_nodes.setdefault)
+            map_arg(node.kwargs, input_nodes.setdefault)
+            # When a node has two or more output nodes,
+            # it outputs its result to 'getitem' nodes.
+            # Those 'getitem' nodes are the output nodes for this node.
+            # Otherwise, the output node is this node itself.
+            if len(node.users) > 1:
+                output_nodes = list(node.users)
+            else:
+                output_nodes = [node]
+            partition_id = int(node.name.rsplit("_", 1)[-1])
+            device_ids = self.partitions[partition_id].logical_device_ids
+            size_bytes = self.partitions[partition_id].used_mem_bytes
+            dag.create_node(
+                node, list(input_nodes), output_nodes, device_ids, size_bytes
+            )
+        return dag
+
+    def create_partition(self) -> Partition:
+        """Create a partition and append it to self.partitions."""
+        partition_id = len(self.partitions)
+        partition = Partition(partition_id)
+        self.partitions.append(partition)
+        return partition
+
+    def create_single_node_partition(self, node):
+        """Create a partition for a single node"""
+        partition = self.create_partition()
+        partition.add_node(node)
+        return
+
+    def sparse_nn_partition(self, available_mem_bytes: int) -> None:
+        """This method partitions a sparse nn module.
+        It is a size-based partition, but unlike size_based_partition,
+        it only works when all the devices have the same memory size (available_mem_bytes).
+        In the future, devices with different mem sizes will be supported like size_based_partition.
+        It first traverses all the nodes and does the partitioning based on the same memory size.
+        If the current partition does not have enough memory left for a new op node
+        (call_module, call_method, call_function), a new partition is created.
+        When crossing the boundary between non-embedding nodes and embedding nodes,
+        a new partition is created regardless.
+        For example, if the current node is a non-embedding node but the next node is an
+        embedding node, a new partition is created for the next node.
+        After the partitioning, the partitions are combined as much as possible.
+        The rule is that a non-embedding partition only
+        combines with another non-embedding one.
+        The same goes for the embedding partitions.
+        """
+
+        def combine_partitions_based_on_size(
+            partitions: List[Partition], available_mem_bytes: int
+        ) -> None:
+            """Combine small partitions together to keep as few partitions as possible.
+            Here is an example of the algorithm:
+            Given some partitions, we first sort them based on partition used memory size.
+            [(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)]
+            The available memory is 10.
+            step 1: self.find_partition_to_combine_based_on_size()
+            First, mark the bfs level for each partition.
+            Second, look at the smallest partition, partition_4: 10 - 1 = 9,
+            which means any partition whose used memory is 9 or less
+            could be combined with this partition.
+            We go from the largest and select partition_0.
+            Check the bfs levels of the two partitions; if the level difference is less than 2,
+            they can be combined.
+            step 2: repeat step 1 until no partitions can be combined
+            """
+            find_combination = True
+            while find_combination:
+                # Sort partitions based on memory size
+                sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)
+                # Mark bfs level
+                get_bfs_level_partition(self.partitions)
+                find_combination, partitions = find_partition_to_combine_based_on_size(
+                    sorted_partitions, available_mem_bytes, partitions
+                )
+            return
+
+        def calculate_mem_bytes_needed(p1, p2):
+            """Given two partitions, calculate how many mem bytes
+            are needed if the two partitions are combined
+            """
+            nodes = p1.nodes.union(p2.nodes)
+            mem_bytes_needed = 0
+            for node in nodes:
+                mem_bytes_needed += get_extra_size_of(node, nodes)
+            return mem_bytes_needed
+
+        def find_partition_to_combine_based_on_size(
+            sorted_partitions: List[Partition],
+            available_mem_bytes: int,
+            partitions: List[Partition],
+        ) -> Tuple[bool, List[Partition]]:
+            """step 1 in combine_partitions_based_on_size()"""
+            find_combination = False
+            smallest_partition = sorted_partitions.pop(0)
+            for p in sorted_partitions[::-1]:
+                if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:
+                    # Calculate how many bytes are needed if combined
+                    mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)
+                    if mem_bytes_needed <= available_mem_bytes:
+                        combine_two_partitions(p, smallest_partition, self.partitions)
+                        partitions.remove(smallest_partition)
+                        partitions.remove(p)
+                        partitions.append(self.partitions[-1])
+                        find_combination = True
+                        break
+            return find_combination, partitions
+
+        def reset_partition_in_sparse_nn(partition, new_partition=True):
+            """If crossing the boundary between non-embedding nodes and
+            embedding nodes, create a new partition
+            """
+            if in_embedding_region:
+                embedding_partitions.append(partition)
+            else:
+                non_embedding_partitions.append(partition)
+            if new_partition:
+                partition = self.create_partition()
+                partition.left_mem_bytes = available_mem_bytes
+                return partition
+            return None
+
+        def is_embedding_node(node: Node) -> bool:
+            """Check if a node is an embedding node"""
+            if node.op == "call_module":
+                submodule = self.graph_module
+                for atom in str(node.target).split("."):
+                    if not hasattr(submodule, atom):
+                        raise RuntimeError(
+                            f"Module {submodule} has no attribute {atom}"
+                        )
+                    submodule = getattr(submodule, atom)
+                    if "Embedding" in str(submodule):
+                        return True
+            return False
+
+        # Track embedding partitions and non-embedding partitions separately
+        embedding_partitions: List[Partition] = []
+        non_embedding_partitions: List[Partition] = []
+        # A flag to check the boundary
+        in_embedding_region: bool = False
+        partition = self.create_partition()
+        for node in self.graph_module.graph.nodes:
+            if node.op in {"call_module", "call_method", "call_function"}:
+                # Check if crossing the boundary between embedding nodes and non-embedding nodes
+                if is_embedding_node(node) != in_embedding_region:
+                    # Crossing the boundary
+                    # Check if the current partition is an empty partition
+                    if partition.used_mem_bytes != 0:
+                        # The current partition isn't an empty partition. Create a new one.
+                        partition = reset_partition_in_sparse_nn(partition)
+                    in_embedding_region = not in_embedding_region
+                total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
+                if (
+                    total_size_of_input_nodes + partition.used_mem_bytes
+                    > available_mem_bytes
+                ):
+                    partition = reset_partition_in_sparse_nn(partition)
+                    total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
+                    if total_size_of_input_nodes > available_mem_bytes:
+                        raise RuntimeError(
+                            f"{node.target} is too large to fit into a device"
+                        )
+                partition.add_node(node)
+        reset_partition_in_sparse_nn(partition, new_partition=False)
+        # Set parents and children for partitions
+        set_parents_and_children(self.partitions)
+        # Combine non-embedding partitions
+        combine_partitions_based_on_size(non_embedding_partitions, available_mem_bytes)
+        # Combine embedding partitions
+        combine_partitions_based_on_size(embedding_partitions, available_mem_bytes)
+        total_size_of_non_embedding_partitions = 0
+        for partition in non_embedding_partitions:
+            total_size_of_non_embedding_partitions += partition.used_mem_bytes
+        # Check if there are enough devices for all partitions
+        if len(embedding_partitions) > len(self.devices):
+            msg = (
+                "Need "
+                + str(len(embedding_partitions))
+                + " devices, but only "
+                + str(len(self.devices))
+                + " provided"
+            )
+            raise RuntimeError(msg)
+        occupied_devices = []
+        for i, partition in enumerate(embedding_partitions):
+            # Check if all non-embedding partitions can fit into the embedding partition devices
+            if (
+                total_size_of_non_embedding_partitions + partition.used_mem_bytes
+                > available_mem_bytes
+            ):
+                raise RuntimeError(
+                    "partition_"
+                    + str(partition.partition_id)
+                    + " (embedding partition) and non-embedding partitions cannot fit into one device"
+                )
+            else:
+                # Add a logical device to the partition
+                partition.logical_device_ids = [self.devices[i].logical_id]
+                occupied_devices.append(self.devices[i].logical_id)
+        # Add logical devices to the non_embedding_partitions
+        for partition in non_embedding_partitions:
+            partition.logical_device_ids = occupied_devices
+        # Get the node to partition mapping
+        self.node_to_partition = get_node_to_partition_mapping(self.partitions)
+        return
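For reference, a sparse-nn run would normally be requested through the config rather than by calling this method directly; a hedged sketch reusing `traced` and the toy module from the earlier example (device sizes are placeholders):

```python
from torch.fx.experimental.partitioner_utils import (
    Device, PartitionerConfig, PartitionMode,
)

devices = [Device(f"dev_{i}", 1_000_000, i) for i in range(4)]
config = PartitionerConfig(devices, mode=PartitionMode.sparse_nn)
# Partitioner().partition_graph(traced, m, config) would then route into
# sparse_nn_partition(available_mem_bytes=1_000_000), assuming the module
# is too large for a single device.
```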
+
+    def cost_aware_partition(
+        self,
+        transfer_rate_bytes_per_sec: float,
+        node_to_latency_mapping: Dict[Node, NodeLatency],
+    ) -> None:
+        """This method partitions the fx module based on cost.
+        The cost is the total latency of running the whole fx module.
+        In partitioner_utils.py, the cost model is built.
+        The cost-aware partition algorithm is:
+        #1. At the beginning, each node is its own partition.
+            Then we map all the partitions to the devices
+            and calculate the cost.
+        #2. Then try to pre-combine any two of the partitions if the two
+            partitions can be combined
+            (the bfs level difference is less than 2, or the two partitions are
+            connected, and a partition-to-device mapping can be found).
+            See if any partition pair could reduce the current cost.
+            Choose the pair that shows the minimum cost and then combine them.
+        #3. Repeat #2 until the cost cannot be reduced.
+ """ + + def try_combining_partitions(p0_index, p1_index, partitions) -> float: + """Given two partitions and a list of partitions, combine these two partitions + and see what is the cost of the modified partition list + """ + p0 = partitions[p0_index] + p1 = partitions[p1_index] + """If two partitions' bfs level are less than 2 or two partitions are connected to each other, + then they can be combined + """ + if ( + (abs(p0.bfs_level - p1.bfs_level) <= 1) + or (p0 in p1.parents) + or p0 in (p1.children) + ): + combine_two_partitions(p0, p1, partitions) + # Check if a circular dependency exists after combining + if check_dependency(partitions[-1]): + return float("inf") + # Check if the modified partition list can be mapped to devices after combination + reset_partition_device(partitions) + found_deivce = get_device_to_partitions_mapping( + partitions, self.devices + ) + if not found_deivce: + return float("inf") + # Calculate the new cost + partition_to_latency_mapping = get_partition_to_latency_mapping( + partitions, node_to_latency_mapping + ) + cost = get_latency_of_partitioned_graph( + partitions, + partition_to_latency_mapping, + transfer_rate_bytes_per_sec, + ) + return cost + # If two partition can not be combined, the cost is inf + return float("inf") + + def search_combination( + transfer_rate_bytes_per_sec, node_to_latency_mapping + ) -> bool: + """Given transfer rate between partitions and each node's latency, + find two partitions to combine so the cost of the partitions can + be reduced. + The algorithm is : + 1. Go through all the partition pairs and see + if any pair of partitions can be combined. + 2. Calculate the cost after the combination. + 3. Select the minimum cost and combine its corresponding partition pair. + """ + partition_to_latency_mapping = get_partition_to_latency_mapping( + self.partitions, node_to_latency_mapping + ) + cost = get_latency_of_partitioned_graph( + self.partitions, + partition_to_latency_mapping, + transfer_rate_bytes_per_sec, + ) + if len(self.partitions) == 1: + return False + partition_pair: List[int] = [] + for i in range(len(self.partitions) - 1): + for j in range(i + 1, len(self.partitions)): + # Try to combine the partition pair + # and see the new cost after combination + new_cost = try_combining_partitions(i, j, self.partitions[:]) + if new_cost <= cost: + partition_pair = [i, j] + cost = new_cost + reorganize_partitions(self.partitions) + # If a partition pair is found, combine them + if len(partition_pair) != 0: + p0 = self.partitions[partition_pair[0]] + p1 = self.partitions[partition_pair[1]] + combine_two_partitions(p0, p1, self.partitions) + get_bfs_level_partition(self.partitions) + reset_partition_device(self.partitions) + get_device_to_partitions_mapping(self.partitions, self.devices) + return len(partition_pair) != 0 + + for node in self.graph_module.graph.nodes: + if node.op not in {"placeholder", "get_attr", "output"}: + self.create_single_node_partition(node) + # Set up parent partitions and children partitions for each partition + set_parents_and_children(self.partitions) + # Get bfs level for each partition + get_bfs_level_partition(self.partitions) + find_combination = True + while find_combination: + # Search for a pair partition to generate the minimum new cost, + # then combine them + find_combination = search_combination( + transfer_rate_bytes_per_sec, node_to_latency_mapping + ) + # Make sure all partitions are set up correctly + reorganize_partitions(self.partitions) + # Set up node to partition mapping + 
+
+    def kl_based_partition(
+        self,
+        transfer_rate_bytes_per_sec: float,
+        node_to_latency_mapping: Dict[Node, NodeLatency],
+    ) -> None:
+        """This function is a cost-aware partition based
+        on the Kernighan-Lin algorithm.
+        First, the graph is partitioned using size_based_partition.
+        Then, each node is swapped with any other node in a different
+        partition, and at the same time, the cost is estimated after
+        the swapping.
+        For example, we have nodes n0, n1, n2, n3 and n4.
+        Using size_based_partition, n0 and n1 are in Partition p0;
+        n2, n3 and n4 are in Partition p1. The current cost is estimated.
+        We first try swapping n0 with n2 from the other partition.
+        If swapping n0 and n2 shows a lower cost than the current cost,
+        and that cost is the minimum among other pairs like
+        (n0, None) (which means moving n0 to Partition p1 without swapping any other node),
+        (n0, n3) and (n0, n4), we swap n0 and n2 and set the new cost
+        as the current cost.
+        We then repeat this process for all the other nodes until all swapping pairs
+        have been tried.
+        """
+
+        def swap_nodes(n0, n1, p0, p1):
+            # Either n0 or n1 could be None,
+            # which means we simply move the node
+            # to the other partition
+            if n0 is not None:
+                p0.remove_node(n0)
+                p1.add_node(n0)
+            if n1 is not None:
+                p0.add_node(n1)
+                p1.remove_node(n1)
+
+        def try_swap_nodes(
+            n0, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
+        ):
+            cost = float("inf")
+            swap_nodes(n0, n1, p0, p1)
+            # Reorganize partitions after swapping
+            reorganize_partitions(self.partitions)
+            # Check if there is a circular dependency after swapping
+            if (not check_dependency(p0)) and (not check_dependency(p1)):
+                reset_partition_device(self.partitions)
+                partition_to_latency_mapping = get_partition_to_latency_mapping(
+                    self.partitions, node_to_latency_mapping
+                )
+                # Check if all partitions can be mapped to logical devices after swapping
+                found_device = get_device_to_partitions_mapping(
+                    self.partitions, self.devices
+                )
+                if not found_device:
+                    cost = float("inf")
+                else:
+                    cost = get_latency_of_partitioned_graph(
+                        self.partitions,
+                        partition_to_latency_mapping,
+                        transfer_rate_bytes_per_sec,
+                    )
+            # Swap back and reset all partitions back to original
+            swap_nodes(n1, n0, p0, p1)
+            reorganize_partitions(self.partitions)
+            reset_partition_device(self.partitions)
+            get_device_to_partitions_mapping(self.partitions, self.devices)
+            return cost
+
+        def swap_node_to_partition(
+            node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
+        ):
+            """This function helps to swap one node from partition p0
+            with all the nodes in another partition p1
+            """
+            p1_nodes = list(p1.nodes) + [None]
+            min_cost = float("inf")
+            node_pair: List[Node] = []
+            for n1 in p1_nodes:
+                # Ignore the node if it is not an op node
+                if n1 is not None and n1.op in {"placeholder", "get_attr"}:
+                    continue
+                # Try swapping the node in p0 with n1 in p1
+                cost = try_swap_nodes(
+                    node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
+                )
+                if cost < min_cost:
+                    node_pair = [node, n1]
+                    min_cost = cost
+            return min_cost, node_pair
+
+        # First use size_based_partition
+        self.size_based_partition()
+        partition_to_latency_mapping = get_partition_to_latency_mapping(
+            self.partitions, node_to_latency_mapping
+        )
+        # Calculate the cost of the partitions
+        cost = get_latency_of_partitioned_graph(
+            self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec
+        )
+        # Keep track of the node pair that shows the best cost
+        node_pair: List[Node] = []
+        # Keep track of the partition pair of the node pair
+        partition_pair: List[Partition] = []
+        # Collect all the op nodes from the graph
+        op_nodes = []
+        for n in self.graph_module.graph.nodes:
+            if n.op not in {"placeholder", "get_attr", "output"}:
+                op_nodes.append(n)
+        for node in op_nodes:
+            # Find which partition the current node belongs to
+            p0_index = self.node_to_partition[node]
+            p0 = self.partitions[p0_index]
+            # Go through all the other partitions to swap
+            # with other nodes from those partitions
+            for p1_index, _ in enumerate(self.partitions):
+                if p0_index != p1_index:
+                    p1 = self.partitions[p1_index]
+                    new_cost, new_node_pair = swap_node_to_partition(
+                        node,
+                        p0,
+                        p1,
+                        node_to_latency_mapping,
+                        transfer_rate_bytes_per_sec,
+                    )
+                    # Update the cost
+                    # Track the swapped node pair and their partitions
+                    if new_cost < cost:
+                        cost = new_cost
+                        node_pair = new_node_pair
+                        partition_pair = [p0, p1]
+            # Do the swapping after trying all the nodes from a partition
+            if len(node_pair) != 0:
+                swap_nodes(
+                    node_pair[0], node_pair[1], partition_pair[0], partition_pair[1]
+                )
+                reorganize_partitions(self.partitions)
+                get_device_to_partitions_mapping(self.partitions, self.devices)
+        reorganize_partitions(self.partitions)
+        # Map the devices to the partitions
+        get_device_to_partitions_mapping(self.partitions, self.devices)
+        return
+
+    def aot_based_partition(
+        self, node_to_partition_mapping, partition_to_logical_device_mapping
+    ):
+        """This function helps to rebuild the partitions given the nodes and their
+        corresponding partition ids
+        """
+        partition_id_to_partition_mapping: Dict[int, Partition] = {}
+        self.node_to_partition = node_to_partition_mapping
+        for node in self.node_to_partition:
+            partition_id = self.node_to_partition[node]
+            # If the requested partition has not been created, create the partition
+            if partition_id not in partition_id_to_partition_mapping:
+                partition = Partition(partition_id)
+                self.partitions.append(partition)
+                partition_id_to_partition_mapping[partition_id] = partition
+                partition.logical_device_ids = partition_to_logical_device_mapping[
+                    partition_id
+                ]
+            else:
+                partition = partition_id_to_partition_mapping[
+                    self.node_to_partition[node]
+                ]
+            # Add the current node into the partition
+            partition.add_node(node)
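`aot_based_partition` replays a precomputed assignment instead of searching for one. A minimal hypothetical sketch, again reusing `traced` from above (the plan itself would come from some offline planner):

```python
from torch.fx.experimental.partitioner_utils import (
    Device, PartitionerConfig, PartitionMode,
)

# Hypothetical precomputed plan: every op node goes to partition 0,
# and partition 0 lives on logical device 0.
plan = {
    n: 0
    for n in traced.graph.nodes
    if n.op not in {"placeholder", "get_attr", "output"}
}
config = PartitionerConfig(
    [Device("dev_0", 125_000, 0)],
    mode=PartitionMode.aot_based,
    node_to_partition_mapping=plan,
    partition_to_logical_device_mapping={0: [0]},
)
```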
+ """ + + def __init__( + self, + root: torch.nn.Module, + graph: torch.fx.Graph, + const_subgraph: Optional[torch.fx.Graph] = None, + fx_const_folded_attrs_name: Optional[str] = None, + device_for_folded_attrs: str = "cuda", + ): + super().__init__(root, graph) + self.const_subgraph_module = ( + None + if const_subgraph is None + else torch.fx.GraphModule(root, const_subgraph) + ) + self.has_folding_been_run = False + self.fx_const_folded_attrs_name = fx_const_folded_attrs_name + self.device_for_folded_attrs = device_for_folded_attrs + + def __call__(self, *args, **kwargs): + if not self.has_folding_been_run: + self.run_folding() + return super().__call__(*args) + + def run_folding(self): + # If there's no const subgraph module or attr output names to use, return + # early as there is no const folding to perform. + if ( + self.const_subgraph_module is None + or self.fx_const_folded_attrs_name is None + ): + return + + assert not self.has_folding_been_run + self.has_folding_been_run = True + + # Actually run const folding subgraph. Note that single attr const fold + # subgraphs output a single Tensor while multiple outputs are returned as + # Tuple[Tensor,]. + folded_attrs = self.const_subgraph_module() + + def _create_param(i): + return torch.nn.Parameter( + i + if not isinstance(i, int) + else torch.Tensor([i]).to(device=self.device_for_folded_attrs), + requires_grad=i.requires_grad if isinstance(i, torch.Tensor) else False, + ) + + params = ( + torch.nn.ParameterList([_create_param(i) for i in folded_attrs]) + if isinstance(folded_attrs, tuple) + else _create_param(folded_attrs) + ) + setattr(self, self.fx_const_folded_attrs_name, params) + + +def _inline_module(gm: torch.fx.GraphModule, inline_mod_name: str): + """ + Given `gm` and some graph module which is called with target name `inline_mod_name`, + this helper will inline all of the nodes from that called graph module into `gm`. + """ + # Fetch the inner graph module that we want to inline inside `gm`. + inline_mod = dict(gm.named_modules())[inline_mod_name] + assert isinstance(inline_mod, torch.fx.GraphModule) + call_mod_node_to_replace = None + for node in gm.graph.nodes: + if node.op == "call_module" and node.target == inline_mod_name: + call_mod_node_to_replace = node + break + assert call_mod_node_to_replace is not None + + # Now actually do the swap. Note that we have to keep track of new nodes that are + # copied into `gm` -- we do this via replacement_mapping. + call_mod_args = call_mod_node_to_replace.args + replacement_mapping: Dict[torch.fx.Node, torch.fx.Node] = {} + ph_count = 0 + + def replacement_fn(node): + new_node = replacement_mapping[node] + new_node.meta = node.meta.copy() + return new_node + + for inline_node in inline_mod.graph.nodes: + if inline_node.op == "placeholder": + replacement_mapping[inline_node] = call_mod_args[ph_count] + ph_count += 1 + continue + + if inline_node.op == "output": + outputs = inline_node.args[0] + output_replacements = map_arg(outputs, replacement_fn) + call_mod_node_to_replace.replace_all_uses_with(output_replacements) + continue + + with gm.graph.inserting_before(call_mod_node_to_replace): + new_node = gm.graph.node_copy(inline_node, replacement_fn) + replacement_mapping[inline_node] = new_node + + gm.graph.eliminate_dead_code() + + +def get_unique_attr_name_in_module(mod_traced: torch.fx.GraphModule, name: str) -> str: + """ + Make sure the name is unique (in a module) and can represents an attr. + """ + # Delete all characters that are illegal in a Python identifier. 
+ name = re.sub("[^0-9a-zA-Z_]+", "_", name) + if name[0].isdigit(): + name = f"_{name}" + # Now make sure it is in fact unique to the module by incrementing suffix value. + while hasattr(mod_traced, name): + match = re.match(r"(.*)_(\d+)$", name) + if match is None: + name = name + "_1" + else: + base, num = match.group(1, 2) + name = f"{base}_{int(num) + 1}" + + return name + + +def split_const_subgraphs( + module: Union[torch.nn.Module, torch.fx.GraphModule], + skip_folding_node_fn: Optional[Callable[[torch.fx.Node], bool]] = None, + device_for_folded_attrs: str = "cpu", +) -> FoldedGraphModule: + """ + Looks through `module` for any nodes that have all constant attribute inputs + and separates them out into their own constant subgraph, and returns a + FoldedGraphModule which runs that constant subgraph on the first run to set + attributes on the module prior to running the non-constant portion of the + graph. + """ + if not isinstance(module, torch.fx.GraphModule): + mod_traced = torch.fx.symbolic_trace(module) + else: + mod_traced = module + + # Build up a list of const_nodes, defined as nodes that are themselves + # get_attrs, or have all get_attr or other constant node inputs. + const_nodes: Set[torch.fx.Node] = set() + found_const_folding = False + for node in mod_traced.graph.nodes: + # Skip over placeholders/outputs because they can't be const folded and + # we don't want to add tags to them. + if node.op in {"placeholder", "output"}: + continue + + # If the node itself is constant, or all of its inputs are constant, + # then tag it as constant. + if node.op != "get_attr" and not set(node.all_input_nodes).issubset( + const_nodes + ): + continue + + # If provided skip folding function says to skip, then skip. + if skip_folding_node_fn and skip_folding_node_fn(node): + continue + + # Skip folding side-effectful functions + if node.is_impure(): + continue + + # Must be a constant foldable node at this point. + const_nodes.add(node) + if node.op != "get_attr": + found_const_folding = True + + # If we did not find any const folding then return early without a const fold subgraph. + if not found_const_folding: + return FoldedGraphModule(mod_traced, mod_traced.graph) + + # Partition the module into two: submod_0 for constant folding subgraph, and + # submod_1 for the rest. + def mod_partition(node: torch.fx.Node): + return 0 if node in const_nodes else 1 + + split = split_module(mod_traced, module, mod_partition) + + const_gm, non_const_gm = split.submod_0, split.submod_1 + const_mod_name, non_const_mod_name = "submod_0", "submod_1" + + # The module that a call_module node refers to gets copied to submodules during split. + # The path to the module also gets inlined, i.e. mod.a.b -> mod_a_b. Here we need to + # attach inlined modules to `split` as it's the owning module now. + for node in non_const_gm.graph.nodes: + if node.op == "call_module": + setattr(split, node.target, getattr(non_const_gm, node.target)) + for node in const_gm.graph.nodes: + if node.op == "call_module": + setattr(split, node.target, getattr(const_gm, node.target)) + + # split_module currently does not use get_attrs for attrs. Instead it passes + # them in as args from the parent module, which used get_attrs. Here we set + # them as get_attrs inside const_gm, allowing for running folding without + # somehow a priori knowing the attrs that should be passed as args. We can + # unconditionally do this for all placeholders because we know all + # placeholders to const_gm must be constants accessible via get_attr. 
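+ # For example (illustrative node names): if the parent graph calls
+ # %submod_0 : call_module[target=submod_0](args = (%w1, %w2))
+ # then `call_const_gm_args` found below is (%w1, %w2), both get_attr nodes.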
+ call_const_gm_args = None + for node in split.graph.nodes: + if node.op == "call_module": + if node.target == const_mod_name: + call_const_gm_args = node.args + break + assert call_const_gm_args is not None + + # Here we do the actual replacement of placeholders to get_attrs. Note that here we + # set the const_gm.graph into a new root_const_gm with split as the root module, + # because we are fetching attributes directly from the root module, instead of + # fetching them from const_gm. Example: The const_gm must have some format like: + # graph(): + # %inp : [num_users=1] = placeholder[target=const_inp] + # %add : [num_users=1] = call_function[target=operator.add](args = (%inp, %inp), kwargs = {}) + # return add + # We replace that with the following, which does not have any placeholders: + # graph(): + # %inp_1 : [num_users=1] = get_attr[target=const_inp] + # %add : [num_users=1] = call_function[target=operator.add](args = (%inp_1, %inp_1), kwargs = {}) + # return add + root_const_gm = torch.fx.GraphModule(split, const_gm.graph) + for node in root_const_gm.graph.nodes: + if node.op == "output": + multiple_outputs = isinstance(node.args[0], tuple) + continue + if node.op != "placeholder": + continue + in_node = next(n for n in call_const_gm_args if n.name == node.target) + assert in_node.op == "get_attr" + with root_const_gm.graph.inserting_before(node): + new_node = root_const_gm.graph.get_attr(in_node.target) + new_node.meta = node.meta.copy() + node.replace_all_uses_with(new_node) + root_const_gm.graph.erase_node(node) + assert "multiple_outputs" in locals() + + # Now find the call to const_gm inside split, and replace it with a getattr to the + # folded tensor(s) that result from constant folding. Note that we don't need to + # worry about whether this is one or more tensors because the original graph + # correctly uses getitem to extract individual tensors if there are multiple folded. + fx_const_folded_attrs_name = get_unique_attr_name_in_module( + split, "_FX_CONST_FOLDED_ATTRS" + ) + setattr( + split, + fx_const_folded_attrs_name, + torch.nn.ParameterList() if multiple_outputs else torch.nn.Parameter(), + ) + for node in split.graph.nodes: + if node.op == "call_module" and node.target == const_mod_name: + with node.graph.inserting_before(node): + folded_attrs = node.graph.get_attr(fx_const_folded_attrs_name) + folded_attrs.meta = node.meta.copy() + node.replace_all_uses_with(folded_attrs) + break + + split.graph.eliminate_dead_code() + + # Finally, inline the non-constant submod into the split submod. This is so that the + # original caller who may have passed in a graph module will get back out a graph + # module whose graph is traced to the same granularity. + _inline_module(split, non_const_mod_name) + + return FoldedGraphModule( + split, + split.graph, + root_const_gm.graph, + fx_const_folded_attrs_name, + device_for_folded_attrs, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/debug.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..bd6fed690914e0f3696fb6c37bb63371bd801f93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/debug.py @@ -0,0 +1,31 @@ +import torch.fx as fx + +def set_trace(gm: fx.GraphModule) -> fx.GraphModule: + """ + Sets a breakpoint in `gm`'s generated python code. It drops into pdb when + `gm` gets run. + + Args: + gm: graph module to insert breakpoint. 
It is then recompiled for it to + take effect. + + Returns: + the `gm` with breakpoint inserted. + """ + def insert_pdb(body): + return ["import pdb; pdb.set_trace()\n", *body] + + with gm.graph.on_generate_code( + make_transformer=lambda cur_transform: ( + # new code transformer to register + lambda body: ( + insert_pdb( + cur_transform(body) if cur_transform + else body + ) + ) + ) + ): + gm.recompile() + + return gm diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py new file mode 100644 index 0000000000000000000000000000000000000000..8c35e0fab3f64900f2c724096d523ddca4c8ea79 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py @@ -0,0 +1,914 @@ +from functools import reduce +import torch +import operator +from torch.fx.tensor_type import Dyn, is_consistent, TensorType, is_more_precise +from typing import Callable, Dict +from torch.fx.node import Target, Node +from torch.nn.modules.batchnorm import BatchNorm2d +from torch.nn.modules.conv import Conv2d +from torch.fx.experimental.refinement_types import Equality +import itertools + +from torch.fx.experimental.unification import Var # type: ignore[attr-defined] + +import sympy + +_INFERENCE_RULES: Dict[Target, Callable] = {} +_REFINEMENT_RULES: Dict[Target, Callable] = {} +_RULES: Dict[Target, Callable] = {} + + +def expand_to_tensor_dim(t, n): + """ + Expand a type to the desired tensor dimension if possible + Raise an error otherwise. + - t is the given type + - n is a number of dimensions to expand to + """ + if t == Dyn: + dims = [Dyn] * n + return TensorType(tuple(dims)) + elif isinstance(t, TensorType): + if len(t.__args__) != n: + raise TypeError(f'Cannot extend tensor. Tensor {t} has rank {len(t.__args__)}. 
It should have rank {n}')
+ return t
+ else:
+ raise TypeError(f'Cannot match the type {t}')
+
+
+def broadcast_types(t1, t2):
+ """
+ Applies broadcasting to both given types such that they
+ become consistent with each other and returns two new
+ resulting types
+ """
+
+ # if either type is Dyn, do nothing since the types are already consistent
+ if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
+ return t1, t2
+
+ if isinstance(t1, TensorType) and isinstance(t2, TensorType):
+ s1 = len(t1.__args__)
+ s2 = len(t2.__args__)
+
+ new_t1 = list(t1.__args__)
+ new_t2 = list(t2.__args__)
+
+ # We make the types the same length which is the first requirement
+ # for consistency
+ if s1 > s2:
+ for i in range(s1 - s2):
+ new_t2.insert(0, 1)
+
+ elif s2 > s1:
+ for i in range(s2 - s1):
+ new_t1.insert(0, 1)
+
+ # we replace occurrences of "1" in each tensor with
+ # the corresponding type from the other tensor
+ for i, (x, y) in enumerate(zip(new_t1, new_t2)):
+ if x == 1:
+ new_t1[i] = y
+ elif y == 1:
+ new_t2[i] = x
+
+ # at this point our tensors should be consistent
+ # and we can apply the element-wise operation and find the right dimension
+ # for the output of the operation
+ (t1, t2) = TensorType(tuple(new_t1)), TensorType(tuple(new_t2))
+ return (t1, t2)
+ else:
+ raise TypeError(f'Cannot broadcast types {t1} and {t2}')
+
+def register_inference_rule(call_target):
+ def register(fn):
+ if call_target in _INFERENCE_RULES:
+ raise RuntimeError(f'Inference rule already registered for {call_target}!')
+ _INFERENCE_RULES[call_target] = fn
+ return fn
+ return register
+
+def register_refinement_rule(call_target):
+ def register(fn):
+ if call_target in _REFINEMENT_RULES:
+ raise RuntimeError(f'Refinement rule already registered for {call_target}!')
+ _REFINEMENT_RULES[call_target] = fn
+ return fn
+ return register
+
+def register_algebraic_expressions_inference_rule(call_target):
+ def register(fn):
+ if call_target in _RULES:
+ raise RuntimeError(f'Rule already registered for {call_target}!')
+ _RULES[call_target] = fn
+ return fn
+ return register
+
+@register_inference_rule(torch.add)
+@register_inference_rule(operator.add)
+def add_inference_rule(n: Node):
+ """
+ Apply the addition inference rule. This includes:
+ - scalar addition
+ - broadcasting semantics
+
+ Note that we always return the least precise type between
+ the operands (after applying broadcasting) to be the final type of the operation
+
+ Note that we do not modify the operand types themselves after applying broadcasting
+ to them.
We only use them to calculate the final type
+ """
+ assert isinstance(n.args[0], Node)
+ assert isinstance(n.args[1], Node)
+ t1 = n.args[0].type
+ t2 = n.args[1].type
+
+ # handle scalar addition
+ if t1 == int and isinstance(t2, TensorType):
+ n.type = t2
+ return n.type
+
+ # handle scalar addition
+ elif t2 == int and isinstance(t1, TensorType):
+ n.type = t1
+ return n.type
+
+ # we bring the new types to the point where
+ # we can check for consistency
+ # any inconsistency would not have been caused
+ # by broadcasting at this point
+ (new_t1, new_t2) = broadcast_types(t1, t2)
+
+ if new_t1 != t1 or new_t2 != t2:
+ n.meta['broadcast'] = True
+ n.meta[str(n.args[0])] = new_t1
+ n.meta[str(n.args[1])] = new_t2
+
+ else:
+ n.meta['broadcast'] = False
+
+ new_t1 = t1 if not n.meta['broadcast'] else new_t1
+ new_t2 = t2 if not n.meta['broadcast'] else new_t2
+
+ # we check for consistency between the new types
+ if is_consistent(new_t1, new_t2):
+ # we return the less precise type because
+ # broadcasting may have happened
+ # for operands with shape [1,2,Dyn] and [1,2,1]
+ # we have to assign the node [1,2,Dyn]
+ if is_more_precise(new_t1, new_t2):
+ n.type = new_t2
+ else:
+ n.type = new_t1
+ return n.type
+ else:
+ raise TypeError(f'Cannot add arguments {n.args[0]} ({n.args[0].type}) and {n.args[1]} ({n.args[1].type}) in node {n}.'
+ f' Types should match ')
+
+@register_inference_rule(getattr)
+def get_attr_inference_rule(n: Node, traced):
+ """
+ The current getattr rule only handles the shape attribute
+ Can be extended to other attributes
+ The most representative type we have is "Dyn" but the system
+ can be extended with more types, such as a type to represent shapes
+ """
+ attr_node = n.args[0]
+ attr_name = n.args[1]
+
+ if attr_name == "shape":
+ n.type = Dyn
+ else:
+ raise TypeError("Not yet implemented")
+
+ # TODO. We leave it like this till we add a type to represent tensor sizes
+ return n.type
+
+@register_inference_rule(torch.transpose)
+def transpose_inference_rule(n: Node):
+ """
+ We check that dimensions for the transpose operations
+ are within range of the tensor type of the node
+ """
+ if n.target == torch.transpose:
+ assert isinstance(n.args[0], Node)
+ t = n.args[0].type
+
+ assert isinstance(n.args[1], int)
+ assert isinstance(n.args[2], int)
+ dim1, dim2 = n.args[1], n.args[2]
+
+ if t == Dyn:
+ n.type = Dyn
+ return n.type
+
+ elif isinstance(t, TensorType):
+ if 0 <= dim1 < len(t.__args__) and 0 <= dim2 < len(t.__args__):
+ new_type = list(t.__args__)
+ new_type[dim1], new_type[dim2] = new_type[dim2], new_type[dim1]
+ final = TensorType(new_type)
+ n.type = get_greatest_upper_bound(n.type, final)
+ return n.type
+ else:
+ raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
+ else:
+ raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
+
+
+@register_inference_rule(torch.reshape)
+def reshape_inference_rule(n: Node):
+ """
+ Without dynamism, the rule checks that the
+ product of the elements of the argument tensor
+ type is equal to the product of the elements
+ of the required shape. We gradualize this rule
+ by adding a case to handle fully dynamic input
+ as well as input where some of the tensor dimensions
+ are unknown.
In this case we check for divisibility
+ """
+ assert isinstance(n.args[0], Node)
+ t1 = n.args[0].type
+
+ assert isinstance(n.args[1], list)
+ t2 = n.args[1]
+ t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])
+
+ # if we do not know the original tensor dimension,
+ # we return the required dimension
+ if t1 == Dyn:
+ n.type = t2_type
+ return t2_type
+
+ # if any of the dimensions are unknown,
+ # we check for divisibility
+ elif isinstance(t1, TensorType):
+ assert isinstance(t1, TensorType)
+ a = [e if e != Dyn else 1 for e in t1.__args__]
+ p1 = reduce(lambda x, y: x * y, a)
+ p2 = reduce(lambda x, y: x * y, t2)
+ if p1 % p2 == 0 or p2 % p1 == 0:
+ n.type = t2_type
+ return t2_type
+ else:
+ raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
+ else:
+ raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
+
+@register_inference_rule(BatchNorm2d)
+def bn2d_inference_rule(n: Node, module_instance):
+ """
+ Given a BatchNorm2D instance and a node check the following conditions:
+ - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, x_3, x_4)
+ - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
+ - t is consistent with t'
+ - x_2 is consistent with the module's num_features
+ - x_2' is consistent with the module's num_features
+ output type: the more precise type of t and t'
+ """
+ assert isinstance(n.args[0], Node)
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
+ arg_type = n.args[0].type
+ n.type = expand_to_tensor_dim(n.type, 4)
+
+ # we check the conditions on the incoming argument
+ # and any existing annotation
+ # we also check for consistency between both annotations
+ if is_consistent(arg_type.__args__[1], module_instance.num_features) and \
+ is_consistent(n.type.__args__[1], module_instance.num_features) and \
+ is_consistent(arg_type, n.type):
+
+ # we choose the more precise type
+ # to be the node type
+ # so if an incoming argument has more type information
+ # we set this node's type to be the argument type
+ n.type = get_greatest_upper_bound(arg_type, n.type)
+ return n.type
+ else:
+ raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')
+
+
+def calculate_out_dimension(d_in, module_instance, index):
+ """
+ For calculating h_out and w_out according to the conv2D documentation
+ """
+ padding = (module_instance.padding, module_instance.padding) \
+ if isinstance(module_instance.padding, int) else module_instance.padding
+ kernel_size = (module_instance.kernel_size, module_instance.kernel_size) \
+ if isinstance(module_instance.kernel_size, int) else module_instance.kernel_size
+ stride = (module_instance.stride, module_instance.stride) \
+ if isinstance(module_instance.stride, int) else module_instance.stride
+ dilation = (module_instance.dilation, module_instance.dilation) \
+ if isinstance(module_instance.dilation, int) else module_instance.dilation
+
+ DIMENSION_TYPES = (int, sympy.Symbol)
+
+ if d_in == Dyn:
+ return Dyn
+
+ elif isinstance(d_in, DIMENSION_TYPES):
+ n = d_in + 2 * padding[index] - \
+ dilation[index] * \
+ (kernel_size[index] - 1) - 1
+
+ return (n // stride[index]) + 1
+
+ else:
+ raise TypeError(f'{d_in} in {module_instance} must be a number or Dyn.
Received {type(d_in)}') + + +def get_greatest_upper_bound(type1, type2): + """ + Get the most precise type that's consistent with the given types + """ + if type1 == Dyn: + return type2 + elif type2 == Dyn: + return type1 + elif isinstance(type1, TensorType) and isinstance(type2, TensorType): + if not is_consistent(type1, type2): + raise TypeError(f'Inconsistent types {type1}, {type2}') + gub = [t1 if is_more_precise(t1, t2) else t2 for (t1, t2) in zip(type1.__args__, type2.__args__)] + return TensorType(tuple(gub)) + + +@register_inference_rule(Conv2d) +def conv2d_inference_rule(n: Node, module_instance): + """ + Given a Conv2D instance and a node check the following conditions: + - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W) + - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4') + - x_2 is consistent with the module's in_channels + - let o = (x_1, out_channels, H_out, W_out) + then the output is the greatest upper bound of o and the existing node type t'. + """ + assert isinstance(n.args[0], Node) + n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4) + arg_type = n.args[0].type + curr_node_type = expand_to_tensor_dim(n.type, 4) + + if is_consistent(arg_type.__args__[1], module_instance.in_channels): + w_in = arg_type.__args__[3] + h_in = arg_type.__args__[2] + h_out = calculate_out_dimension(h_in, module_instance, 0) + w_out = calculate_out_dimension(w_in, module_instance, 1) + new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out)) + gub = get_greatest_upper_bound(new_type, curr_node_type) + n.type = gub + return n.type + else: + raise TypeError(f'Cannot apply {module_instance} with input type { arg_type} and existing type {n.type} on {n}') + + +@register_inference_rule(torch.nn.ReLU) +def relu_inference_rule(n: Node, module_instance): + """ + Input and output shapes should be equal. + """ + assert isinstance(n.args[0], Node) + + if n.args[0].type == Dyn and isinstance(n.type, TensorType): + n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__)) + + if isinstance(n.args[0].type, TensorType): + n.type = get_greatest_upper_bound(n.args[0].type, n.type) + return n.type + + +def maxpool2d_check(typ, module_instance): + """ + Applies the maxpool2d shape information to the input + this affects the last two dimensions + """ + new_type_list = list(typ.__args__) + if len(new_type_list) == 4 or len(new_type_list) == 3: + w_in = new_type_list[-1] + h_in = new_type_list[-2] + + h_out = calculate_out_dimension(h_in, module_instance, 0) + w_out = calculate_out_dimension(w_in, module_instance, 1) + + new_type_list[-1] = w_out + new_type_list[-2] = h_out + return TensorType(tuple(new_type_list)) + + else: + raise TypeError(f'Wrong size {typ} for {module_instance}') + + +@register_inference_rule(torch.nn.MaxPool2d) +def maxpool2d_inference_rule(n: Node, module_instance): + """ + Given a MaxPool2D instance and a node check the following conditions: + - Input size matches size 3 or 4 + - Current node type is consistent with the output type we will calculate + - Input size matches output size and the last two dimensions of the output + are w_out and h_out. The remaining dimensions are the same as the input + - Our final result is the greatest upper bound of the output we calculate + and the current node type. 
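+
+ For example (illustrative): with input type TensorType((1, 3, 32, 32)) and
+ MaxPool2d(kernel_size=2, stride=2), the calculated output type is
+ TensorType((1, 3, 16, 16)).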
+ """ + assert isinstance(n.args[0], Node) + + if n.args[0].type == Dyn and isinstance(n.type, TensorType): + n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__)) + if isinstance(n.args[0].type, TensorType): + output = maxpool2d_check(n.args[0].type, module_instance) + n.type = get_greatest_upper_bound(output, n.type) + return n.type + + + +def linear_check(tensor_type, module_instance): + """ + Checks that an input tensor type satisfies the conditions for linear operation + and returns the output type based on in and out features given by module_instance + """ + if len(tensor_type.__args__) >= 2: + if is_consistent(module_instance.in_features, tensor_type.__args__[-1]): + new_type_args = list(tensor_type.__args__) + new_type_args[-1] = module_instance.out_features + return TensorType(tuple(new_type_args)) + else: + raise TypeError(f'Inconsistent {module_instance.in_features} and {tensor_type.__args__[-1]} in {module_instance}') + else: + raise TypeError(f'Type {tensor_type} must have rank 2 or more.') + + +@register_inference_rule(torch.nn.Linear) +def linear_inference_rule(n: Node, module_instance): + """ + Applies the shape information to the input then gets the greatest upper bound + of the resulting type and the existing type + """ + assert isinstance(n.args[0], Node) + if n.args[0].type == Dyn and isinstance(n.type, TensorType): + n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__)) + if isinstance(n.args[0].type, TensorType): + output_type = linear_check(n.args[0].type, module_instance) + n.type = get_greatest_upper_bound(output_type, n.type) + return n.type + + +def adaptiveavgpool2d_check(tensor_type, module_instance): + output_size = module_instance.output_size + if isinstance(output_size, int): + output_size = [output_size, output_size] + elif isinstance(output_size, tuple): + output_size = list(output_size) + if output_size[0] is None: + output_size[0] = output_size[1] + if output_size[1] is None: + output_size[1] = output_size[0] + + new_type_list = list(tensor_type.__args__) + + if len(tensor_type.__args__) == 4 or len(tensor_type.__args__) == 3: + new_type_list[-1] = output_size[1] + new_type_list[-2] = output_size[0] + + return TensorType(tuple(new_type_list)) + + else: + raise TypeError(f'Tensor ranks must be 3 or 4. 
Got {tensor_type}') + +@register_inference_rule(torch.nn.AdaptiveAvgPool2d) +def adaptiveavgpool2d_inference_rule(n: Node, module_instance): + """ + The input and output sizes should be the same except for the last + two dimensions taken from the input, which represent width and height + """ + assert isinstance(n.args[0], Node) + if n.args[0].type == Dyn and isinstance(n.type, TensorType): + n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__)) + if isinstance(n.args[0].type, TensorType): + output_type = adaptiveavgpool2d_check(n.args[0].type, module_instance) + n.type = get_greatest_upper_bound(n.type, output_type) + return n.type + +def flatten_check(tensor_type, start_dim, end_dim): + l = len(tensor_type.__args__) + + start_dim = l if start_dim == -1 else abs(start_dim) + end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1 + + if 0 <= start_dim <= (l - 1) and 0 <= end_dim <= l and start_dim < end_dim: + my_args = list(tensor_type.__args__) + lhs = my_args[0:start_dim] + rhs = my_args[end_dim:] + mid = my_args[start_dim:end_dim] + if Dyn in mid: + mid = [Dyn] + else: + mid = [reduce(lambda x, y: x * y, my_args[start_dim:end_dim])] + new_type_list = lhs + mid + rhs + return TensorType(tuple(new_type_list)) + else: + raise TypeError(f'Incompatible dimensions {start_dim}, {end_dim - 1} in type {tensor_type}') + +@register_inference_rule(torch.flatten) +def flatten_inference_rule(n: Node): + """ + Applies the flatten shape information to the input then gets the + greatest upper bound of the resulting type and the existing type + """ + assert isinstance(n.args[0], Node) + + # set the default start and end dims + start_dim = 1 + end_dim = -1 + + if len(n.args) > 1: + assert isinstance(n.args[1], int) + start_dim = n.args[1] + + if len(n.args) > 2: + assert isinstance(n.args[2], int) + end_dim = n.args[2] + + if n.args[0].type == Dyn and isinstance(n.type, TensorType): + n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__)) + + if isinstance(n.args[0].type, TensorType): + output_type = flatten_check(n.args[0].type, start_dim, end_dim) + n.type = get_greatest_upper_bound(output_type , n.type) + + return n.type + +class GraphTypeChecker: + def __init__(self, env, traced): + self.env = env + self.traced = traced + + def type_check(self): + """ + A gradual type checker for graphs + Effect: every node's field type will be + populated with a type after type-checking is done + """ + graph = self.traced.graph + + # type check every node with gradual type rules + # if any node does not type check return false + for n in graph.nodes: + self.type_check_node(n) + return True + + def type_check_node(self, n: Node): + """ + Type check a given fx node. 
+ Current operations:
+ - Reshape
+ - Transpose
+ - Add
+ - Relu
+ - conv2d
+ - batchnorm2d
+ - flatten
+ - maxpool2d
+ - adaptiveavgpool2d
+ - linear
+ """
+ if n.type is None:
+ n.type = Dyn
+
+ if n.op == 'placeholder':
+ return n.type
+
+ elif n.op == 'get_attr':
+ t = get_parameter(self.traced, n.target) # type: ignore[arg-type]
+ if isinstance(t.data, torch.Tensor):
+ n.type = TensorType(t.data.shape)
+ return n.type
+
+ elif n.op == 'call_function':
+ if n.target == getattr:
+ assert getattr in _INFERENCE_RULES
+ return _INFERENCE_RULES[n.target](n, self.traced)
+
+ elif n.target in _INFERENCE_RULES:
+ return _INFERENCE_RULES[n.target](n)
+ else:
+ raise RuntimeError(f'No inference rule registered for target {n.target}!')
+
+ elif n.op == 'call_module':
+ module_instance = self.traced.get_submodule(n.target)
+ if type(module_instance) in _INFERENCE_RULES:
+ return _INFERENCE_RULES[type(module_instance)](n, module_instance)
+ else:
+ raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')
+
+ elif n.op == 'output':
+ def get_node_type(a):
+ return a.type
+ n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
+ return n.type
+
+ else:
+ raise NotImplementedError(f"Method {n.op} not yet implemented")
+
+
+@register_refinement_rule(Conv2d)
+def conv_refinement_rule(n: Node):
+ """
+ The equality constraints are between the first dimension of
+ the input and output
+ """
+ res = []
+ assert isinstance(n.args[0], Node)
+ arg_type = n.args[0].type
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
+ res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
+ return res
+
+
+@register_refinement_rule(torch.nn.Linear)
+def linear_refinement_rule(n: Node):
+ """
+ The equality constraints are between the first dimension of
+ the input and output
+ """
+ res = []
+ assert isinstance(n.args[0], Node)
+ arg_type = n.args[0].type
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
+ res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
+ return res
+
+@register_refinement_rule(BatchNorm2d)
+@register_refinement_rule(torch.nn.ReLU)
+def all_eq(n: Node):
+ """
+ For operations where the input shape is equal to the output shape
+ """
+ res = []
+ assert isinstance(n.args[0], Node)
+ arg_type = n.args[0].type
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
+ args1 = arg_type.__args__
+ args2 = n.type.__args__
+ res = [Equality(args1[i], args2[i]) for i in range(len(args1))]
+ return res
+
+
+@register_refinement_rule(torch.nn.AdaptiveAvgPool2d)
+@register_refinement_rule(torch.nn.MaxPool2d)
+def first_two_eq(n: Node):
+ """
+ For operations where the first two dimensions of the input and output shape
+ are equal
+ """
+ res = []
+ assert isinstance(n.args[0], Node)
+ arg_type = n.args[0].type
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
+ args1 = arg_type.__args__
+ args2 = n.type.__args__
+ res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]
+ return res
+
+
+@register_refinement_rule(torch.add)
+@register_refinement_rule(operator.add)
+def element_wise_eq(n: Node):
+ """
+ Handles element-wise operations and broadcasting.
+ Note that after applying broadcasting to the arguments
+ we are able to determine if certain dimensions have not been broadcast
+ if they are symbolically equal.
+
+ In this case, we can establish equality between those dimensions and the
+ corresponding output dimensions.
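+
+ For example (illustrative): with operand types (s1, 2, 3) and (s2, 2, 3)
+ and output type (o1, o2, o3), the last two dimensions survive broadcasting
+ unchanged, so we emit Equality(2, o2) and Equality(3, o3) but no constraint
+ relating s1, s2 and o1.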
+
+ Note that it takes two iterations for this result. One iteration to establish
+ equality between certain dimensions of the operands (requiring the whole solver
+ including unification) and another iteration to establish equality between the operands
+ and the resulting type, requiring another round of constraint generation and unification.
+ """
+ res = []
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
+ arg_type1 = n.args[0].type
+ arg_type2 = n.args[1].type
+ if isinstance(arg_type1, TensorType) and isinstance(arg_type2, TensorType) and isinstance(n.type, TensorType):
+ args1, args2 = broadcast_types(arg_type1, arg_type2)
+ # by this point, we know that args1 and args2 are the same size.
+ a1 = args1.__args__
+ a2 = args2.__args__
+ a3 = n.type.__args__
+
+ # we would be here in the second iteration where we establish equality
+ # between operand type dimensions and the resulting type dimensions
+ r = []
+ for x, y, z in zip(a1, a2, a3):
+ if x == y:
+ r.append(Equality(x, z))
+ res = r
+ return res
+
+
+@register_refinement_rule(torch.flatten)
+def flatten_refinement_rule(n: Node):
+ """
+ Generates equality constraints between the dimensions of the input and output
+ that will not be involved in the flatten operation
+ """
+ assert isinstance(n.args[0], Node)
+
+ eq_const = []
+
+ start_dim = 1
+ end_dim = -1
+
+ if len(n.args) > 1:
+ assert isinstance(n.args[1], int)
+ start_dim = n.args[1]
+
+ if len(n.args) > 2:
+ assert isinstance(n.args[2], int)
+ end_dim = n.args[2]
+
+ if isinstance(n.type, TensorType) and isinstance(n.args[0].type, TensorType):
+ l = len(n.type.__args__)
+ arg_type = n.args[0].type
+ start_dim = l if start_dim == -1 else start_dim
+ end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1
+
+ for t1, t2 in zip(n.type.__args__[0:start_dim], arg_type.__args__[0:start_dim]):
+ eq_const.append(Equality(t1, t2))
+
+ for t1, t2 in zip(n.type.__args__[end_dim:], arg_type.__args__[end_dim:]):
+ eq_const.append(Equality(t1, t2))
+ return eq_const
+
+
+@register_algebraic_expressions_inference_rule(Conv2d)
+def conv_rule(n: Node, module_instance):
+ """
+ Represents the output in terms of an algebraic expression w.r.t.
+ the input when possible
+ """
+ assert isinstance(n.args[0], Node)
+ arg_type = n.args[0].type
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
+ w_in = arg_type.__args__[3]
+ h_in = arg_type.__args__[2]
+ h_out = calculate_out_dimension(h_in, module_instance, 0)
+ w_out = calculate_out_dimension(w_in, module_instance, 1)
+ new_type = TensorType((n.type.__args__[0], n.type.__args__[1], h_out, w_out))
+ n.type = new_type
+ return new_type
+
+class Refine:
+ """
+ Symbolic shape inference.
+ Generates constraints over type variables.
+ Currently all constraints are equality constraints.
+ """
+ def __init__(self, traced):
+ self.constraints = []
+ self.traced = traced
+ self.symbol_iter = itertools.count(start=0, step=1)
+
+ def refine(self):
+ """
+ Generates constraints for
+ every node in the graph based on
+ the operation.
+ """
+ graph = self.traced.graph
+ for n in graph.nodes:
+ self.refine_node(n)
+ return True
+
+ def symbolic_relations(self):
+ """
+ Infers algebraic relations
+ """
+ graph = self.traced.graph
+ for n in graph.nodes:
+ self.infer_symbolic_relations(n)
+ return True
+
+ def replace_dyn_with_fresh_var(self, typ):
+ """
+ Replace all unknown types with fresh type variables.
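+ For example (assuming fresh variables are numbered from 0),
+ TensorType((Dyn, 4)) becomes TensorType((Var(0), 4)).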
+ """ + if typ == Dyn: + new_symbol = Var(next(self.symbol_iter)) + return new_symbol + elif isinstance(typ, TensorType): + new_args = [self.replace_dyn_with_fresh_var(a) for a in typ.__args__] + return TensorType(tuple(new_args)) + elif isinstance(typ, list): + return [self.replace_dyn_with_fresh_var(t) for t in typ] + elif isinstance(typ, tuple): + return (self.replace_dyn_with_fresh_var(t) for t in typ) + else: + return typ + + + def convert_to_sympy_symbols(self, typ): + """ + Replace all unknown types with fresh type variables. + """ + if isinstance(typ, Var): + return sympy.symbols(str(typ)) + elif isinstance(typ, TensorType): + new_args = [self.convert_to_sympy_symbols(a) for a in typ.__args__] + return TensorType(tuple(new_args)) + elif isinstance(typ, list): + return [self.convert_to_sympy_symbols(t) for t in typ] + elif isinstance(typ, tuple): + return (self.convert_to_sympy_symbols(t) for t in typ) + else: + return typ + + def refine_node(self, n: Node): + """ + Returns a list of equality constraints for + call_module and call_function nodes. + Models the relation between input and output dimensions + using constraints in case they are both tensors. + All operations used in resnet50 are defined. + """ + if n.type is None: + n.type = Dyn + + n.type = self.replace_dyn_with_fresh_var(n.type) + + if n.op == 'call_function': + if n.target in _REFINEMENT_RULES: + self.constraints += _REFINEMENT_RULES[n.target](n) + else: + pass + + if n.op == 'call_module': + module_instance = self.traced.get_submodule(n.target) + if type(module_instance) in _REFINEMENT_RULES: + self.constraints += _REFINEMENT_RULES[type(module_instance)](n) + else: + pass + + if n.op == 'output': + def get_node_type(a): + return a.type + n.type = torch.fx.node.map_arg(n.args[0], get_node_type) + return n.type + + else: + pass + + def infer_symbolic_relations(self, n: Node): + n.type = self.convert_to_sympy_symbols(n.type) + if n.op == 'call_function': + if n.target in _RULES: + return _RULES[n.target](n) + else: + pass + + if n.op == 'call_module': + module_instance = self.traced.get_submodule(n.target) + if type(module_instance) in _RULES: + return _RULES[type(module_instance)](n, module_instance) + else: + pass + + if n.op == 'output': + def get_node_type(a): + return a.type + n.type = torch.fx.node.map_arg(n.args[0], get_node_type) + return n.type + + else: + pass + +def get_parameter(traced, target: str): + """ + Returns the parameter given by ``target`` if it exists, + otherwise throws an error. + + See the docstring for ``get_submodule`` for a more detailed + explanation of this method's functionality as well as how to + correctly specify ``target``. + + Args: + target: The fully-qualified string name of the Parameter + to look for. (See ``get_submodule`` for how to specify a + fully-qualified string.) 
+
+ Returns:
+ torch.nn.Parameter: The Parameter referenced by ``target``
+
+ Raises:
+ AttributeError: If the target string references an invalid
+ path or resolves to something that is not an
+ ``nn.Parameter``
+ """
+ module_path, _, param_name = target.rpartition(".")
+
+ mod: torch.nn.Module = traced.get_submodule(module_path)
+
+ if not hasattr(mod, param_name):
+ raise AttributeError(mod._get_name() + " has no attribute `" + param_name + "`")
+
+ param: torch.nn.Parameter = getattr(mod, param_name)
+
+ return param
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd56694773e9b97087b9a2f83b175fa7ec990b04
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py
@@ -0,0 +1,171 @@
+import torch
+
+from torch.fx.node import Node
+from torch.fx._symbolic_trace import symbolic_trace
+from torch.fx.passes.tools_common import legalize_graph
+import itertools
+import operator
+
+from typing import Dict, List, Tuple
+
+
+def split_result_tensors(
+ result: torch.Tensor, inputs: List[torch.Tensor]
+) -> Tuple[torch.Tensor, ...]:
+ """
+ A free function for use in the merge_matmul graph transformation below that
+ splits the output from a merged matmul into the individual results for each
+ input tensor.
+
+ Arguments:
+ result: The merged matmul result tensor.
+ inputs: The list of inputs that were merged into one for the matmul.
+
+ Returns:
+ List of matmul results for each input tensor.
+ """
+ # When fx tracer is running, x.shape[0] will be torch.fx.Attribute but we
+ # need an int even when tracing
+ if isinstance(result, torch.fx.Proxy):
+ splits = [0] * len(inputs)
+ else:
+ splits = [x.shape[0] for x in inputs]
+
+ return torch.split(result, splits)
+
+
+def may_depend_on(a: Node, b: Node, search_depth: int = 6):
+ """
+ Determine if one node depends on another in a torch.fx.Graph.
+
+ Arguments:
+ a: The node that may have a dependency on b.
+ b: The node that a may have a dependency on.
+ search_depth: In the case of an indirect dependency, this function
+ searches up to this many nodes away in search of a
+ data dependency. If none is found, the function
+ makes the conservative assumption that there is a
+ dependency.
+
+ Returns:
+ True if a may depend on b, False if it definitely does not.
+ """
+ # Equivalence is defined as dependence.
+ if a == b:
+ return True
+
+ # If a has no inputs, it cannot depend on b.
+ if len(a.all_input_nodes) == 0:
+ return False
+
+ # If the search depth has been exhausted and no conclusion has been
+ # reached, assume that there is a data dependency.
+ if search_depth == 0:
+ return True
+
+ # Recursively check all inputs of a.
+ for inp in a.all_input_nodes:
+ if may_depend_on(inp, b, search_depth - 1):
+ return True
+
+ return False
+
+
+def are_nodes_independent(nodes: List[Node]):
+ """
+ Check if all of the given nodes are pairwise-data independent.
+
+ Arguments:
+ nodes: The nodes to check for data dependencies.
+
+ Returns:
+ True if no pair of the given nodes has a data dependency, False otherwise.
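+ For example (illustrative), two matmul nodes that merely share an operand
+ are independent, while a pair where one node's output (directly or through
+ intermediate nodes) feeds the other is not.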
+ """ + # For each pair in nodes: + for i, j in itertools.combinations(nodes, 2): + if may_depend_on(i, j) or may_depend_on(j, i): + return False + + return True + + +def merge_matmul(in_mod: torch.nn.Module): + """ + A graph transformation that merges matrix multiplication operations that share the same right-hand + side operand into one large matrix multiplication. + ____ _________ _________ + ---- | | | | M| A * C | + M| A | T| B | * K| C | = |---------| + ---- , | | | | T| B * C | + K ---- --------- --------- + K R R + """ + gm = symbolic_trace(in_mod) + + rhs_users: Dict[Node, List[Node]] = {} + lhs_users: Dict[Node, List[Node]] = {} + + # Populate rhs_users and lhs_users - maps from LHS/RHS matrix multiply operands to + # the matmul of which they are the LHS/RHS. + for node in gm.graph.nodes: + if node.op != "call_function" or node.target is not torch.matmul: + continue + + lhs, rhs = node.args + + # TODO: Properly handle aliasing caused by get_attr. For now, + # use the attribute name as the operand if the node is a + # get_attr. + lhs = lhs.target if lhs.op == "get_attr" else lhs + rhs = rhs.target if rhs.op == "get_attr" else rhs + + lhs_users.setdefault(lhs, []).append(node) + rhs_users.setdefault(rhs, []).append(node) + + for rhs, mms in rhs_users.items(): + # There must be at least matmuls for a merge to make sense. + if len(mms) < 2: + continue + + # All matmuls must not depend on each other directly or indirectly + # in order for the merge to be possible. + if not are_nodes_independent(mms): + continue + + lhs_vals = [mm.args[0] for mm in mms] + + # Merge the matmul. + # Collect a list of LHS operands and the single RHS operand. + lhs = [gm.graph.get_attr(l) if isinstance(l, str) else l for l in lhs_vals] + rhs = gm.graph.get_attr(rhs) if isinstance(rhs, str) else rhs + + # Concatenate all the LHS operands. + merge_mm_cat = gm.graph.call_function(torch.cat, (lhs,), {}) + + # Multiply the concatenated LHS operands with the one RHS. This will produce + # the same results as all the individual matmuls involving rhs in the original graph, + # but they will all be concatenated together. + merge_mm = gm.graph.call_function(torch.matmul, (merge_mm_cat, rhs,), {}) + + # Split the result of the merged matmul using the shapes of the LHS operands + # to ascertain how large each chunk should be. + merge_mm_split = gm.graph.call_function( + split_result_tensors, (merge_mm, lhs), {} + ) + merge_mm_res = [ + gm.graph.call_function(operator.getitem, (merge_mm_split, out), {}) + for out in range(len(lhs)) + ] + + # Replace all uses of the original, unmerged matmuls with the equivalent split chunk from the merged matmul. + for old, new in zip(mms, merge_mm_res): + old.replace_all_uses_with(new) + gm.graph.erase_node(old) + + # All of the new nodes created above were inserted at the end, so we need to sort + # the nodes topologically to make sure all definitions precede uses. 
+ legalize_graph(gm) + + gm.recompile() + gm.graph.lint() + return gm diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/meta_tracer.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/meta_tracer.py new file mode 100644 index 0000000000000000000000000000000000000000..be19e7b93ac8b850cc3619d983ef748b66cfa0fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/meta_tracer.py @@ -0,0 +1,268 @@ +import torch +import torch.fx +import warnings +import functools +import builtins + +from typing import Any, Callable, Dict, Optional, Union + +def embedding_override(self, input): + return torch.empty(*input.shape, self.weight.shape[-1], device='meta') + + +def nn_layernorm_override(self, input): + return input + + +def torch_relu_override(x): + return x + + +def torch_nn_relu_override(self, x): + return x + + +def functional_relu_override(x, inplace=False): + assert not inplace, 'dont support inplace functional.relu for metatensor analysis' + return x + + +def torch_where_override(condition, x, y): + # torch.where returns the broadcasted tensor of condition, x, and y, + # so hack it by using addition + return condition.to(device='meta') + x.to(device='meta') + y.to(device='meta') + + +def torch_abs_override(input, *, out=None): + assert out is None, 'Dont support in-place abs for MetaTensor analysis' + return input + +manual_meta_overrides : Dict[Callable, Callable] = { + torch.nn.Embedding: embedding_override, + torch.nn.LayerNorm: nn_layernorm_override, + torch.relu: torch_relu_override, + torch.nn.functional.relu: functional_relu_override, + torch.nn.ReLU: torch_nn_relu_override, + torch.where: torch_where_override, + torch.abs: torch_abs_override, +} + +def gen_constructor_wrapper(target): + @functools.wraps(target) + def wrapper(*args, **kwargs): + proxy = None + + def check_has_proxy(v): + if isinstance(v, torch.fx.Proxy): + nonlocal proxy + proxy = v + torch.fx.node.map_aggregate(args, check_has_proxy) + torch.fx.node.map_aggregate(kwargs, check_has_proxy) + + if proxy is not None: + return proxy.tracer.create_proxy('call_function', target, args, kwargs) + else: + return target(*args, **kwargs) + return wrapper, target + +class MetaProxy(torch.fx.Proxy): + def install_tensor_meta(self, tensor_meta): + self._tensor_meta = tensor_meta + + def size(self, dim=None): + if hasattr(self, '_tensor_meta') and self._tensor_meta is not None: + return self._tensor_meta.size(*[dim] if dim else []) + return self.tracer.create_proxy('call_method', 'size', (self, dim) if dim else (self,), {}) + + def dim(self): + if hasattr(self, '_tensor_meta') and self._tensor_meta is not None: + return self._tensor_meta.dim() + return self.tracer.create_proxy('call_method', 'dim', (self,), {}) + + @property + def shape(self): + if hasattr(self, '_tensor_meta') and self._tensor_meta is not None: + return self._tensor_meta.shape + return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'shape'), {}) + + @property + def dtype(self): + if hasattr(self, '_tensor_meta') and self._tensor_meta is not None: + return self._tensor_meta.dtype + return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'dtype'), {}) + + @property + def device(self): + # Hack so we can track when devices are used. 
During meta-tensor propagation, + # replace these values with a constant 'meta' + return MetaDeviceAttribute(self, 'device') + + def __getattr__(self, k): + if k == '_tensor_meta': + return self.__getattribute__(k) + # note: not added to the graph yet, if this is a method call + # we peephole optimize to the method invocation + return MetaAttribute(self, k) + +class MetaAttribute(MetaProxy): + def __init__(self, root, attr: str): + + self.root = root + self.attr = attr + self.tracer = root.tracer + self._node = None + + @property + def node(self): + # the node for attributes is added lazily, since most will just be method calls + # which do not rely on the getitem call + if self._node is None: + self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node + return self._node + + def __call__(self, *args, **kwargs): + return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs) + +class MetaDeviceAttribute(MetaAttribute): + pass + +def proxys_to_metas(v): + if isinstance(v, MetaDeviceAttribute): + return 'meta' + if isinstance(v, torch.fx.Proxy): + assert isinstance(v, MetaProxy), f'Expected MetaProxy but got {type(v)}' + assert hasattr(v, '_tensor_meta'), 'MetaProxy does not have an associated meta' + return v._tensor_meta + return v + +class MetaTracer(torch.fx.Tracer): + allow_insert_stateless_mods : bool = True + + _TORCH_METHODS_TO_PATCH = ['arange', 'zeros', 'ones', 'full_like', 'eye'] + + def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None): + rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) + + if kind == 'placeholder' and target in self.meta_args: + rv.install_tensor_meta(self.meta_args[target]) + return rv + + if target in self.orig_fns: + # NOTE: tensor constructors in PyTorch define the `device` argument as + # *kwargs-only*. That is why this works. If you add methods to + # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only, + # this will break and you will likely see issues where we cannot infer + # the size of the output. 
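+ # For example (illustrative): a traced torch.arange(5, device='cuda') is
+ # executed here as torch.arange(5, device='meta'), so only metadata is computed.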
+ if 'device' in kwargs: + kwargs['device'] = 'meta' + + try: + args_metas = torch.fx.node.map_aggregate(args, proxys_to_metas) + kwargs_metas = torch.fx.node.map_aggregate(kwargs, proxys_to_metas) + + if kind == 'call_function': + meta_target = manual_meta_overrides.get(target, target) + meta_out = meta_target(*args_metas, **kwargs_metas) + elif kind == 'call_method': + meta_out = getattr(args_metas[0], target)(*args_metas[1:], **kwargs_metas) + elif kind == 'call_module': + assert hasattr(self, 'orig_forward') + self._disable_module_getattr = True + try: + mod = self.root.get_submodule(target) + mod_type = type(mod) + if mod_type in manual_meta_overrides: + meta_out = manual_meta_overrides[mod_type](mod, *args_metas, **kwargs_metas) + else: + meta_out = self.orig_forward(*args_metas, **kwargs_metas) + finally: + self._disable_module_getattr = False + elif kind == 'get_attr': + self._disable_module_getattr = True + try: + attr_itr = self.root + atoms = target.split('.') + for atom in atoms: + attr_itr = getattr(attr_itr, atom) + assert isinstance(attr_itr, torch.Tensor) + meta_out = attr_itr.to(device='meta') + finally: + self._disable_module_getattr = False + else: + return rv + + # TODO + assert isinstance(rv, torch.fx.Proxy), 'Dont support composite output yet' + rv.install_tensor_meta(meta_out) + except Exception as e: + warnings.warn(f'Could not compute metadata for {kind} target {target}: {e}') + + return rv + + def getattr(self, attr, attr_val, parameter_proxy_cache): + if getattr(self, '_disable_module_getattr', False): + return attr_val + else: + return super().getattr(attr, attr_val, parameter_proxy_cache) + + def call_module(self, m, forward, args, kwargs): + self.orig_forward = forward + return super().call_module(m, forward, args, kwargs) + + def _insert_module_as_submodule(self, mod: torch.nn.Module) -> str: + """ + Helper method which tries to insert a module that was not declared as submodule. 
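+ For example (illustrative), a stateless torch.nn.ReLU() constructed inline
+ in forward() and therefore never registered would be attached to the root
+ module here under a generated name such as "relu_0".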
+ """ + idx = 0 + mod_name = mod.__class__.__name__.lower() + path = f"{mod_name}_{idx}" + while hasattr(self.root, path): + path = f"{mod_name}_{idx}" + idx += 1 + + self.root.add_module(path, mod) + return path + + def path_of_module(self, mod: torch.nn.Module) -> str: + try: + return super().path_of_module(mod) + except NameError as e: + if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0: + path = self._insert_module_as_submodule(mod) + self.prev_module = path + return path + raise + + def proxy(self, node): + return MetaProxy(node, self) + + def trace(self, root, meta_args : Dict[str, torch.Tensor], concrete_args=None): + assert isinstance(meta_args, dict) + self.meta_args = meta_args + + self.patched_torch_methods = { + target: gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH + } + self.orig_fns = set() + + for name, (wrapper, orig) in self.patched_torch_methods.items(): + setattr(torch, name, wrapper) + self.orig_fns.add(orig) + + try: + graph = super().trace(root, concrete_args) + graph._tracer_extras = {'meta_args': meta_args} + return graph + finally: + for name, (_, orig) in self.patched_torch_methods.items(): + setattr(torch, name, orig) + + +def symbolic_trace(root : Union[torch.nn.Module, Callable[..., Any]], + meta_args : Optional[Dict[str, torch.Tensor]] = None, + concrete_args: Optional[Dict[str, Any]] = None) -> torch.fx.GraphModule: + tracer = MetaTracer() + graph = tracer.trace(root, meta_args, concrete_args) + name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__ + gm = torch.fx.GraphModule(tracer.root, graph, name) + return gm diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01b93ffd3df307366f5e2941ed8181ddcf03ed7d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d19c18441f97f1c1877cd52d3d69695e694caa3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0d23d0187490834615d67257e8855f26fdbbc5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint.py @@ -0,0 +1,557 @@ +from 
torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \
+ op_mod, op_gt, op_lt, op_neq, op_eq
+from torch.fx.tensor_type import TensorType, Dyn
+
+
+class Constraint:
+ pass
+
+
+class Conj(Constraint):
+ def __init__(self, conjuncts):
+ """
+ :param conjuncts: Conjunction of constraints
+ """
+ self.conjuncts = conjuncts
+
+ def __eq__(self, other):
+ if isinstance(other, Conj):
+ return self.conjuncts == other.conjuncts
+ else:
+ return False
+
+ def __repr__(self):
+ return f'And({self.conjuncts})'
+
+
+class Disj(Constraint):
+ def __init__(self, disjuncts):
+ """
+ :param disjuncts: Disjunction of constraints
+ """
+ self.disjuncts = disjuncts
+
+ def __eq__(self, other):
+ if isinstance(other, Disj):
+ return self.disjuncts == other.disjuncts
+ else:
+ return False
+
+ def __repr__(self):
+ return f'Or({self.disjuncts})'
+
+
+class Prod(Constraint):
+ def __init__(self, products):
+ """
+ :param products: lists of dimensions to multiply
+ """
+ self.products = products
+
+ def __eq__(self, other):
+ if isinstance(other, Prod):
+ return self.products == other.products
+ else:
+ return False
+
+ def __repr__(self):
+ return f'Product({self.products})'
+
+
+class T(Constraint):
+ """
+ True
+ """
+ def __init__(self):
+ pass
+
+ def __eq__(self, other):
+ return isinstance(other, T)
+
+ def __repr__(self):
+ return 'True'
+
+class F(Constraint):
+ """
+ False
+ """
+ def __init__(self):
+ pass
+
+ def __eq__(self, other):
+ return isinstance(other, F)
+
+ def __repr__(self):
+ return 'False'
+
+
+class BinaryConstraint(Constraint):
+ """
+ Represents all binary operations
+ """
+ def __init__(self, lhs, rhs, op):
+ """
+ :param lhs: lhs of the constraint
+ :param rhs: rhs of the constraint
+ :param op: string representing the operation
+ """
+ self.lhs = lhs
+ self.rhs = rhs
+ self.op = op
+
+ def __eq__(self, other):
+ if isinstance(other, BinaryConstraint):
+ return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op
+ else:
+ return False
+
+ def __repr__(self):
+ return f'({self.lhs} {self.op} {self.rhs})'
+
+
+class BinConstraintT(BinaryConstraint):
+ """
+ Binary constraints about tensors
+ """
+ def __init__(self, lhs, rhs, op):
+ assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and \
+ (isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn)
+ super().__init__(lhs, rhs, op)
+
+ def __eq__(self, other):
+ return super().__eq__(other)
+
+
+class BinConstraintD(BinaryConstraint):
+ """
+ Binary constraints about dimensions
+ """
+ def __init__(self, lhs, rhs, op):
+ assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)
+ assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)
+
+ super().__init__(lhs, rhs, op)
+
+ def __eq__(self, other):
+ return super().__eq__(other)
+
+
+
+class TGreatestUpperBound(Constraint):
+ """
+ Greatest Upper bound for tensors with dynamic type
+ """
+ def __init__(self, res, rhs1, rhs2):
+ """
+ :param res: tensor variable that stores the result of the operation
+ :param rhs1: tensor or tensor variable
+ :param rhs2: tensor or tensor variable
+ """
+ self.res = res
+ self.rhs1 = rhs1
+ self.rhs2 = rhs2
+
+ def __repr__(self):
+ return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'
+
+ def __eq__(self, other):
+ if isinstance(other, TGreatestUpperBound):
+ return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
+ else:
+
return False + + +class DGreatestUpperBound(Constraint): + """ + Greatest Upper bound for dimensions + """ + def __init__(self, res, rhs1, rhs2): + """ + :param res: Dimension variable to store the result + :param rhs1: dimension variable 1 + :param rhs2: dimension variable 2 + """ + assert is_dim(res) + assert is_dim(rhs1) + assert is_dim(rhs2) + + self.res = res + self.rhs1 = rhs1 + self.rhs2 = rhs2 + + def __repr__(self): + return f'{self.res} = {self.rhs1}⊔{self.rhs2}' + + def __eq__(self, other): + if isinstance(other, DGreatestUpperBound): + return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2 + else: + return False + + +class CanReshape(Constraint): + """ + can_reshape constraint + """ + def __init__(self, src, target): + """ + :param src: tensor variable + :param target: tensor + """ + self.src = src + self.target = target + + def __repr__(self): + return f'can-reshape({self.src}, {self.target})' + + def __eq__(self, other): + if isinstance(other, CanReshape): + return self.src == other.src and self.target == other.target + else: + return False + + +class IndexSelect(Constraint): + + def __init__(self, tensor_size, input_var, dim_replace, index, output): + """ + Args: + input_var: input to index_select + tensor_size: tensor size we are considering + dim_replace: the dimension of the output at "index" + index: location of the dimensions to replace in the input + output: variable to store the result + """ + assert isinstance(input_var, TVar) + assert isinstance(output, TVar) + assert isinstance(dim_replace, DVar) or dim_replace == Dyn + assert isinstance(index, int) + + self.input_var = input_var + self.tensor_size = tensor_size + self.dim_replace = dim_replace + self.index = index + self.output = output + + def __repr__(self): + + return f' {self.output} = ' \ + f'IndexSelect({self.input_var}, ' \ + f'tensor_size: {self.tensor_size}, ' \ + f'{self.dim_replace}, ' \ + f'{self.index})' + + def __eq__(self, other): + if isinstance(other, IndexSelect): + return self.tensor_size == other.tensor_size and \ + self.dim_replace == other.dim_replace and \ + self.index == other.index and \ + self.output == other.output and \ + self.input_var == other.input_var + else: + return False + + +class Transpose(Constraint): + + def __init__(self, tensor_size, input_var, index1, index2, output): + """ + Args: + tensor_size: current tensor size + input_var: variable to hold input + index1: dimension 1 + index2: dimension 2 + output: output that stores result + """ + assert isinstance(input_var, TVar) + assert isinstance(output, TVar) + assert isinstance(index1, int) + assert isinstance(index2, int) + + self.input_var = input_var + self.tensor_size = tensor_size + self.index1 = index1 + self.index2 = index2 + self.output = output + + def __repr__(self): + + return f' {self.output} = ' \ + f'Transpose({self.input_var}, ' \ + f'tensor_size: {self.tensor_size}, ' \ + f'{self.index1}, ' \ + f'{self.index2})' + + def __eq__(self, other): + if isinstance(other, Transpose): + return self.tensor_size == other.tensor_size and \ + self.index1 == other.index1 and \ + self.index2 == other.index2 and \ + self.output == other.output and \ + self.input_var == other.input_var + else: + return False + + +class GetItem(Constraint): + + def __init__(self, tensor_size, index, res, input_var): + """ + Constraint for getting item given a tensor size + :param tensor_size: actual number + :param index: actual number representing the index + :param res: dimension variable to carry the item we get 
+ :param input_var: a tensor variable from which we will get item
+ """
+ assert isinstance(res, DVar)
+
+ self.res = res
+ self.tensor_size = tensor_size
+ self.index = index
+ self.input_var = input_var
+
+ def __repr__(self):
+ return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})'
+
+ def __eq__(self, other):
+ if isinstance(other, GetItem):
+ return self.res == other.res and \
+ self.tensor_size == other.tensor_size and \
+ self.index == other.index and \
+ self.input_var == other.input_var
+ else:
+ return False
+
+
+class GetItemTensor(Constraint):
+
+ def __init__(self, tensor_size, index_tuple, res, input_var):
+ """
+ Constraint for getting an item, given a tensor size.
+ When the index is a tuple, we expect the result
+ to be a tensor
+ :param tensor_size: actual number representing the rank
+ :param index_tuple: tuple for indexing
+ :param res: tensor variable to carry the item we get
+ :param input_var: a tensor variable from which we will get item
+ """
+ assert isinstance(res, TVar)
+
+ self.res = res
+ self.tensor_size = tensor_size
+ self.index_tuple = index_tuple
+ self.input_var = input_var
+
+ def __repr__(self):
+ return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})'
+
+ def __eq__(self, other):
+ if isinstance(other, GetItemTensor):
+ return self.res == other.res and \
+ self.tensor_size == other.tensor_size and \
+ self.index_tuple == other.index_tuple and \
+ self.input_var == other.input_var
+ else:
+ return False
+
+
+class CalcConv(Constraint):
+
+ def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):
+ """
+ :param conv_result: the convolution result
+ :param input_var: input to convolution
+ :param c_out: output channel type
+ :param kernel: kernel tuple
+ """
+ self.conv_result = conv_result
+ self.input_var = input_var
+ self.c_out = c_out
+ self.kernel = kernel
+ self.padding = padding
+ self.stride = stride
+ self.dilation = dilation
+ self.matching_constraint = matching_constraint_vars
+
+ def __repr__(self):
+ return f'{self.conv_result} =' \
+ f' calc-conv({self.input_var},' \
+ f' {self.c_out}, {self.kernel}, ' \
+ f'{self.padding}, {self.stride},' \
+ f' {self.dilation})'
+
+ def __eq__(self, other):
+ if isinstance(other, CalcConv):
+ return self.conv_result == other.conv_result and self.input_var == other.input_var and \
+ self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \
+ and self.stride == other.stride and self.dilation == other.dilation \
+ and self.matching_constraint == other.matching_constraint
+ else:
+ return False
+
+
+class CalcMaxPool(Constraint):
+
+ def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):
+ """
+ :param maxpool_result: the result of maxpool
+ :param input_var: input to maxpool
+ :param kernel: kernel tuple
+ """
+ self.maxpool_result = maxpool_result
+ self.input_var = input_var
+ self.kernel = kernel
+ self.padding = padding
+ self.stride = stride
+ self.dilation = dilation
+ self.matching_constraint = matching_constraint_vars
+
+ def __repr__(self):
+ return f'{self.maxpool_result} =' \
+ f' calc-maxpool({self.input_var},' \
+ f' {self.kernel}, ' \
+ f'{self.padding}, {self.stride},' \
+ f' {self.dilation})'
+
+ def __eq__(self, other):
+ if isinstance(other, CalcMaxPool):
+ return self.maxpool_result == other.maxpool_result and self.input_var == other.input_var \
+ and self.kernel
== other.kernel and self.padding == other.padding \
+ and self.stride == other.stride and self.dilation == other.dilation \
+ and self.matching_constraint == other.matching_constraint
+ else:
+ return False
+
+
+class ApplyBroadcasting(Constraint):
+ def __init__(self, res1, res2, input1, input2):
+ """
+ :param res1: resulting tensor 1
+ :param res2: resulting tensor 2
+ :param input1: tensor variable 1
+ :param input2: tensor variable 2
+ """
+ self.res1 = res1
+ self.res2 = res2
+ self.input1 = input1
+ self.input2 = input2
+
+ def __eq__(self, other):
+ if isinstance(other, ApplyBroadcasting):
+ return self.res1 == other.res1 \
+ and self.res2 == other.res2 \
+ and self.input1 == other.input1 \
+ and self.input2 == other.input2
+ else:
+ return False
+
+ def __repr__(self):
+ return f'{self.res1}, {self.res2} = apply-broadcasting({self.input1}, {self.input2})'
+
+
+class CalcProduct(Constraint):
+ """
+ Given correct dimensions, calculate the product for flatten accounting for Dyn
+ """
+ def __init__(self, start, end, flattened, dims_to_flatten):
+ """
+ :param start: start index
+ :param end: end index
+ :param flattened: variable to store the product
+ :param dims_to_flatten: the list of dimensions to flatten
+ """
+ assert isinstance(dims_to_flatten, list)
+ assert isinstance(flattened, TVar)
+ assert isinstance(start, int)
+ assert isinstance(end, int)
+
+ self.start = start
+ self.end = end
+ self.dims_to_flatten = dims_to_flatten
+ self.flattened = flattened
+
+ def __eq__(self, other):
+ if isinstance(other, CalcProduct):
+ return self.start == other.start and self.end == other.end and \
+ self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened
+
+ else:
+ return False
+
+ def __repr__(self):
+ return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'
+
+
+class TVar:
+ """
+ Tensor variable, standing for an unknown tensor type
+ """
+ def __init__(self, tvar):
+ """
+ :param tvar: tensor variable
+ """
+ self.tvar = tvar
+
+ def __repr__(self):
+ return f'TV({self.tvar})'
+
+ def __eq__(self, other):
+ if isinstance(other, TVar):
+ return self.tvar == other.tvar
+ else:
+ return False
+
+
+class DVar:
+ """
+ Dimension variable
+ """
+ def __init__(self, c):
+ """
+ :param c: character or number
+ """
+ self.c = c
+
+ def __repr__(self):
+ return f'DV({self.c})'
+
+ def __eq__(self, other):
+ if isinstance(other, DVar):
+ return self.c == other.c
+ else:
+ return False
+
+
+class BVar:
+ """
+ Boolean variable
+ """
+ def __init__(self, c):
+ """
+ :param c: character or number
+ """
+ self.c = c
+
+ def __repr__(self):
+ return f'BV({self.c})'
+
+ def __eq__(self, other):
+ if isinstance(other, BVar):
+ return self.c == other.c
+ else:
+ return False
+
+
+def is_algebraic_expression(constraint):
+ if isinstance(constraint, BinConstraintD):
+ return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod]
+ else:
+ return isinstance(constraint, Prod)
+
+
+def is_bool_expr(constraint):
+ if isinstance(constraint, BinConstraintD):
+ return constraint.op in [op_gt, op_lt, op_neq, op_eq]
+ else:
+ return isinstance(constraint, (BVar, Conj, Disj))
+
+
+def is_dim(d):
+ return isinstance(d, (DVar, int)) or d == Dyn
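+
+# Illustrative sketch (comments only, not part of the module): constraints are
+# plain Python objects that compose into a formula over tensor and dimension
+# variables. For example, "tensor t1 is rank 2 and its first dimension is not
+# Dyn" could be written as:
+#
+#     t1 = TVar(1)
+#     d1, d2 = DVar(1), DVar(2)
+#     c = Conj([BinConstraintT(t1, TensorType([d1, d2]), op_eq),
+#               BinConstraintD(d1, Dyn, op_neq)])
+#
+# Such formulas are later translated to Z3 terms for satisfiability checking.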
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b5307521533f7630bafa3de56acc959900377ab
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_generator.py
@@ -0,0 +1,1281 @@
+import torch
+import operator
+import warnings
+from typing import Callable, Dict, Iterable
+
+from torch.fx._symbolic_trace import _assert_is_none
+from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, CalcProduct, \
+ Disj, TGreatestUpperBound, CalcMaxPool, CalcConv, Conj, BinConstraintT, CanReshape, BinConstraintD, GetItem, T, F, \
+ TVar, DVar, GetItemTensor, IndexSelect, Transpose, DGreatestUpperBound
+from torch.fx.experimental.migrate_gradual_types.operation import \
+ op_eq, op_matching, op_consistency, op_leq, op_precision, op_gt, op_div, op_sub, op_neq, op_lt, op_add, op_mul
+from torch.fx.node import Target, Node
+from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar, gen_tvar, \
+ gen_bvar
+
+from torch.fx.tensor_type import Dyn, TensorType
+from torch.nn.modules.conv import Conv2d
+from torch.nn.modules.batchnorm import BatchNorm2d
+
+_INFERENCE_RULES: Dict[Target, Callable] = {}
+
+MAX_TENSOR_RANK = 4
+
+
+def register_inference_rule(call_target):
+ def register(fn):
+ if call_target in _INFERENCE_RULES:
+ raise RuntimeError(f'Inference rule already registered for {call_target}!')
+ _INFERENCE_RULES[call_target] = fn
+ return fn
+ return register
+
+
+def generate_flatten_constraints(start_dim, end_dim, input, flattened, n, counter):
+ d, counter = gen_tensor_dims(n, counter)
+ c1 = BinConstraintT(input, TensorType(d), op_eq)
+ start_dim = n if start_dim == -1 else abs(start_dim)
+ end_dim = n + end_dim + 1 if end_dim < 0 else end_dim + 1
+ c2 = CalcProduct(start_dim, end_dim, flattened, d)
+ nat_constraints = gen_nat_constraints(d)
+ return Conj([c1, c2, *nat_constraints]), counter
+
+
+@register_inference_rule(getattr)
+def get_attr_inference_rule(n: Node, symbols, constraints, counter):
+ """
+ If the attribute is "device" then the tensor shape is preserved
+ """
+ assert isinstance(n.args[0], Node)
+ assert isinstance(n.args[1], str)
+ output, counter = gen_tvar(counter)
+ symbols[n] = output
+
+ input = symbols[n.args[0]]
+ attr = n.args[1]
+
+ if attr == 'device':
+ return [BinConstraintT(input, output, op_eq)], counter
+ else:
+ raise NotImplementedError('Not yet implemented')
+
+
+@register_inference_rule(torch.bmm)
+def bmm_inference_rule(n: Node, symbols, constraints, counter):
+ """
+ Constrain both inputs to be rank-3 tensors and relate their dimensions
+ according to the rules of batch matrix multiplication
+ """
+ assert isinstance(n.args[0], Node)
+ assert isinstance(n.args[1], Node)
+
+ bmm_output, counter = gen_tvar(counter)
+ symbols[n] = bmm_output
+
+ bmm_input1 = symbols[n.args[0]]
+ bmm_input2 = symbols[n.args[1]]
+
+ dims_input1, counter = gen_tensor_dims(3, counter)
+ dims_input2, counter = gen_tensor_dims(3, counter)
+
+ inputs_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
+ BinConstraintT(bmm_input2, Dyn, op_eq),
+ BinConstraintT(bmm_output, Dyn, op_eq)])
+
+ input1_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
+ BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
+ BinConstraintT(bmm_output, TensorType([dims_input2[0], Dyn, dims_input2[2]]), op_eq)])
+
+ input2_dyn = Conj([BinConstraintT(bmm_input2, Dyn, op_eq),
+ BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
+ BinConstraintT(bmm_output,
TensorType([dims_input1[0], dims_input1[1], Dyn]), op_eq)])
+
+ consistency_constraints = [BinConstraintD(dims_input1[0], dims_input2[0], op_consistency)]
+
+ batch_size, counter = gen_dvar(counter)
+
+ inputs_are_tensors = Conj([BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
+ BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
+ BinConstraintT(bmm_output, TensorType([batch_size, dims_input1[1], dims_input2[2]]), op_eq),
+ *consistency_constraints, DGreatestUpperBound(batch_size, dims_input1[0], dims_input2[0])])
+
+ return [Disj([inputs_dyn, input1_dyn, input2_dyn, inputs_are_tensors])], counter
+
+
+@register_inference_rule("index_select")
+def index_select_inference_rule(n: Node, symbols, constraints, counter):
+ """
+ We constrain the index argument (n.args[2]) to be a vector or Dyn.
+ The output has the input's shape, except that the dimension at the
+ position given by n.args[1] is replaced by the length of that vector
+ """
+ assert isinstance(n.args[0], Node)
+ assert isinstance(n.args[1], int)
+ assert isinstance(n.args[2], Node)
+
+ index_select, counter = gen_tvar(counter)
+ symbols[n] = index_select
+
+ dims, counter = gen_tensor_dims(1, counter)
+
+ # equality constraint
+ is_size_1 = BinConstraintT(symbols[n.args[2]], TensorType(dims), op_eq)
+ is_dyn = BinConstraintT(symbols[n.args[2]], Dyn, op_eq)
+
+ c2 = Conj([is_size_1, Disj([IndexSelect(i + 1, symbols[n.args[0]], dims[0], n.args[1], index_select)
+ for i in range(MAX_TENSOR_RANK)])])
+ c3 = Conj([is_dyn, Disj([IndexSelect(i + 1, symbols[n.args[0]], Dyn, n.args[1], index_select)
+ for i in range(MAX_TENSOR_RANK)])])
+
+ return [Disj([c2, c3])], counter
+
+
+@register_inference_rule("expand")
+def expand_inference_rule(n: Node, symbols, constraints, counter):
+ """
+ We generate the same constraints as for tensor addition, but we constrain
+ the rank of this expression to be equal to len(n.args[1:]) so that only
+ those cases get considered for the output
+ """
+ assert isinstance(n.args[0], Node)
+
+ # define the output for expand
+ expand, counter = gen_tvar(counter)
+ symbols[n] = expand
+
+ # since we do not have two nodes here, we will construct an argument variable
+ e1 = symbols[n.args[0]]
+ e2, counter = gen_tvar(counter)
+
+ e2_nat_constraints = []
+ for arg in n.args[1:]:
+ assert isinstance(arg, (Node, int))
+ if isinstance(arg, Node):
+ assert isinstance(symbols[arg], DVar)
+ e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))
+
+ e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)
+
+ constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)
+
+ # constrain the output size
+ dims, counter = gen_tensor_dims(len(n.args[1:]), counter)
+ nat_constraints = gen_nat_constraints(dims)
+ c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]
+ constraints += c
+
+ return constraints, counter
+
+
+@register_inference_rule(torch.nn.functional.gelu)
+@register_inference_rule(torch.nn.functional.dropout)
+@register_inference_rule(torch.nn.functional.softmax)
+@register_inference_rule("detach")
+@register_inference_rule("to")
+@register_inference_rule("int")
+@register_inference_rule("long")
+@register_inference_rule("contiguous")
+@register_inference_rule(torch.ones)
+@register_inference_rule(torch.zeros)
+def equality_inference_rule(n: Node, symbols, constraints, counter):
+ """
+ We generate the constraint: input = output, or, when the arguments
+ describe a size, construct the output type from those dimensions
+ """
+ output,
counter = gen_tvar(counter) + symbols[n] = output + + if isinstance(n.args[0], Node): + input = symbols[n.args[0]] + if isinstance(input, TVar): + return [BinConstraintT(input, output, op_eq)], counter + + # then we have dimension variables + else: + for arg in n.args: + assert isinstance(symbols[arg], DVar) + my_size = [symbols[arg] for arg in n.args] + return [BinConstraintT(output, TensorType(my_size), op_eq)], counter + + elif isinstance(n.args[0], tuple): + # then the tuple is the size + assert len(n.args[0]) <= 4 + my_size = [symbols[arg] for arg in n.args[0]] + return [BinConstraintT(output, TensorType(my_size), op_eq)], counter + else: + raise NotImplementedError('Method not yet implemented') + + +@register_inference_rule("transpose") +def transpose_inference_rule(n: Node, symbols, constraints, counter): + """ + Can be considered as a sequence of two index selects, so we generate constraints accordingly + """ + assert isinstance(n.args[0], Node) + assert isinstance(n.args[1], int) + assert isinstance(n.args[2], int) + + output, counter = gen_tvar(counter) + symbols[n] = output + + from_arg = symbols[n.args[0]] + assert isinstance(from_arg, TVar) + + # input and output are dyn + is_dyn = Conj([BinConstraintT(from_arg, Dyn, op_eq), BinConstraintT(output, Dyn, op_eq)]) + + # or input is a tensor and we actually do the replacement + c3 = Disj([Transpose(i + 1, from_arg, n.args[1], n.args[2], output) for i in range(MAX_TENSOR_RANK)]) + + return [Disj([is_dyn, c3])], counter + + +@register_inference_rule("type_as") +def type_inference_rule(n: Node, symbols, constraints, counter): + """ + We generate the constraint: input = output + """ + assert isinstance(n.args[0], Node) + assert isinstance(n.args[1], Node) + + output, counter = gen_tvar(counter) + symbols[n] = output + + from_arg = symbols[n.args[0]] + to_arg = symbols[n.args[1]] + + assert isinstance(from_arg, TVar) + assert isinstance(to_arg, TVar) + + return [BinConstraintT(from_arg, to_arg, op_consistency), + BinConstraintT(output, to_arg, op_eq)], counter + +@register_inference_rule("masked_fill_") +def masked_fill_inference_rule(n: Node, symbols, constraints, counter): + """ + Similar to addition. For now we implement the constraints when + the argument is a boolean tensor. There is also a case for when + it is a condition. We will leave this out for now. + """ + + assert isinstance(n.args[0], Node) + assert isinstance(n.args[1], Node) + + # We will retrieve the type variables from the symbol table + # and confirm they are tensor variables + + e1 = symbols[n.args[0]] + e2 = symbols[n.args[1]] + + if isinstance(e1, TVar) and isinstance(e2, TVar): + masked_fill_tensor, counter = gen_tvar(counter) + symbols[n] = masked_fill_tensor + return gen_broadcasting_constraints(e1, e2, symbols, counter, masked_fill_tensor) + else: + raise NotImplementedError('Not yet implemented') + + +@register_inference_rule(torch.nn.functional.embedding) +def embedding_inference_rule_functional(n: Node, symbols, constraints, counter): + assert isinstance(n.args[0], Node) + + embedding_dim_weights = symbols[n.args[1]] + + # will treat this as a static shape. So we will not use matching. 
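+ # For example (hypothetical numbers): if the weight comes from an
+ # nn.Embedding(num_embeddings=50, embedding_dim=8), the equality constraint
+ # below forces the weight type to rank 2, and weight_dims[1] (here 8)
+ # becomes the embedding dimension appended to the output shape by
+ # gen_embedding_rules.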
+ weight_dims, counter = gen_tensor_dims(2, counter) + equality_constraint = BinConstraintT(embedding_dim_weights, TensorType(weight_dims), op_eq) + embedding_dim = weight_dims[1] + constraints, counter = gen_embedding_rules(n, symbols, embedding_dim, counter) + return [equality_constraint] + constraints, counter + + +@register_inference_rule(torch.nn.modules.sparse.Embedding) +def embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter): + """ + The output shape differs from the input shape in the last dimension + """ + assert isinstance(n.args[0], Node) + return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter) + + +def gen_embedding_rules(n: Node, symbols, embedding_dim, counter): + + embedding_output, counter = gen_tvar(counter) + symbols[n] = embedding_output + embedding_input = symbols[n.args[0]] + + input_dyn = BinConstraintT(embedding_input, Dyn, op_eq) + output_dyn = BinConstraintT(embedding_output, Dyn, op_eq) + + c1 = Conj([input_dyn, output_dyn]) + c2 = [] + + for i in range(1, MAX_TENSOR_RANK): + new_dims, counter = gen_tensor_dims(i, counter) + nat_constraints = gen_nat_constraints(new_dims) + + # we consider all tensor sizes and append embedding_dim to the end of the output dimension in all cases + c_tensor_i = Conj([BinConstraintT(embedding_input, TensorType(new_dims), op_eq), + BinConstraintT(embedding_output, TensorType(new_dims + [embedding_dim]), op_eq)] + + nat_constraints) + c2.append(c_tensor_i) + + return [Disj([c1, Disj(c2)])], counter + + +@register_inference_rule(torch.tensor) +def tensor_inference_rule(n: Node, symbols, constraints, counter): + """ + If the tensor is a scalar, we will skip it since we + do not support scalars yet. We will add support in the future + if it's needed. For our examples so far, scalars are not needed. + """ + return [], counter + + +@register_inference_rule("reshape") +@register_inference_rule("view") +def view_inference_rule(n: Node, symbols, constraints, counter): + """ + Similar to reshape but with an extra condition on the strides + """ + assert isinstance(n.args[0], Node) + + # generate the new variable + my_view, counter = gen_tvar(counter) + symbols[n] = my_view + + + src_var = symbols[n.args[0]] + t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]] # target shape + t2_type = [] + num_constraints = [] + + for t in t2: + if t == -1: + var, counter = gen_dvar(counter) + t2_type.append(var) + num_constraints.append(BinConstraintD(var, Dyn, op_neq)) + + else: + num_constraints.append(BinConstraintD(t, Dyn, op_neq)) + t2_type.append(t) + + t2_type = TensorType(t2_type) # type: ignore[assignment] + + c1 = BinConstraintT(my_view, t2_type, op_eq) + c2 = CanReshape(src_var, t2_type) + + # TODO: add the extra check mentioned here: + # https://pytorch.org/docs/stable/generated/torch.Tensor.view.html#torch.Tensor.view + + return [c1, c2] + num_constraints, counter # type: ignore[operator] + + +@register_inference_rule("size") +def size_inference_rule(n: Node, symbols, constraints, counter): + """ + The constraint is just lhs = rhs. + Ex: size = input_ids.size() + """ + + + if len(n.args) == 1: + # generate the new variable + size, counter = gen_tvar(counter) + symbols[n] = size + input = symbols[n.args[0]] + c = BinConstraintT(input, size, op_eq) + return [c], counter + + elif len(n.args) == 2: + # TODO: review this rule; should input = dyn; output = dyn be included here? 
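+ # Illustrative example (assumed types): for `x.size(0)` where x has type
+ # TensorType([d1, d2]), the GetItem constraints below equate the result
+ # with d1; if x is Dyn, the resulting dimension is Dyn instead.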
+ if isinstance(n.args[1], int):
+ # generate the new variable
+ size_index, counter = gen_dvar(counter)
+ symbols[n] = size_index
+ input = symbols[n.args[0]]
+ c2 = [GetItem(i + 1, n.args[1], size_index, input) for i in range(MAX_TENSOR_RANK)]
+ c3 = BinConstraintD(0, size_index, op_leq)
+
+ input_dyn = BinConstraintT(input, Dyn, op_eq)
+ output_dyn = BinConstraintD(size_index, Dyn, op_eq)
+ c1 = Conj([input_dyn, output_dyn])
+
+ return [Disj([c1, Conj([Disj(c2), c3])])], counter
+
+ else:
+ raise NotImplementedError
+
+ else:
+ raise NotImplementedError
+
+
+def range_check(i, n):
+ """
+ Checks if an index i is within range of a size n list
+ Args:
+ i: index
+ n: list size
+
+ Returns: Boolean
+ """
+ if i >= 0:
+ return T() if i < n else F()
+ else:
+ # negative indices count from the end, so the valid range is [-n, -1]
+ return T() if i >= -n else F()
+
+
+@register_inference_rule(torch.cumsum)
+def cumsum_inference_rule(n: Node, symbols, constraints, counter):
+ """
+ Input and output shapes should be equal
+ We should verify that the index is valid
+ """
+ assert isinstance(n.args[0], Node)
+ arg_1 = n.args[1] if len(n.args) > 1 else n.kwargs["dim"]
+ assert isinstance(arg_1, int)
+
+ output, counter = gen_tvar(counter)
+ symbols[n] = output
+ input = symbols[n.args[0]]
+
+ input_dyn = BinConstraintT(input, Dyn, op_eq)
+ output_dyn = BinConstraintT(output, Dyn, op_eq)
+ c1 = Conj([input_dyn, output_dyn])
+ c2 = []
+ for i in range(1, MAX_TENSOR_RANK + 1):
+ new_dims, counter = gen_tensor_dims(i, counter)
+
+ nat_constraints = gen_nat_constraints(new_dims)
+
+ c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims), op_eq),
+ BinConstraintT(output, TensorType(new_dims), op_eq)] +
+ [range_check(arg_1, i)] + nat_constraints)
+
+ c2.append(c_tensor_i)
+ dyn_or_tensor = Disj([c1, Disj(c2)])
+ return [dyn_or_tensor], counter
+
+
+@register_inference_rule(_assert_is_none)
+def assert_inference_rule(n: Node, symbols, constraints, counter):
+ assert len(n.users) == 0
+ return [], counter
+
+
+@register_inference_rule(operator.getitem)
+def getitem_inference_rule(n: Node, symbols, constraints, counter):
+ assert isinstance(n.args[0], Node)
+
+ # dimension output case
+ if isinstance(n.args[1], int):
+ # create and store the new dimension variable
+ get_item_output, counter = gen_dvar(counter)
+ symbols[n] = get_item_output
+
+ # retrieve arg variables
+ get_item_arg = symbols[n.args[0]]
+ assert isinstance(get_item_arg, TVar)
+
+ # if the input is dynamic, we accept any index and return
+ # a dynamic dimension as output
+ input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
+ output_dyn = BinConstraintD(get_item_output, Dyn, op_eq)
+ c1 = Conj([input_dyn, output_dyn])
+
+ # if the input is a tensor,
+ # generate a getItem constraint which will be expanded based on the
+ # tensor dimension.
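+ # e.g. for index 1 and a rank-2 input, one disjunct below reads
+ # (input = TensorType([a1, a2])) /\ (output = a2); (rank, index)
+ # combinations where the index is out of range reduce to False once
+ # the GetItem constraints are expanded.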
+ + c2 = [GetItem(i + 1, n.args[1], get_item_output, get_item_arg) for i in range(MAX_TENSOR_RANK)] + + + # since the output is a dimension, we make sure it's a natural number + # added as a conjunction to the disjunction of c2 + c3 = BinConstraintD(0, get_item_output, op_leq) + return [Disj([c1, Conj([Disj(c2), c3])])], counter + + # tensor output case + elif isinstance(n.args[1], tuple): + # create and store the new tensor variable + get_item_output, counter = gen_tvar(counter) + symbols[n] = get_item_output + + # retrieve arg variables + if n.args[0] in symbols: + get_item_arg = symbols[n.args[0]] + assert isinstance(get_item_arg, TVar) + + input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq) + output_dyn = BinConstraintT(get_item_output, Dyn, op_eq) # type: ignore[assignment] + c1 = Conj([input_dyn, output_dyn]) + + c2 = [GetItemTensor(i + 1, n.args[1], get_item_output, get_item_arg) # type: ignore[misc] + for i in range(MAX_TENSOR_RANK)] + else: + # TODO: we should figure out why there is a key-error here. + return [], counter + + return [Disj([c1, *c2])], counter + + else: + raise RuntimeError('Method not yet implemented') + + +@register_inference_rule(operator.gt) +def gt_inference_rule(n: Node, symbols, constraints, counter): + assert isinstance(n.args[0], (Node, int)) + assert isinstance(n.args[1], (Node, int)) + + # We make sure this node will not be used again. We do not + # generate a constraint about that node. Only about the operands. + + e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0] + e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1] + + if isinstance(n.args[0], Node) and isinstance(n.args[1], Node): + if isinstance(e1, TVar) and isinstance(e2, TVar): + gt_tensor, counter = gen_tvar(counter) + symbols[n] = gt_tensor + return gen_broadcasting_constraints(e1, e2, symbols, counter, gt_tensor) + + elif isinstance(e1, DVar) and isinstance(e2, DVar): + # This is meant to be used for flow analysis only + gt_constraint = BinConstraintD(e1, e2, op_gt) + + my_gt, counter = gen_bvar(counter) + equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq) + return [equality_constraint], counter + + else: + raise RuntimeError('Sort Mismatch') + + elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node): + if isinstance(e1, DVar): + # This is meant to be used for flow analysis only + gt_constraint = BinConstraintD(e1, e2, op_gt) + + my_gt, counter = gen_bvar(counter) + equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq) + return [equality_constraint], counter + + elif isinstance(e1, TVar) and isinstance(e2, int): + # then we made the wrong assumption about the argument being a tensor + # so we should fix the assumption + warnings.warn(f'Made the wrong assumption for node {n}. 
Correctness not guaranteed.')
+
+ new_e1, counter = gen_dvar(counter)
+ symbols[n.args[0]] = new_e1
+
+ gt_constraint = BinConstraintD(new_e1, e2, op_gt)
+
+ my_gt, counter = gen_bvar(counter)
+ equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
+ return [equality_constraint], counter
+
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+
+@register_inference_rule(operator.eq)
+def eq_inference_rule(n: Node, symbols, constraints, counter):
+ assert isinstance(n.args[0], (Node, int))
+ assert isinstance(n.args[1], (Node, int))
+
+ e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
+ e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
+
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
+ if isinstance(e1, TVar) and isinstance(e2, TVar):
+ eq_tensor, counter = gen_tvar(counter)
+ symbols[n] = eq_tensor
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, eq_tensor)
+
+ elif isinstance(e1, DVar) and isinstance(e2, DVar):
+ # This is meant to be used for flow analysis only
+ eq_constraint = BinConstraintD(e1, e2, op_eq)
+
+ my_eq, counter = gen_bvar(counter)
+ equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
+ return [equality_constraint], counter
+
+ else:
+ raise RuntimeError('Sort Mismatch')
+
+ elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
+ if isinstance(e1, DVar):
+ # This is meant to be used for flow analysis only
+ eq_constraint = BinConstraintD(e1, e2, op_eq)
+
+ my_eq, counter = gen_bvar(counter)
+ equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
+ return [equality_constraint], counter
+ else:
+ raise NotImplementedError('Method not yet implemented')
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+
+@register_inference_rule(operator.ne)
+def neq_inference_rule(n: Node, symbols, constraints, counter):
+ """
+ Translates to inconsistent in gradual types.
+ To prove inequality, we should prove that
+ tensors are either different sizes or
+ disagree on at least one dimension
+
+ This is a WIP (works when the condition
We are working on making this operation work
+ when the condition is true as well)
+ """
+ assert isinstance(n.args[0], Node)
+ assert isinstance(n.args[1], tuple)
+
+ # implementing for size 3 and 4
+ if len(n.args[1]) == 3:
+
+ assert isinstance(n.args[1][0], (Node, int))
+ assert isinstance(n.args[1][1], (Node, int))
+ assert isinstance(n.args[1][2], (Node, int))
+
+ lhs = symbols[n.args[0]]
+
+ b, counter = gen_tensor_dims(3, counter)
+ input_is_size3 = BinConstraintT(lhs, TensorType([b[0], b[1], b[2]]), op_eq)
+
+ d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
+ d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
+ d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
+
+ # dimensions not equal
+ neq_1 = BinConstraintD(d1, b[0], op_neq)
+ neq_2 = BinConstraintD(d2, b[1], op_neq)
+ neq_3 = BinConstraintD(d3, b[2], op_neq)
+
+ # dimensions inconsistent
+ dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b[0], Dyn, op_neq), neq_1])
+ dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b[1], Dyn, op_neq), neq_2])
+ dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b[2], Dyn, op_neq), neq_3])
+
+ dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3])
+
+ # we are covering size 3 and 4 only for now
+ ne_constraint = Conj([input_is_size3, dims_inconsistent])
+
+ my_ne, counter = gen_bvar(counter)
+ equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)
+
+ elif len(n.args[1]) == 4:
+
+ assert isinstance(n.args[1][0], (Node, int))
+ assert isinstance(n.args[1][1], (Node, int))
+ assert isinstance(n.args[1][2], (Node, int))
+ assert isinstance(n.args[1][3], (Node, int))
+
+ lhs = symbols[n.args[0]]
+
+ b1, counter = gen_dvar(counter)
+ b2, counter = gen_dvar(counter)
+ b3, counter = gen_dvar(counter)
+ b4, counter = gen_dvar(counter)
+
+ input_is_size4 = BinConstraintT(lhs, TensorType([b1, b2, b3, b4]), op_eq)
+
+ d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
+ d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
+ d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
+ d4 = n.args[1][3] if isinstance(n.args[1][3], int) else symbols[n.args[1][3]]
+
+ # dimensions not equal
+ neq_1 = BinConstraintD(d1, b1, op_neq)
+ neq_2 = BinConstraintD(d2, b2, op_neq)
+ neq_3 = BinConstraintD(d3, b3, op_neq)
+ neq_4 = BinConstraintD(d4, b4, op_neq)
+
+ # dimensions inconsistent
+ dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b1, Dyn, op_neq), neq_1])
+ dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b2, Dyn, op_neq), neq_2])
+ dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq), neq_3])
+ dims_inconsistent4 = Conj([BinConstraintD(d4, Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq), neq_4])
+
+ dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3, dims_inconsistent4])
+
+ ne_constraint = Conj([input_is_size4, dims_inconsistent])
+
+ my_ne, counter = gen_bvar(counter)
+ equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)
+
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+ return [equality_constraint], counter
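+
+# Illustrative reading of the rule above (assumed sizes): for a comparison
+# like `x.size() != (1, 2, 3)`, the generated boolean can only be true if x
+# is rank 3 and some pair of corresponding dimensions is statically known
+# (not Dyn) yet unequal, e.g. x : TensorType([1, 2, 4]) differs at index 2.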
+
+
+@register_inference_rule(operator.lt)
+def lt_inference_rule(n: Node, symbols, constraints, counter):
+ assert isinstance(n.args[0], (Node, int))
+ assert isinstance(n.args[1], (Node, int))
+
+ # We make sure this node will not be used again. We do not
+ # generate a constraint about that node. Only about the operands.
+
+ e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
+ e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
+
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
+ if isinstance(e1, TVar) and isinstance(e2, TVar):
+ lt_tensor, counter = gen_tvar(counter)
+ symbols[n] = lt_tensor
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, lt_tensor)
+
+ elif isinstance(e1, DVar) and isinstance(e2, DVar):
+ # This is meant to be used for flow analysis only
+ lt_constraint = BinConstraintD(e1, e2, op_lt)
+
+ my_lt, counter = gen_bvar(counter)
+ equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
+ return [equality_constraint], counter
+
+ else:
+ raise RuntimeError('Sort Mismatch')
+
+ elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
+ if isinstance(e1, DVar):
+ # This is meant to be used for flow analysis only
+ lt_constraint = BinConstraintD(e1, e2, op_lt)
+
+ my_lt, counter = gen_bvar(counter)
+ equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
+ return [equality_constraint], counter
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+
+@register_inference_rule(torch.full)
+def full_inference_rule(n: Node, symbols, constraints, counter):
+ full, counter = gen_tvar(counter)
+ symbols[n] = full
+ res = []
+
+ assert isinstance(n.args[0], Iterable)
+ for arg in n.args[0]:
+ dim = arg if isinstance(arg, int) else symbols[arg]
+ res.append(dim)
+ c = BinConstraintT(full, TensorType(list(res)), op_eq) # type: ignore[arg-type]
+ return [c], counter
+
+
+# TODO normalize index
+@register_inference_rule(torch.arange)
+def arange_inference_rule(n: Node, symbols, constraints, counter):
+ start = 0
+ step = 1
+
+ if len(n.args) == 1:
+ end = symbols[n.args[0]]
+ else:
+ raise NotImplementedError('Not yet implemented')
+
+ # int((end - start) / step)
+ d1, counter = gen_dvar(counter)
+ size_constraint = BinConstraintD(d1, BinConstraintD(BinConstraintD(end, start, op_sub), step, op_div), op_eq)
+ arange, counter = gen_tvar(counter)
+ symbols[n] = arange
+
+ # if any of the parameters is Dyn, then the size is Dyn as well
+ c1 = Disj([BinConstraintD(end, Dyn, op_eq),
+ BinConstraintD(start, Dyn, op_eq),
+ BinConstraintD(step, Dyn, op_eq)])
+ c2 = BinConstraintD(d1, Dyn, op_eq)
+ both_dyn = Conj([c1, c2])
+
+ # if all parameters are numbers, then the size constraint holds
+ c11 = Conj([BinConstraintD(end, Dyn, op_neq),
+ BinConstraintD(start, Dyn, op_neq),
+ BinConstraintD(step, Dyn, op_neq)])
+ c22 = BinConstraintD(d1, Dyn, op_neq)
+ both_numbers = Conj([c11, c22, size_constraint])
+
+ return [BinConstraintT(arange, TensorType([d1]), op_eq), Disj([both_dyn, both_numbers])], counter
+
+
+def gen_broadcasting_constraints(e1, e2, symbols, counter, output_var):
+ # additional vars that don't correspond to expressions
+ e11, counter = gen_tvar(counter)
+ e22, counter = gen_tvar(counter)
+
+ # generate constraints
+ c1 = TGreatestUpperBound(output_var, e11, e22)
+ c2 = ApplyBroadcasting(e11, e22, e1, e2)
+ c3 = BinConstraintT(e11, e22, op_consistency)
+ return [c1, c2, c3], counter
+
+
+@register_inference_rule(operator.mul)
+@register_inference_rule(torch.ne)
+@register_inference_rule("ne")
+@register_inference_rule(torch.add)
+@register_inference_rule(operator.add)
+def broadcasting_inference_rule(n: Node, symbols, constraints, counter):
+
+ op_code = None
+ if n.target == operator.add or n.target == torch.add:
+ op_code = op_add
+ elif n.target == operator.mul:
+ op_code = op_mul
+
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
+ if isinstance(symbols[n.args[0]], TVar) and isinstance(symbols[n.args[1]], TVar):
+ my_output, counter = gen_tvar(counter)
+ symbols[n] = my_output
+ e1 = symbols[n.args[0]]
+ e2 = symbols[n.args[1]]
+
+ return gen_broadcasting_constraints(e1, e2, symbols, counter, my_output)
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+ elif isinstance(n.args[0], Node) and isinstance(n.args[1], (int, float)):
+ if isinstance(symbols[n.args[0]], TVar):
+ my_output, counter = gen_tvar(counter)
+ symbols[n] = my_output
+ e1 = symbols[n.args[0]]
+ return [BinConstraintT(my_output, e1, op_eq)], counter
+ elif isinstance(symbols[n.args[0]], DVar):
+ my_output, counter = gen_dvar(counter)
+ symbols[n] = my_output
+ e1 = symbols[n.args[0]]
+
+ # we will propagate the runtime value here since this is regular arithmetic on dimensions
+ c = Conj([BinConstraintD(my_output, BinConstraintD(e1, n.args[1], op_code), op_eq),
+ BinConstraintD(0, my_output, op_leq)])
+ return [c], counter
+
+ elif isinstance(n.args[1], Node) and isinstance(n.args[0], (int, float)):
+ if isinstance(symbols[n.args[1]], TVar):
+ my_output, counter = gen_tvar(counter)
+ symbols[n] = my_output
+ e2 = symbols[n.args[1]]
+ return [BinConstraintT(my_output, e2, op_eq)], counter
+ elif isinstance(symbols[n.args[1]], DVar):
+ my_output, counter = gen_dvar(counter)
+ symbols[n] = my_output
+ e2 = symbols[n.args[1]]
+
+ # we will propagate the runtime value here since this is regular arithmetic on dimensions
+ c = Conj([BinConstraintD(my_output, BinConstraintD(e2, n.args[0], op_code), op_eq),
+ BinConstraintD(0, my_output, op_leq)])
+ return [c], counter
+
+ else:
+ raise NotImplementedError('Method not yet implemented')
+
+ else:
+ # TODO generate add constraints for scalar addition
+ raise NotImplementedError('Addition not yet implemented')
+
+
+@register_inference_rule(torch.flatten)
+def flatten_inference_rule(n: Node, symbols, constraints, counter):
+ assert isinstance(n.args[0], Node)
+
+ # generate the new variable
+ flattened, counter = gen_tvar(counter)
+ symbols[n] = flattened
+
+ input = symbols[n.args[0]]
+
+ # set the default start and end dims
+ start_dim = 1
+ end_dim = -1
+
+ if len(n.args) > 1:
+ assert isinstance(n.args[1], int)
+ start_dim = n.args[1]
+
+ if len(n.args) > 2:
+ assert isinstance(n.args[2], int)
+ end_dim = n.args[2]
+
+ c1 = BinConstraintT(input, Dyn, op_eq)
+ c2 = BinConstraintT(flattened, Dyn, op_eq)
+ both_dyn = Conj([c1, c2])
+
+ const = []
+ for i in range(1, MAX_TENSOR_RANK + 1):
+ c, counter = generate_flatten_constraints(start_dim, end_dim, input, flattened, i, counter)
+ const.append(c)
+
+ return [Disj([both_dyn, *const])], counter
+
+
+@register_inference_rule(torch.nn.functional.layer_norm)
+def layer_norm_functional(n: Node, symbols, constraints, counter):
+ """
+ Input and output shapes should be equal; the input must also be
+ consistent with the given normalized_shape (n.args[1])
+ """
+ assert isinstance(n.args[0], Node)
+ return gen_layer_norm_constraints(n, n.args[1], symbols, counter)
+
+
+@register_inference_rule(torch.nn.LayerNorm)
+def layer_norm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
+ """
+ Input and output shapes should be equal.
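+ (For example, with a hypothetical LayerNorm(normalized_shape=(5,)), an
+ input typed TensorType([2, 3, 5]) yields an output of the same type.)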
+ Input should be consistent with the normalized_shape + """ + assert isinstance(n.args[0], Node) + return gen_layer_norm_constraints(n, module_instance.normalized_shape, symbols, counter) + + +def gen_layer_norm_constraints(n: Node, normalized_shape, symbols, counter): + output, counter = gen_tvar(counter) + symbols[n] = output + input = symbols[n.args[0]] + + input_dyn = BinConstraintT(input, Dyn, op_eq) + output_dyn = BinConstraintT(output, Dyn, op_eq) + + c1 = Conj([input_dyn, output_dyn]) + + c2 = [] + for i in range(1, MAX_TENSOR_RANK + 1): + new_dims_rhs, counter = gen_tensor_dims(i, counter) + nat_constraints = gen_nat_constraints(new_dims_rhs) + + c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs), op_eq), + BinConstraintT(output, TensorType(new_dims_rhs), op_eq)] + + add_layer_norm_constraints(new_dims_rhs, list(normalized_shape)) + + nat_constraints) + c2.append(c_tensor_i) + return [Disj([c1, Disj(c2)])], counter + +@register_inference_rule(torch.nn.Dropout) +@register_inference_rule(torch.nn.ReLU) +def relu_inference_rule(n: Node, module_instance, symbols, constraints, counter): + """ + Input and output shapes should be equal. + """ + assert isinstance(n.args[0], Node) + output, counter = gen_tvar(counter) + symbols[n] = output + input = symbols[n.args[0]] + assert isinstance(input, TVar) + return [BinConstraintT(input, output, op_eq)], counter + + +@register_inference_rule(torch.nn.Linear) +def linear_inference_rule(n: Node, module_instance, symbols, constraints, counter): + """ + Input and output sizes should be the same except for the last dimension + If the input is Dyn, then so should the output + """ + assert isinstance(n.args[0], Node) + return linear_constraints(n, module_instance.in_features, module_instance.out_features, symbols, counter) + + +@register_inference_rule("dim") # type: ignore[attr-defined] +def torch_dim_inference_rule(n: Node, symbols, constraints, counter): + assert isinstance(n.args[0], Node) + my_dim, counter = gen_dvar(counter) + symbols[n] = my_dim + input = symbols[n.args[0]] + + input_dyn = BinConstraintT(input, Dyn, op_eq) + output_dyn = BinConstraintD(my_dim, Dyn, op_eq) + + c1 = [] + + for i in range(1, MAX_TENSOR_RANK + 1): + new_dims_rhs_1, counter = gen_tensor_dims(i, counter) + + c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs_1), op_eq), + BinConstraintD(my_dim, i, op_eq)]) + c1.append(c_tensor_i) + + return [Disj([Conj([input_dyn, output_dyn]), Disj(c1)])], counter + + +@register_inference_rule(torch._C._nn.linear) # type: ignore[attr-defined] +def torch_linear_inference_rule(n: Node, symbols, constraints, counter): + assert isinstance(n.args[0], Node) + weight_dims, counter = gen_tensor_dims(2, counter) + equality_constraint = BinConstraintT(symbols[n.args[1]], TensorType(weight_dims), op_eq) + constraints, counter = linear_constraints(n, weight_dims[1], weight_dims[0], symbols, counter) + return [equality_constraint] + constraints, counter + + +def linear_constraints(n: Node, in_features, out_features, symbols, counter): + linear_output, counter = gen_tvar(counter) + symbols[n] = linear_output + linear_input = symbols[n.args[0]] + + input_dyn = BinConstraintT(linear_input, Dyn, op_eq) + output_dyn = BinConstraintT(linear_output, Dyn, op_eq) + + c1 = Conj([input_dyn, output_dyn]) + + c2 = [] + for i in range(1, MAX_TENSOR_RANK + 1): + new_dims_rhs_1, counter = gen_tensor_dims(i, counter) + new_dims_rhs_2, counter = gen_tensor_dims(i, counter) + + nat_constraints = gen_nat_constraints(new_dims_rhs_1 + 
new_dims_rhs_2)
+
+ c_tensor_i = Conj([BinConstraintT(linear_input, TensorType(new_dims_rhs_1), op_eq),
+ BinConstraintT(linear_output, TensorType(new_dims_rhs_2), op_eq)] +
+ add_linear_constraints(new_dims_rhs_1, new_dims_rhs_2, in_features, out_features) +
+ nat_constraints)
+ c2.append(c_tensor_i)
+ return [Disj([c1, Disj(c2)])], counter
+
+
+def add_layer_norm_constraints(input_dim, normalized_dim):
+ """
+ The constraints say that the type has the form: [*, 1024, 1024]
+ while normalized_dim has the form [1024, 1024]
+ Args:
+ input_dim: Input shape of layer norm
+ normalized_dim: normalized_shape parameter of the module instance
+
+ """
+
+ # in this case we return false since there's a pattern mismatch
+ if len(normalized_dim) > len(input_dim):
+ return [F()]
+
+ else:
+ constraints = []
+ for i, n in zip(reversed(input_dim), reversed(normalized_dim)):
+ constraints.append(BinConstraintD(i, n, op_consistency))
+ return constraints
+
+
+def add_linear_constraints(dims1, dims2, in_features, out_features):
+ assert len(dims1) == len(dims2)
+ constraints = []
+ for i in range(len(dims1)):
+ if i == len(dims1) - 1:
+ constraints.append(BinConstraintD(dims1[i], in_features, op_consistency))
+ constraints.append(BinConstraintD(dims2[i], out_features, op_eq))
+ else:
+ constraints.append(BinConstraintD(dims1[i], dims2[i], op_eq))
+
+ return constraints
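+
+# Worked example (assumed numbers): for nn.Linear(in_features=3, out_features=7)
+# applied to a rank-2 input, one disjunct produced by linear_constraints reads:
+#     input  = TensorType([d1, d2]), with d2 consistent with 3,
+#     output = TensorType([e1, e2]), with e1 = d1 and e2 = 7,
+# where consistency also allows d2 to be Dyn.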
+
+
+@register_inference_rule(torch.reshape)
+def reshape_inference_rule(n: Node, symbols, constraints, counter):
+ assert isinstance(n.args[0], Node)
+
+ # generate the new variable
+ my_reshape, counter = gen_tvar(counter)
+ symbols[n] = my_reshape
+
+ src_var = symbols[n.args[0]]
+ t2 = n.args[1]
+ t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2]) # type: ignore[union-attr]
+ c1 = BinConstraintT(my_reshape, t2_type, op_eq) # type: ignore[union-attr]
+ c2 = CanReshape(src_var, t2_type)
+
+ return [c1, c2], counter
+
+
+@register_inference_rule(BatchNorm2d)
+def batchnorm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
+ assert isinstance(n.args[0], Node)
+
+ # generate the new variable
+ batchnorm_output, counter = gen_tvar(counter)
+ symbols[n] = batchnorm_output
+ batchnorm_input = symbols[n.args[0]]
+
+ # dim vars
+ d1, counter = gen_dvar(counter)
+ d2, counter = gen_dvar(counter)
+ d3, counter = gen_dvar(counter)
+ d4, counter = gen_dvar(counter)
+
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
+
+ c1 = BinConstraintT(batchnorm_input, TensorType([d1, d2, d3, d4]), op_matching)
+ c2 = BinConstraintT(batchnorm_input, batchnorm_output, op_eq)
+ return [c1, c2, *nat_constraints], counter
+
+
+@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
+def adaptive_inference_rule(n: Node, module_instance, symbols, constraints, counter):
+ assert isinstance(n.args[0], Node)
+
+ avg_pool, counter = gen_tvar(counter)
+
+ symbols[n] = avg_pool
+ input_var = symbols[n.args[0]]
+
+ # dim vars
+ d1, counter = gen_dvar(counter)
+ d2, counter = gen_dvar(counter)
+ d3, counter = gen_dvar(counter)
+ d4, counter = gen_dvar(counter)
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
+ c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
+ c2 = BinConstraintT(avg_pool, TensorType([d1, d2, module_instance.output_size[0], module_instance.output_size[1]]), op_eq)
+
+ return [c1, c2, *nat_constraints], counter
+
+
+@register_inference_rule(Conv2d)
+def conv2d_inference_rule(n: Node, module_instance, symbols, constraints, counter):
+ assert isinstance(n.args[0], Node)
+
+ my_conv, counter = gen_tvar(counter)
+ symbols[n] = my_conv
+ input_var = symbols[n.args[0]]
+
+ # dim vars
+ [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)
+
+ c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
+
+ c2 = BinConstraintD(module_instance.in_channels, d2, op_consistency)
+
+ c3 = CalcConv(my_conv, input_var,
+ module_instance.out_channels,
+ module_instance.kernel_size,
+ module_instance.padding,
+ module_instance.stride,
+ module_instance.dilation, [d1, d2, d3, d4])
+
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
+
+ return [c1, c2, c3, *nat_constraints], counter
+
+
+@register_inference_rule(torch.nn.MaxPool2d)
+def maxpool_inference_rule(n: Node, module_instance, symbols, constraints, counter):
+ assert isinstance(n.args[0], Node)
+ maxpool, counter = gen_tvar(counter)
+ symbols[n] = maxpool
+ input_var = symbols[n.args[0]]
+
+ # dim vars
+ [d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)
+
+ c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
+
+ c2 = CalcMaxPool(maxpool, input_var, module_instance.kernel_size, module_instance.padding,
+ module_instance.stride, module_instance.dilation, [d1, d2, d3, d4])
+
+ nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
+
+ return [c1, c2, *nat_constraints], counter
+
+
+class ConstraintGenerator:
+ def __init__(self, traced, graph=None):
+ self.traced = traced # traced or tracer.root
+ self.traced_params = dict(self.traced.named_parameters())
+ self.constraints = []
+ self.symbol_dict = {}
+ self.graph = traced.graph if hasattr(traced, 'graph') else graph
+
+ def generate_constraints(self, counter=0):
+ """
+ Iterate through every node and generate constraints
+ Effect: self.constraints will be populated with the final constraints
+ """
+ graph = self.graph
+
+ all_constraints = []
+
+ for n in graph.nodes:
+ (constraints, counter) = self.generate_constraints_node(n, counter)
+ all_constraints += constraints
+
+ return Conj(all_constraints), counter
+
+ def generate_constraints_node(self, n: Node, counter):
+ """
+ Generate constraints for the given node by dispatching on its opcode
+ and on the inference rules registered for its target
+ """
+
+ if n.op == 'placeholder':
+ x, counter = gen_tvar(counter)
+ self.symbol_dict[n] = x
+
+ my_type = n.type
+
+ if n.type != Dyn and (not isinstance(n.type, TensorType)):
+ if n.type == torch.nn.parameter.Parameter:
+ # since we have a parameter, the shape must be static
+ assert 'example_value' in n.meta
+ my_type = TensorType(n.meta['example_value'].size())
+ else:
+ my_type = Dyn
+
+ c1 = BinConstraintT(my_type, x, op_precision)
+ c2 = BinConstraintT(x, MAX_TENSOR_RANK, op_leq)
+ return [c1, c2], counter
+
+ elif n.op == 'call_function':
+ if n.target in _INFERENCE_RULES:
+ return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
+ else:
+ raise RuntimeError(f'No inference rule registered for target {n.target}!')
+
+ elif n.op == 'call_module':
+
+ module_instance = self.traced.get_submodule(n.target)
+ if type(module_instance) in _INFERENCE_RULES:
+ return _INFERENCE_RULES[type(module_instance)](n,
+ module_instance,
+ self.symbol_dict,
+ self.constraints, counter)
+ else:
+ raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')
+
+ elif n.op == 'call_method':
+ if n.target in _INFERENCE_RULES:
+ return _INFERENCE_RULES[n.target](n,
self.symbol_dict, self.constraints, counter)
+ else:
+ raise RuntimeError(f'No inference rule registered for target {n.target}!')
+
+ elif n.op == 'get_attr':
+ t = self.traced_params.get(n.target, None)
+
+ if isinstance(t, torch.Tensor):
+ if len(t.shape) > 0:
+ attr_type = TensorType(list(t.shape))
+ output, counter = gen_tvar(counter)
+ self.symbol_dict[n] = output
+ return [BinConstraintT(output, attr_type, op_eq)], counter
+ else:
+ # rank-0 (scalar) tensor: no shape constraints to generate
+ return [], counter
+ else:
+ return [], counter
+
+ elif n.op == 'output':
+ return [], counter
+
+ else:
+ raise NotImplementedError(f"Method {n.op} not yet implemented")
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py
new file mode 100644
index 0000000000000000000000000000000000000000..153a8407fc41133748a9f25175607220a340d31c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/constraint_transformation.py
@@ -0,0 +1,1040 @@
+# mypy: ignore-errors
+import copy
+import itertools
+from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK
+from torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \
+ Transpose
+from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound
+from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound
+from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool
+from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape
+from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect
+from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching
+from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq
+from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod
+from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar
+from torch.fx.tensor_type import TensorType, Dyn
+from typing import Callable, Dict, List
+
+_TRANSFORMATION_RULES: Dict[Constraint, Callable] = {}
+
+
+def register_transformation_rule(call_target):
+ def register(fn):
+ if call_target in _TRANSFORMATION_RULES:
+ raise RuntimeError(f'Transformation rule already registered for {call_target}!')
+ _TRANSFORMATION_RULES[call_target] = fn
+ return fn
+ return register
+
+
+def valid_index(index, dims):
+ """
+ Given a list of dimensions, checks if an index is valid in the list
+ """
+ try:
+ dims[index]
+ return T()
+ except IndexError:
+ return F()
+
+
+@register_transformation_rule(Transpose)
+def transform_transpose(constraint, counter):
+ """
+ Similar to a sequence of two index-selects
+ """
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
+ is_valid_index1 = valid_index(constraint.index1, dims)
+ is_valid_index2 = valid_index(constraint.index2, dims)
+ new_dims = copy.deepcopy(dims)
+ nat_constraints = gen_nat_constraints(dims)
+
+ if is_valid_index1 == T() and is_valid_index2 == T():
+ new_dims[constraint.index1] = dims[constraint.index2]
+ new_dims[constraint.index2] = dims[constraint.index1]
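+
+ # e.g. (illustrative): for tensor_size=3, index1=0, index2=2, the swap
+ # above turns dims [s1, s2, s3] into new_dims [s3, s2, s1] for the output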
+
+ transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
+ *nat_constraints,
+ is_valid_index1, is_valid_index2,
+ BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
+ return transformed_constraint, counter
+
+
+@register_transformation_rule(IndexSelect)
+def transform_index_select(constraint, counter):
+ """
+ Considers the given tensor size, checks if the index is valid,
+ and if so generates a constraint for replacing the input dimension
+ with the required dimension
+ """
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
+ is_valid_index = valid_index(constraint.index, dims)
+ nat_constraints = gen_nat_constraints(dims)
+
+ # if the index is valid then replace the input dimension with the new dimension
+ # otherwise the dimension will not be replaced and the clause will contain False
+ new_dims = copy.deepcopy(dims)
+ if is_valid_index == T():
+ new_dims[constraint.index] = constraint.dim_replace
+
+ transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
+ *nat_constraints,
+ is_valid_index,
+ BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
+
+ return transformed_constraint, counter
+
+
+@register_transformation_rule(GetItem)
+def transform_get_item(constraint, counter):
+ """
+ Generate an equality of the form:
+ t = [a1, ..., an]
+ then generate constraints that check if the given index is valid
+ given this particular tensor size.
+ If the index is valid, generate a constraint to get the item
+ Note that we already handled the Dyn input case in the previous
+ step.
+ Args:
+ constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
+ counter: variable tracking
+ Returns: simplified constraints for GetItem
+
+ """
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
+ nat_constraints = gen_nat_constraints(dims)
+
+ is_valid_index = valid_index(constraint.index, dims)
+
+ all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
+ *nat_constraints,
+ is_valid_index]
+
+ # if the index is valid, we generate a constraint for getting an item
+ # otherwise this clause will have been UNSAT due to the wrong index
+ if is_valid_index == T():
+ all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))
+
+ return Conj(all_constraints), counter
+
+
+def valid_index_tensor(index, dims):
+ """
+ If the slice instances exceed the length of the dimensions,
+ this is a type error, so we return False
+ """
+ slice_count = 0
+ for s in index:
+ if isinstance(s, slice):
+ slice_count += 1
+ if slice_count > len(dims):
+ return F()
+ else:
+ return T()
+
+
+@register_transformation_rule(GetItemTensor)
+def transform_get_item_tensor(constraint, counter):
+ """
+ When the index is a tuple, then the output will be a tensor
+ TODO: we have to check if this is the case for all HF models
+
+ The cases we are covering here are a tuple with one of:
+ - slice with default argument
+ - None
+
+ None appends 1 to the input tensor dimensions
+ so each occurrence of 'None' increases the rank by 1
+
+ slice with default arguments does not change the rank
+ """
+ assert isinstance(constraint.index_tuple, tuple)
+
+ # generate a result tensor of the expected size
+ dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
+ nat_constraints = gen_nat_constraints(dims)
+
+ # generate a place-holder list of the right rank
+ # where
"slice" does not contribute to the rank and "None" does + none_c = constraint.index_tuple.count(None) + resulting_tensor_dims = (none_c + len(dims)) * [None] + + dim_index = 0 + for i in range(len(constraint.index_tuple)): + + # append 1 to the right location of the resulting tensor + if constraint.index_tuple[i] is None: + resulting_tensor_dims[i] = 1 + + elif constraint.index_tuple[i] == slice(None, None, None): + pass + + else: + raise NotImplementedError('Method not yet implemented') + + # append the remaining dimensions to the right location + dim_index = 0 + for i in range(len(resulting_tensor_dims)): + if resulting_tensor_dims[i] is None: + resulting_tensor_dims[i] = dims[dim_index] + dim_index += 1 + + # check if the index is valid + is_valid_index = valid_index_tensor(constraint.index_tuple, dims) + + # check if the resulting tensor is within bounds + if len(resulting_tensor_dims) > 4: + return F(), counter + + else: + constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq), + BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq), + *nat_constraints, + is_valid_index] + return Conj(constraints), counter + + +@register_transformation_rule(BinConstraintT) +def generate_binconstraint_t(constraint, counter): + """ + Transform binary constraints for tensors + """ + + # precision constraints + if constraint.op == op_precision: + if constraint.lhs == Dyn: + return T(), counter + elif isinstance(constraint.lhs, TensorType): + is_fully_static = all(d != Dyn for d in constraint.lhs.__args__) + if is_fully_static: + return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter + else: + new_dims = [] + + for _ in range(len(constraint.lhs.__args__)): + dim, counter = gen_dvar(counter) + new_dims.append(dim) + + new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for + new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \ + [BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \ + [BinConstraintD(1, new_dim, op_leq) for + new_dim in new_dims] + return Conj(new_dim_constraints), counter + + # matching + elif constraint.op == op_matching: + assert isinstance(constraint.rhs, TensorType) + d1 = constraint.rhs.__args__[0] + d2 = constraint.rhs.__args__[1] + d3 = constraint.rhs.__args__[2] + d4 = constraint.rhs.__args__[3] + + conj = [BinConstraintT(constraint.lhs, Dyn, op_eq), + BinConstraintD(d1, Dyn, op_eq), + BinConstraintD(d2, Dyn, op_eq), + BinConstraintD(d3, Dyn, op_eq), + BinConstraintD(d4, Dyn, op_eq)] + return Disj([Conj(conj), + BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter + + elif constraint.op == op_consistency: + c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)]) + [c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter) + + return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter + + elif constraint.op == op_leq: + assert isinstance(constraint.rhs, int) + disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)] + for i in range(1, constraint.rhs + 1): + dims = [] + for j in range(1, i + 1): + dim_var, counter = gen_dvar(counter) + dims.append(dim_var) + disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq)) + return Disj(disj), counter + else: + return constraint, counter + + +@register_transformation_rule(BinConstraintD) +def generate_binconstraint_d(constraint, counter): + """ + Transform binary constraints for dimensions + """ + if 
constraint.op == op_precision: + if isinstance(constraint.lhs, int): + return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter + elif constraint.lhs == Dyn: + return T(), counter + + elif constraint.op == op_consistency: + return Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq), + BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter + + else: + return constraint, counter + + +@register_transformation_rule(Conj) +def generate_conj(constraint, counter): + """ + Transform conjunctions + """ + new = [] + for c in constraint.conjucts: + new_c, counter = transform_constraint(c, counter) + new.append(new_c) + return Conj(new), counter + + +@register_transformation_rule(Disj) +def generate_disj(constraint, counter): + """ + Transform disjunctions + """ + new = [] + for c in constraint.disjuncts: + new_c, counter = transform_constraint(c, counter) + new.append(new_c) + return Disj(new), counter + + +@register_transformation_rule(TGreatestUpperBound) +def generate_gub(constraint, counter): + """ + Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound + on dimensions + """ + c1 = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq), + BinConstraintT(constraint.rhs2, Dyn, op_eq)]), BinConstraintT(constraint.res, Dyn, op_eq)]) + + [c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter) + + return Disj([c1, c2, c3, c4, c5]), counter + + +@register_transformation_rule(DGreatestUpperBound) +def generate_d_gub(constraint, counter): + """ + Transform greatest upper bound for dimensions into equality constraints + """ + c1 = Conj([BinConstraintD(constraint.rhs1, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs2, op_eq)]) + c2 = Conj([BinConstraintD(constraint.rhs2, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)]) + c3 = Conj([BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)]) + return Disj([c1, c2, c3]), counter + + +@register_transformation_rule(CalcConv) +def generate_calc_conv(constraint, counter): + d, counter = gen_tensor_dims(4, counter) + conv_result = TensorType([d[0], d[1], d[2], d[3]]) + + # the convolution result is a tensor of size 4 + c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq) + + # the second dimension of the output is equal to the output channels + c2 = Conj([BinConstraintD(d[1], constraint.c_out, op_eq), BinConstraintD(d[1], Dyn, op_neq)]) + + # the input corresponds to the output in the first dimension of the convolution + c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq) + + c4, c5 = calc_last_two_dims(constraint, d) + + leq_constraints = Conj([BinConstraintD(0, d[0], op_leq), + BinConstraintD(0, d[1], op_leq), + BinConstraintD(0, d[2], op_leq), + BinConstraintD(0, d[3], op_leq)]) + + return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter + + +@register_transformation_rule(CalcMaxPool) +def generate_calc_maxpool(constraint, counter): + """ + Transform maxpool constraints + """ + d, counter = gen_tensor_dims(4, counter) + maxpool_result = TensorType([d[0], d[1], d[2], d[3]]) + + # the maxpool result is a tensor of size 4 + c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq) + + # the input corresponds to the output in the first and second dimension of maxpool + c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq) + c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq) + c4, c5 = 
calc_last_two_dims(constraint, d) + + leq_constraints = Conj([BinConstraintD(0, d[0], op_leq), + BinConstraintD(0, d[1], op_leq), + BinConstraintD(0, d[2], op_leq), + BinConstraintD(0, d[3], op_leq)]) + + return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter + + +@register_transformation_rule(CalcProduct) +def generate_calc_product(constraint, counter): + """ + Transform flatten constraints + """ + start = constraint.start + end = constraint.end + dims = constraint.dims_to_flatten + flattened = constraint.flattened + n = len(constraint.dims_to_flatten) + + # this is evaluated eagerly, right here at transformation time + boundary_check = (0 <= start < end <= n) + + c_boundary = T() if boundary_check else F() + + lhs = dims[0:start] + rhs = dims[end:] + mid = dims[start:end] + + all_possibilities = generate_all_int_dyn_dim_possibilities(mid) + + all_constraints = [] + + for p in all_possibilities: + p = list(p) + # this tells us there is a dynamic variable + contains_dyn = not all(c.op == op_neq for c in p) + if contains_dyn: + mid_var = [Dyn] + total_constraints = lhs + mid_var + rhs + if len(total_constraints) > 4: + all_constraints.append(F()) + else: + all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq)] + p)) + else: + new_var, counter = gen_dvar(counter) + mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)]) + mid_var = [new_var] + total_constraints = lhs + mid_var + rhs + if len(total_constraints) > 4: + all_constraints.append(F()) + else: + all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq), mid_eq_prod] + p)) + + return Conj([Disj(all_constraints), c_boundary]), counter + + +@register_transformation_rule(CanReshape) +def generate_reshape(constraint, counter): + """ + Transform reshape constraints + """ + d, counter = gen_tensor_dims(4, counter) + + d1 = d[0] + d2 = d[1] + d3 = d[2] + d4 = d[3] + + target = constraint.target.__args__ + + is_fully_static = all(dim != Dyn for dim in target) + + # dynamic tensor + c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq) + c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq) + c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq) + c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq) + c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq) + + d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq) + d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq) + + d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq) + d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq) + + d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq) + d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq) + + d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq) + d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq) + + nat_d1 = BinConstraintD(0, d1, op_leq) + nat_d2 = BinConstraintD(0, d2, op_leq) + nat_d3 = BinConstraintD(0, d3, op_leq) + nat_d4 = BinConstraintD(0, d4, op_leq) + + if is_fully_static: + # size 1 tensor + c3_tensor1 = Disj([d1_eq_dyn, + (Conj([d1_neq_dyn, + BinConstraintD(d1, Prod(target), op_eq)]))]) + all_tensor_1 = Conj([c2_tensor1, c3_tensor1]) + + # size 2 tensor + all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)]) + + # size 3 tensor + all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)]) + + # size 4 tensor + all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)]) + + return Conj([Disj([c1_dyn, 
all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]), + nat_d1, nat_d2, nat_d3, nat_d4]), counter + + # then there must be exactly one occurrence of dyn + else: + new_target = [] + + for n in target: + if n != Dyn: + new_target.append(n) + + # tensor 1 + c3_tensor1 = Disj([d1_eq_dyn, + (Conj([d1_neq_dyn, + is_dim_div_by_target(new_target, d1)]))]) + all_tensor_1 = Conj([c2_tensor1, c3_tensor1]) + + # tensor 2 + c21 = Disj([d1_eq_dyn, d2_eq_dyn]) + c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))]) + all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])]) + + # tensor 3 + c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn]) + c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))]) + all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])]) + + # tensor 4 + c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn]) + c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))]) + all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])]) + + return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]), + nat_d1, nat_d2, nat_d3, nat_d4]), counter + + +@register_transformation_rule(ApplyBroadcasting) +def generate_broadcasting(constraint, counter): + """ + Transform broadcasting constraints + """ + e11, e12 = constraint.res1, constraint.res2 + e1, e2 = constraint.input1, constraint.input2 + + e1_dyn = BinConstraintT(e1, Dyn, op_eq) + e2_dyn = BinConstraintT(e2, Dyn, op_eq) + + # Introduce dimensions + e1_equal_e11 = BinConstraintT(e1, e11, op_eq) + e2_equal_e12 = BinConstraintT(e2, e12, op_eq) + + # dyn possibility + e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12]) + e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12]) + + # tensor possibility + # generate dimensions to create tensors of size 1 + final_tensor_1_constraint, _, _, nat_dims_1, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter) + + # generate dimensions to create tensors of size 2 + final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \ + final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter) + + # generate dimensions to create tensors of size 3 + final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \ + final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter) + + # generate dimensions to create tensors of size 4 + final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \ + final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \ + gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter) + + final_result = Disj([ + e1_dyn_constraint, + e2_dyn_constraint, + final_tensor_1_constraint, + final_tensor_2_constraint_no_padding, + final_tensor_2_constraint_padding_arg1, + final_tensor_2_constraint_padding_arg2, + final_tensor_3_constraint_no_padding, + final_tensor_3_constraint_padding_arg1, + final_tensor_3_constraint_padding_arg2, + final_tensor_4_constraint_no_padding, + final_tensor_4_constraint_padding_arg1, + final_tensor_4_constraint_padding_arg2 + ]) + + return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter + + +def transform_constraint(constraint: Constraint, counter: int): + """ + Transforms a constraint into a simpler constraint. 
+ Ex: precision and consistency are transformed to equality + Args: + constraint: constraint to be transformed + counter: for variable tracking + + Returns: Constraint + + """ + if type(constraint) in _TRANSFORMATION_RULES: + return _TRANSFORMATION_RULES[type(constraint)](constraint, counter) + + else: + return constraint, counter + + + + +def calc_last_two_dims(constraint, d: List[DVar]): + """ + Generates constraints for the last two dimensions of a convolution or a maxpool output + Args: + constraint: CalcConv or CalcMaxPool + d: The list of output dimensions + + Returns: Constraints for calculating the last two dimensions of the output + + """ + + assert isinstance(constraint, (CalcConv, CalcMaxPool)) + + b3 = constraint.matching_constraint[2] + b4 = constraint.matching_constraint[3] + + b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)]) + b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)]) + + d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)]) + d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)]) + + # transform parameters into tuples incase they are not already + padding = (constraint.padding, constraint.padding) \ + if isinstance(constraint.padding, int) else constraint.padding + kernel = (constraint.kernel, constraint.kernel) \ + if isinstance(constraint.kernel, int) else constraint.kernel + stride = (constraint.stride, constraint.stride) \ + if isinstance(constraint.stride, int) else constraint.stride + dilation = (constraint.dilation, constraint.dilation) \ + if isinstance(constraint.dilation, int) else constraint.dilation + + f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add) + f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul) + f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div) + f4 = BinConstraintD(f3, 1, op_add) + + c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])]) + + f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add) + f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul) + f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div) + f44 = BinConstraintD(f33, 1, op_add) + + c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])]) + + return c4, c5 + + +def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]): + """ + Generate all possibilities of being equal or not equal to dyn for my_list + Args: + my_list: List of tensor dimensions + + Returns: A list of a list of constraints. 
Each list of constraints corresponds to + one possibility about the values of the dimension variables + """ + # generate all possibilities of being equal or not equal to dyn for my_list + eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))] + neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))] + d_possibilities = [] + + for i in zip(eq_possibilities, neq_possibilities): + d_possibilities.append(list(i)) + all_possibilities = list(itertools.product(*d_possibilities)) + return all_possibilities + + +def is_target_div_by_dim(target: List[int], dim: List[DVar]): + """ + Generate constraints to check if the target dimensions are divisible by the input dimensions + Args: + target: Target dimensions + dim: Input dimensions + + Returns: Constraints to check divisibility + + """ + return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq) + + +def is_dim_div_by_target(target: List[int], dim: List[DVar]): + """ + Generate constraints to check if the input dimensions is divisible by the target dimensions + Args: + target: Target dimensions + dim: Input dimensions + + Returns: Constraints to check divisibility + + """ + return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq) + + +def gen_all_reshape_possibilities(list_of_dims, target): + """ + Consider all possibilities what the input dimensions could be (number or dynamic) + Then generate the appropriate constraints using multiplication or mod depending on the possibility + The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn + for the input. Target is fixed because at most one dimension could be dyn. + We have different cases for this. + + Args: + list_of_dims: The input list of dimensions + target: The tensor we want to reshape to + + Returns: A disjunction of transformed reshape constraints + + """ + all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims) + + all_constraints = [] + + for p in all_possibilities: + to_multiply = [] + + p = list(p) + + for constraint in p: + assert isinstance(constraint, BinConstraintD) + if constraint.op == op_neq: + to_multiply.append(constraint.lhs) + + if not to_multiply: + all_constraints.append(Conj(p)) + + elif len(to_multiply) < len(list_of_dims): + all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))])) + else: + all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims), + Prod(target), op_eq)])) + + return Disj(all_constraints) + + +def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False): + """ + Apply broadcasting to the 'index' dimension of tensor_input1. + Args: + tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1 + tensor_input2: represents the second input + res1: broadcasted result 1 + res2: broadcasted result 2 + index: the index to broadcast + padding: If padding was used, then tensor_input1[index] does not exist + + Returns: + + """ + if tensor_input1[index] is None: + assert padding + + + if not padding: + # then the inputs are the same length so they all have dimensions at "index" + return Conj([BinConstraintD(tensor_input1[index], 1, op_eq), + BinConstraintD(res1[index], res2[index], op_eq), + BinConstraintD(res2[index], tensor_input2[index], op_eq)]) + + else: + # we don't set the input dimension to 1, since it doesn't exist. 
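+ # Illustrative sketch (dimension names hypothetical): if tensor_input1 was + # padded and tensor_input2[index] is some dimension b, the constraints below + # force res1[index] == res2[index] == b, exactly as if the missing + # dimension had been 1.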
+ return Conj([BinConstraintD(res1[index], res2[index], op_eq), + BinConstraintD(res2[index], tensor_input2[index], op_eq)]) + + +def apply_padding(e1_var: TVar, + e11: BinConstraintT, + e2: BinConstraintT, + e12: BinConstraintT, + d2: List[DVar], + d11: List[DVar], + d12: List[DVar], + counter: int): + """ + We are considering the possibility where one input has less dimensions than + another input, so we apply padding to the broadcasted results + + Args: + e1_var: Variable representing the first input where padding will be + e11: constraint of the form e11 = Tensortype[d1, ..., dn] + e2: constraint of the form e2 = Tensortype[d1, ..., dn] + e12: constraint of the form e11 = Tensortype[d1, ..., dn] + d2: Tensor variables for the second input + d11: Tensor variables for the broadcasted first input + d12: Tensor variables for the broadcasted second input + counter: variable tracking + + Returns: A new constraint whose goal is to apply padding to the broadcasted result + + """ + + res = [] + + # pad the shorter input with None so we can pass it to the broadcasting helper function + for i in range(1, len(d2)): + + d1, counter = gen_tensor_dims(i, counter) + + nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12) + + e1 = BinConstraintT(e1_var, TensorType(d1), op_eq) + + simulate_padding = [None] * (len(d2) - i) + + assert len(simulate_padding + d1) == len(d2) + + broadcast_padding = [] + + # for every padding size, we also consider broadcasting + for j in range(len(d2) - i): + broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True)) + + # we consider the possibilities for broadcasting for every dimension. Since we already + # padded d1, we do not consider it while broadcasting + all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1, + d2[(len(d2) - i):], + d11[(len(d2) - i):], + d12[(len(d2) - i):]) + # combine all constraints into a conjunction + c = Conj([e1, e11, e2, e12, + *broadcast_padding, + all_broadcasting_possibilities, + *nat_constraints + ]) + res.append(c) + + return Disj(res), counter + + +def no_broadcast_dim_with_index(d1: List[DVar], + d2: List[DVar], + d3: List[DVar], + d4: List[DVar], + i: int): + """ + Args: + d1: input 1 + d2: input 2 + d3: simulated broadcasting for input 1 + d4: simulated broadcasting for input 2 + i: the rank of the resulting tensor addition + + Returns: Constraints for when no broadcasting occurs + """ + return Conj([ + Disj([ + Conj([BinConstraintD(d1[i], 1, op_eq), + BinConstraintD(d2[i], 1, op_eq)]), + + Conj([BinConstraintD(d1[i], 1, op_neq), + BinConstraintD(d2[i], 1, op_neq)])]), + + BinConstraintD(d1[i], d3[i], op_eq), + BinConstraintD(d2[i], d4[i], op_eq)]) + + + +def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int): + """ + Generate lists of DVar to represent tensor dimensions + Args: + num_tensors: the required number of tensors + dim_size: the number of dimensions for each tensor + counter: variable tracking + + Returns: A list of a list of tensor dimensions + + """ + res = [] + + for _ in range(num_tensors): + dims, counter = gen_tensor_dims(dim_size, counter) + res.append(dims) + + return res, counter + + +def create_equality_constraints_for_broadcasting(e1: TVar, + e2: TVar, + e11: TVar, + e12: TVar, + d1: List[DVar], + d2: List[DVar], + d11: List[DVar], + d12: List[DVar]): + """ + Create equality constraints for when no broadcasting occurs + Args: + e1: Input 1 + e2: Input 2 + e11: Broadcasted input 1 + e12: Broadcasted input 2 + d1: Variables that store 
dimensions for e1 + d2: Variables that store dimensions for e2 + d11: Variables that store dimensions for e11 + d12: Variables that store dimensions for e22 + + Returns: Four equality constraints + + """ + + e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq) + e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq) + e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq) + e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq) + return [e1_tensor, e11_tensor, e2_tensor, e12_tensor] + + +def gen_consistency_constraints(constraint: Constraint, counter: int): + """ + Args: + constraint: Consistency constraint on tensors + counter: for variable tracking + + Returns: Equality and consistency constraints on dimensions + + """ + + all_constraints = [] + + for i in range(1, MAX_TENSOR_RANK + 1): + new_dims_rhs_1, counter = gen_tensor_dims(i, counter) + new_dims_rhs_2, counter = gen_tensor_dims(i, counter) + + nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2) + + c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq), + BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] + + [BinConstraintD(d1, d2, op_consistency) for + d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints) + + all_constraints.append(c_tensor_i) + + return all_constraints, counter + + +def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int): + """ + Args: + constraint: Greatest upper bound on tensors + counter: variable tracking + + Returns: A set of equality constraints and DGreatestUpperBound constraints + + """ + + all_constraints = [] + + for i in range(1, MAX_TENSOR_RANK + 1): + c = [] + dims1, counter = gen_tensor_dims(i, counter) + c1tensor = TensorType(dims1) + + dims2, counter = gen_tensor_dims(i, counter) + c2tensor = TensorType(dims2) + + dims3, counter = gen_tensor_dims(i, counter) + c3tensor = TensorType(dims3) + + c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq), + BinConstraintT(constraint.rhs2, c2tensor, op_eq), + BinConstraintT(constraint.res, c3tensor, op_eq)] + \ + gen_nat_constraints(dims1 + dims2 + dims3) + + assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__) + for i in range(len(c3tensor.__args__)): + c.append(DGreatestUpperBound(c3tensor.__args__[i], + c1tensor.__args__[i], + c2tensor.__args__[i])) + + all_constraints.append(Conj(c)) + return all_constraints, counter + + +def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]): + """ + Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension. + We look at all combinations for all dimensions in d1 and d2 + Args: + d1: input1 dimensions + d2: input2 dimensions + d11: broadcasted input1 dimensions + d12: broadcasted input2 dimensions + + Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions + + """ + + size = len(d1) + + res2 = [] + + for i in range(size): + t1 = broadcast_dim(d1, d2, d11, d12, i) + t2 = broadcast_dim(d2, d1, d12, d11, i) + t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i) + + res2.append(Disj([t1, t2, t3])) + + return Conj(res2) + + +def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int): + """ + Simulates broadcasting on e1 and e2 and returns the results + respectively in e11 and e12. Because of gradual types, + e1 and e2 may not be equal. Similarly, e11 and e12 may not + be equal. 
e11 and e12 should be guaranteed to be consistent + as they represent the shapes of the tensors to be added after + broadcasting. + Args: + e1: TVar representing the type of input 1 + e2: TVar representing the type of input 2 + e11: TVar representing the representing broadcasted input 1 + e12: TVar representing the representing broadcasted input 2 + i: The rank of the resulting type of addition + counter: for variable tracking + + Returns: Simplified broadcasting constraints + + """ + dims, counter = gen_lists_of_dims(4, i, counter) + [d1, d2, d3, d4] = dims + nat_dims_i = gen_nat_constraints(list(itertools.chain(*dims))) + + initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12, + d1, d2, d3, d4) + + [e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints + + # without padding, broadcast all possibilities for tensors of size i + final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints, + generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)]) + + # with padding, broadcast all possibilities for tensors of size i + final_tensor_constraint_padding_arg1, counter = \ + apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter) + + final_tensor_constraint_padding_arg2, counter = \ + apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter) + + return final_tensor_constraint_no_padding, \ + final_tensor_constraint_padding_arg1, \ + final_tensor_constraint_padding_arg2, nat_dims_i, counter diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py new file mode 100644 index 0000000000000000000000000000000000000000..ec2cb91bbcc1790c419fa603b36cf6bc7afddc18 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/operation.py @@ -0,0 +1,14 @@ +op_add = '+' +op_sub = '-' +op_mul = '*' +op_div = '/' +op_eq = '=' +op_neq = '!=' +op_imp = '=>' +op_matching = '⊳' +op_consistency = '~' +op_precision = '⊑' +op_leq = '≤' +op_lt = '<' +op_gt = '>' +op_mod = '%' diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py new file mode 100644 index 0000000000000000000000000000000000000000..c5c6436c2bc038b5a6c14e7f841f764b1b8b29d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/transform_to_z3.py @@ -0,0 +1,348 @@ +from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, BinConstraintT, BVar, is_bool_expr +from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar +from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim +from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator +from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint +from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt +from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod +from torch.fx.tensor_type import TensorType, Dyn + +try: + import z3 # type: ignore[import] + from 
torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D + HAS_Z3 = True + + def transform_to_z3(constraint, counter, dimension_dict): + if isinstance(constraint, Conj): + conjuncts = [] + for c in constraint.conjucts: + new_c, counter = transform_to_z3(c, counter, dimension_dict) + conjuncts.append(new_c) + return z3.And(conjuncts), counter + + elif isinstance(constraint, Disj): + disjuncts = [] + for c in constraint.disjuncts: + new_c, counter = transform_to_z3(c, counter, dimension_dict) + disjuncts.append(new_c) + return z3.Or(disjuncts), counter + + elif isinstance(constraint, T): + return True, counter + + elif isinstance(constraint, F): + return False, counter + + elif isinstance(constraint, BinConstraintT): + if constraint.op == op_eq: + lhs, counter = transform_var(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_var(constraint.rhs, counter, dimension_dict) + return (lhs == rhs), counter + + else: + raise NotImplementedError('Method not yet implemented') + + elif isinstance(constraint, BinConstraintD): + if constraint.op == op_eq: + + if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs): + transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, dimension_dict) + transformed_lhs = z3.Bool(constraint.lhs.c) + return transformed_lhs == transformed_rhs, counter + + elif is_dim(constraint.lhs) and is_dim(constraint.rhs): + # with dimension transformations we consider the encoding + lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict) + return lhs == rhs, counter + + else: + # then we have an algebraic expression which means that we disregard the + # first element of the encoding + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs == rhs, counter + + # The assumption here is that the LHS and RHS must be dimensions + elif constraint.op == op_neq: + assert is_dim(constraint.lhs) + assert is_dim(constraint.rhs) + lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict) + if constraint.rhs == Dyn or constraint.lhs == Dyn: + if constraint.rhs == Dyn: + return lhs.arg(0) == 1, counter + elif constraint.lhs == Dyn: + return rhs.arg(0) == 1, counter + + # if one of the instances is a number + elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int): + if isinstance(constraint.lhs, int): + return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter + + elif isinstance(constraint.rhs, int): + return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter + + else: + return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]), + z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]), + z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter + + + elif constraint.op == op_leq: + # if the dimensions are not dyn, this will come into effect + # there would have been another constraint specifying if a given dimension + # is dyn or not + assert is_dim(constraint.lhs) and is_dim(constraint.rhs) + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs <= rhs, counter + + elif 
constraint.op == op_gt: + assert is_dim(constraint.lhs) and is_dim(constraint.rhs) + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs > rhs, counter + + elif constraint.op == op_lt: + assert is_dim(constraint.lhs) and is_dim(constraint.rhs) + lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict) + return lhs < rhs, counter + + else: + raise NotImplementedError('operation not yet implemented') + + else: + raise NotImplementedError('Operation not yet implemented') + + + def transform_var(tensor, counter, dimension_dict): + """ + Transforms tensor variables to a format understood by z3 + Args: + tensor: Tensor variable or a tensor type potentially with variable dimensions + Returns: Transformed variable to a z3 format + + """ + if isinstance(tensor, TensorType): + res = [] + for t in tensor.__args__: + transformed, counter = transform_dimension(t, counter, dimension_dict) + res.append(transformed) + + assert len(res) <= 4 + if len(tensor.__args__) == 1: + return tensor_type.tensor1(res[0]), counter + elif len(tensor.__args__) == 2: + return tensor_type.tensor2(res[0], res[1]), counter + elif len(tensor.__args__) == 3: + return tensor_type.tensor3(res[0], res[1], res[2]), counter + elif len(tensor.__args__) == 4: + return tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter + + elif tensor == Dyn: + return z3_dyn, counter + + elif isinstance(tensor, TVar): + return z3.Const(tensor.tvar, tensor_type), counter + + def transform_dimension(dimension, counter, dimension_dict): + """ + Takes a dimension variable or a number and transforms it to a tuple + according to our scheme + Args: + dimension: The dimension to be transformed + counter: variable tracking + + Returns: tuple and the current counter + + """ + if dimension == Dyn: + counter += 1 + return D(0, z3.Int(counter)), counter + elif isinstance(dimension, int): + return D(1, dimension), counter + elif isinstance(dimension, DVar): + if dimension.c in dimension_dict: + return D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter + else: + counter += 1 + dimension_dict[dimension.c] = counter + return D(z3.Int(counter), z3.Int(dimension.c)), counter + + + def transform_algebraic_expression(expr, counter, dimension_dict): + """ + Transforms an algebraic expression to z3 format + Args: + expr: An expression is either a dimension variable or an algebraic-expression + + + Returns: the transformed expression + + """ + assert is_algebraic_expression(expr) or is_dim(expr) + + if is_dim(expr): + transformed, counter = transform_dimension(expr, counter, dimension_dict) + return transformed.arg(1), counter + + elif isinstance(expr, Prod): + + dims = [] + for dim in expr.products: + assert is_dim(dim) + d, counter = transform_dimension(dim, counter, dimension_dict) + dims.append(d.arg(1)) + return z3.Product(dims), counter + + elif is_algebraic_expression(expr): + + lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict) + rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict) + + if expr.op == op_sub: + c = lhs - rhs + + elif expr.op == op_add: + c = lhs + rhs + + elif expr.op == op_div: + c = lhs / rhs + + elif expr.op == op_mul: + c = lhs * rhs + + elif expr.op == op_mod: + c = lhs % rhs + + else: + raise 
NotImplementedError('operation not yet implemented') + + return c, counter + + else: + raise RuntimeError(f'Cannot transform expression {expr}') + + + def transform_all_constraints(traced, counter=0): + """ + Given a trace, generates constraints and transforms them to z3 format + """ + dimension_dict = {}  # type: ignore[var-annotated] + + generator = ConstraintGenerator(traced) + new_constraints, counter = generator.generate_constraints(counter) + + # transform precision, matching, consistency till obtaining a fixed point + new_constraints, counter = iterate_till_fixed_point(new_constraints, counter) + + transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict) + return transformed + + def iterate_till_fixed_point(constraints, counter): + """ + Transform constraints till reaching a fixed point + """ + old_c = None + while old_c != constraints: + old_c = constraints + constraints, counter = transform_constraint(constraints, counter) + return constraints, counter + + def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0): + """ + Takes a node and a graph and generates two sets of constraints: + one conjoined with the node's conditional constraint and + one conjoined with its negation + Args: + tracer_root: the root for getting the module instances + graph: the graph so far in the tracing process + node: node that represents a conditional + counter: variable tracking + + Returns: Two sets of constraints. One with a conjunction with the + conditional constraint and the other with a conjunction with + its negation. 
+ + """ + dimension_dict = {} # type: ignore[var-annotated] + + generator = ConstraintGenerator(tracer_root, graph) + new_constraints, counter = generator.generate_constraints(counter) + + condition_constraint = new_constraints.conjucts[-1] + + # we know the constraint is a conjunction where the last constraint is about the conditional + # so remove the last constraint + new_constraints.conjucts = new_constraints.conjucts[:-1] + + # transform precision, matching, consistency till obtaining a fixed point + new_constraints, counter = iterate_till_fixed_point(new_constraints, counter) + + + # since the function returns a list of one element, we get the first element + # we are only interested in the RHS in this case because the LHS just stores + # the result + + # we make sure the constraint is of the form: + # c = b where b is a boolean expression + # and we consider b (constraint.rhs) for transformation + assert isinstance(condition_constraint.lhs, BVar) + assert is_bool_expr(condition_constraint.rhs) + condition_constraint_rhs = condition_constraint.rhs + + # transform the condition constraint + condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter) + + transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict) + + transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict) + + negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint) + + return z3.And([transformed, transformed_condition_constraint]),\ + z3.And([transformed, negation_transformed_condition_constraint]) + + + def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None): + """ + Given an IR and a node representing a conditional, evaluate the conditional + and its negation + Args: + tracer_root: Tracer root for module instances + node: The node to be evaluated + + Returns: the results of evaluating the condition and the negation with + the rest of the constraints + + """ + + transformed_positive, transformed_negative = \ + transform_all_constraints_trace_time(tracer_root, graph, node, counter) + + s = z3.Solver() + s.add(transformed_positive) + if user_constraints is not None: + s.add(user_constraints) + condition = s.check() + + s = z3.Solver() + s.add(transformed_negative) + if user_constraints is not None: + s.add(user_constraints) + negation = s.check() + return condition, negation + +except ImportError: + HAS_Z3 = False diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py new file mode 100644 index 0000000000000000000000000000000000000000..a43d8f3ebbe060d8c7659b65a2dd924e34d2ce3b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/util.py @@ -0,0 +1,52 @@ +from torch.fx.experimental.migrate_gradual_types.constraint import TVar, DVar, BinConstraintD, \ + BVar +from torch.fx.experimental.migrate_gradual_types.operation import op_leq + + +def gen_tvar(curr): + """ + Generate a tensor variable + :param curr: The current counter + :return: a tensor variable and the updated counter + """ + curr += 1 + return TVar(curr), curr + + +def gen_dvar(curr): + """ + Generate a dimension variable + :param curr: the current counter + :return: a dimension variable and an updated counter + """ + curr += 1 + return DVar(curr), curr + +def gen_bvar(curr): + """ + 
Generate a boolean variable + :param curr: the current counter + :return: a boolean variable and an updated counter + """ + curr += 1 + return BVar(curr), curr + +def gen_tensor_dims(n, curr): + """ + Generate a list of tensor dimensions + :param n: the number of dimensions + :param curr: the current counter + :return: a list of dimension variables and an updated counter + """ + dims = [] + for _ in range(n): + dvar, curr = gen_dvar(curr) + dims.append(dvar) + return dims, curr + + +def gen_nat_constraints(list_of_dims): + """ + Generate natural number constraints for dimensions + """ + return [BinConstraintD(0, d, op_leq) for d in list_of_dims] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py new file mode 100644 index 0000000000000000000000000000000000000000..897a79d5697573a51f5886d5e9965a98e2c4cf6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/z3_types.py @@ -0,0 +1,29 @@ +try: + import z3 # type: ignore[import] + HAS_Z3 = True + # dynamic type + dyn = z3.DeclareSort('Dyn') + dyn_type = z3.Const('dyn', dyn) + + # dimension + dim = z3.Datatype('dim') + dim.declare('dim', ('0', z3.IntSort()), ('1', z3.IntSort())) + dim = dim.create() + + # tensors + tensor_type = z3.Datatype('TensorType') + tensor_type.declare('Dyn', ('dyn', dyn)) + tensor_type.declare('tensor1', ('0', dim)) + tensor_type.declare('tensor2', ('0', dim), ('1', dim)) + tensor_type.declare('tensor3', ('0', dim), ('1', dim), ('2', dim)) + tensor_type.declare('tensor4', ('0', dim), ('1', dim), ('2', dim), ('3', dim)) + tensor_type = tensor_type.create() + + # create dimension + D = dim.dim + + z3_dyn = tensor_type.Dyn(dyn_type) + + +except ImportError: + HAS_Z3 = False diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/normalize.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..06bc2309975caf6197bbe6ff0c3c4cffeff7ee51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/normalize.py @@ -0,0 +1,162 @@ +import operator +from typing import Any, Callable, Dict, Tuple, Optional + +import torch +import torch.fx +import torch.fx as fx +from torch.fx import Transformer, Proxy +from torch.fx.node import Argument, Target, Node, map_aggregate +from torch.fx.operator_schemas import ( + normalize_module, + normalize_function, + create_type_hint, +) + +from .schema_type_annotation import AnnotateTypesWithSchema + + +class NormalizeArgs(Transformer): + """ + Normalize arguments to Python targets. This means that + `args/kwargs` will be matched up to the module/functional's + signature and rewritten to exclusively kwargs in positional order + if `normalize_to_only_use_kwargs` is true. Also populates default + values. Does not support positional-only parameters or varargs + parameters (*args, **kwargs). + + If the nodes have 'type' metadata, it will use it to disambiguate + overloads. Otherwise, it will throw an error. 
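+ Type metadata can be populated ahead of time, e.g. by running a + shape-propagation pass such as torch.fx.passes.shape_prop.ShapeProp, + which records result types in node.meta.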
+ + Example usage: + m = torchvision.models.resnet18() + traced = torch.fx.symbolic_trace(m) + traced = NormalizeArgs(traced).transform() + """ + + def __init__( + self, module: torch.fx.GraphModule, normalize_to_only_use_kwargs: bool = True + ): + super().__init__(module) + self.node_map: Dict[Proxy, Node] = {} + self.normalize_to_only_use_kwargs = normalize_to_only_use_kwargs + + def run_node(self, n: Node) -> Any: + args, kwargs = self.fetch_args_kwargs_from_env(n) + + def get_type(arg): + if isinstance(arg, fx.Node): + return n.meta["type"] if "type" in n.meta else None + return type(arg) + + arg_types = map_aggregate(n.args, get_type) + assert isinstance(arg_types, tuple) + arg_types = tuple([create_type_hint(i) for i in arg_types]) + kwarg_types = {k: get_type(v) for k, v in kwargs.items()} + if n.op == "call_function": + out = self.call_function(n.target, args, kwargs, arg_types, kwarg_types) + else: + out = super().run_node(n) + if n.op != "output": + self.node_map[out] = n + out.node.meta = n.meta + out.node.type = n.type + return out + + def call_function( + self, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Any], + arg_types: Optional[Tuple[Any, ...]] = None, + kwarg_types: Optional[Dict[str, Any]] = None, + ): + assert callable(target) + new_args_and_kwargs = normalize_function( + target, + args, # type: ignore[arg-type] + kwargs, + arg_types, # type: ignore[arg-type] + kwarg_types, + self.normalize_to_only_use_kwargs, + ) + if new_args_and_kwargs: + new_args, new_kwargs = new_args_and_kwargs + return self.tracer.create_proxy( + "call_function", target, new_args, new_kwargs + ) + else: + return super().call_function(target, args, kwargs) + + def call_module( + self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any] + ): + assert isinstance(target, str) + new_args_and_kwargs = normalize_module( + self.module, + target, + args, # type: ignore[arg-type] + kwargs, + self.normalize_to_only_use_kwargs, + ) + if new_args_and_kwargs: + new_args, new_kwargs = new_args_and_kwargs + return super().call_module(target, new_args, new_kwargs) + else: + return super().call_module(target, args, kwargs) + + +class NormalizeOperators(AnnotateTypesWithSchema): + """ + Normalize callsites that are different ways of "spelling" the same + invocation into a single, canonical call. Currently supports: + + 1. Normalize operators (e.g. operator.add) to the `torch` ops they + ultimately invoke (e.g. 
torch.add) when it is possible to statically + reason that + + Example usage: + + m = torchvision.models.resnet18() + + traced = torch.fx.symbolic_trace(m) + + traced = NormalizeOperators(traced).transform() + """ + + binary_magic_method_remap: Dict[ + Callable[[Any, Any], Any], Callable[[Any, Any], Any] + ] = { + torch.add: operator.add, + torch.mul: operator.mul, + torch.sub: operator.sub, + torch.div: operator.truediv, + torch.floor_divide: operator.floordiv, + torch.remainder: operator.mod, + torch.eq: operator.eq, + torch.ne: operator.ne, + torch.lt: operator.lt, + torch.le: operator.le, + torch.gt: operator.gt, + torch.ge: operator.ge, + } + + def call_function( + self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any] + ): + # Normalize operators according to the magic methods implemented on tensors here: + # https://github.com/pytorch/pytorch/blob/28c5d90b679c6b38bf4183ec99f16d933c2f1bcd/tools/autograd/templates/python_variable_methods.cpp#L1137 # noqa: B950 + + assert callable(target) + + if target in self.binary_magic_method_remap: + if len(args) != 2: + return super().call_function(target, args, kwargs) + lhs, rhs = args + + return super().call_function( + target=self.binary_magic_method_remap[target], + args=(lhs, rhs), + kwargs={}, + ) + + return super().call_function(target, args, kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/optimization.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..fc7cbbb22faa391a8068ea6a02619a6ac14395c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/optimization.py @@ -0,0 +1,405 @@ +import torch.fx as fx +from torch.fx.node import Argument, Target +from torch.nn.utils.fusion import fuse_conv_bn_eval +from typing import Type, Dict, Any, Tuple, Iterable, Optional, List, cast +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.fx.passes.shape_prop import ShapeProp +import copy +from collections import defaultdict +import torch.utils.mkldnn as th_mkldnn +import operator +import time +import logging +from enum import Enum + +def _parent_name(target : str) -> Tuple[str, str]: + """ + Splits a qualname into parent path and last atom. + For example, `foo.bar.baz` -> (`foo.bar`, `baz`) + """ + *parent, name = target.rsplit('.', 1) + return parent[0] if parent else '', name + +# Works for length 2 patterns with 2 modules +def matches_module_pattern(pattern: Iterable[Type], node: fx.Node, modules: Dict[str, Any]): + if len(node.args) == 0: + return False + nodes: Tuple[Any, fx.Node] = (node.args[0], node) + for expected_type, current_node in zip(pattern, nodes): + if not isinstance(current_node, fx.Node): + return False + if current_node.op != 'call_module': + return False + if not isinstance(current_node.target, str): + return False + if current_node.target not in modules: + return False + if type(modules[current_node.target]) is not expected_type: + return False + return True + + +def replace_node_module(node: fx.Node, modules: Dict[str, Any], new_module: torch.nn.Module): + assert(isinstance(node.target, str)) + parent_name, name = _parent_name(node.target) + modules[node.target] = new_module + setattr(modules[parent_name], name, new_module) + +def fuse(model: torch.nn.Module, inplace=False) -> torch.nn.Module: + """ + Fuses convolution/BN layers for inference purposes. 
Will deepcopy your + model by default, but can modify the model inplace as well. + """ + patterns = [(nn.Conv1d, nn.BatchNorm1d), + (nn.Conv2d, nn.BatchNorm2d), + (nn.Conv3d, nn.BatchNorm3d)] + if not inplace: + model = copy.deepcopy(model) + fx_model = fx.symbolic_trace(model) + modules = dict(fx_model.named_modules()) + new_graph = copy.deepcopy(fx_model.graph) + + for pattern in patterns: + for node in new_graph.nodes: + if matches_module_pattern(pattern, node, modules): + if len(node.args[0].users) > 1: # Output of conv is used by other nodes + continue + conv = modules[node.args[0].target] + bn = modules[node.target] + if not bn.track_running_stats: + continue + fused_conv = fuse_conv_bn_eval(conv, bn) + replace_node_module(node.args[0], modules, fused_conv) + node.replace_all_uses_with(node.args[0]) + new_graph.erase_node(node) + return fx.GraphModule(fx_model, new_graph) + +def remove_dropout(model: nn.Module) -> nn.Module: + """ + Removes all dropout layers from the module. + """ + fx_model = fx.symbolic_trace(model) + + class DropoutRemover(torch.fx.Transformer): + def call_module(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + if isinstance(self.submodules[target], nn.Dropout): + assert len(args) == 1 + return args[0] + else: + return super().call_module(target, args, kwargs) + return DropoutRemover(fx_model).transform() + +def extract_subgraph(orig_module: nn.Module, nodes: List[fx.Node], inputs: List[fx.Node], outputs: List[fx.Node]): + """ + Given lists of nodes from an existing graph that represent a subgraph, returns a submodule that executes that subgraph. + """ + new_graph = fx.Graph() + env: Dict[fx.Node, fx.Node] = {} + for input in inputs: + new_node = new_graph.placeholder(input.name) + env[input] = new_node + for node in nodes: + new_node = new_graph.node_copy(node, lambda x: env[x]) + env[node] = new_node + new_graph.output([env[output] for output in outputs]) + new_graph.lint() + return fx.GraphModule(orig_module, new_graph) + +mkldnn_supported = [ + nn.Conv2d, nn.Linear, nn.BatchNorm2d, nn.ReLU, nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d, + torch.relu, torch.transpose, torch.sigmoid, + F.relu, F.avg_pool2d, F.adaptive_avg_pool2d +] +# These are operators that may not be convertible into MKLDNN ops (e.g. the +# args are scalar values). Thus, we only include them in the subgraph if their +# arguments are already in MKLDNN. +# TODO: Determine whether this can be removed after type inference. +mkldnn_supported_unknown = [operator.add, operator.mul] +mkldnn_map = { + nn.Conv2d: th_mkldnn.MkldnnConv2d, + nn.Linear: th_mkldnn.MkldnnLinear, + nn.BatchNorm2d: lambda a, _: th_mkldnn.MkldnnBatchNorm(a) +} + + +def modules_to_mkldnn(nodes: List[fx.Node], modules: Dict[str, nn.Module]): + """ + For each node, if it's a module that can be preconverted into MKLDNN, + then we do so and create a mapping to allow us to convert from the MKLDNN + version of the module to the original. 
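+ Returns a dict that maps each newly created MKLDNN module to a deep copy + of the module it replaced, so that `reset_modules` can later restore the + originals.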
+ """ + old_modules: Dict[nn.Module, nn.Module] = {} + for node in nodes: + if node.op == 'call_module': + assert(isinstance(node.target, str)) + cur_module = modules[node.target] + if type(cur_module) in mkldnn_map: + new_module = mkldnn_map[type(cur_module)](cur_module, torch.float) + assert(isinstance(new_module, nn.Module)) + old_modules[new_module] = copy.deepcopy(cur_module) + replace_node_module(node, modules, new_module) + return old_modules + +def reset_modules(nodes: List[fx.Node], modules: Dict[str, nn.Module], old_modules: Dict[nn.Module, nn.Module]): + """ + Maps each module that's been changed with `modules_to_mkldnn` back to its + original. + """ + for node in nodes: + if node.op == 'call_module': + assert(isinstance(node.target, str)) + cur_module = modules[node.target] + if cur_module in old_modules: + replace_node_module(node, modules, old_modules[cur_module]) + +class MklSubgraph: + def __init__(self, fx_graph: fx.Graph): + self.fx_graph = fx_graph + self.nodes: List[fx.Node] = [] + self.start_nodes: List[fx.Node] = [] + self.end_nodes: List[fx.Node] = [] + +def gen_mkl_autotuner(example_inputs, iters=10, warmup=1): + """ + This generates a heuristic that can be passed into `optimize_for_inference` that + determines whether a subgraph should be run in MKL by running it with the example_inputs. + + Example usage: + heuristic = gen_mkl_autotuner(example_inputs, iters=10) + fast_model = optimization.optimize_for_inference(model, heuristic) + """ + fx_model = None + old_modules = None + + def use_mkl_heuristic(graph: MklSubgraph) -> bool: + nonlocal fx_model, old_modules + input_nodes = graph.start_nodes + if fx_model is None: + fx_model = graph.fx_graph.owning_module + old_modules = graph.fx_graph.old_modules # type: ignore[attr-defined] + ShapeProp(fx_model).propagate(example_inputs) + sample_inputs = [torch.randn(node.shape) for node in input_nodes] # type: ignore[attr-defined] + output_args = cast(List[fx.Node], [node.args[0] for node in graph.end_nodes]) + submodule = extract_subgraph(fx_model, graph.nodes, input_nodes, output_args) + + def benchmark(f): + for _ in range(warmup): + f() + begin = time.time() + for _ in range(iters): + out = f() + return time.time() - begin + + mkl_time = benchmark(lambda: [i.to_dense() for i in submodule(*[i.to_mkldnn() for i in sample_inputs])]) + + reset_modules(submodule.graph.nodes, dict(submodule.named_modules()), old_modules) + no_mkl_time = benchmark(lambda: submodule(*sample_inputs)) + return mkl_time < no_mkl_time + return use_mkl_heuristic + +def use_mkl_length(graph: MklSubgraph) -> bool: + """ + This is a heuristic that can be passed into `optimize_for_inference` that + determines whether a subgraph should be run in MKL by checking if there + are more than 2 nodes in it + """ + return len(graph.nodes) > 2 + +class UnionFind: + def __init__(self, n): + self.parent: List[Optional[int]] = [None] * n + self.size: List[int] = [0] * n + + def make_set(self, v: int): + self.parent[v] = v + self.size[v] = 1 + + def find(self, v: int) -> int: + par = self.parent[v] + if v == par: + return v + assert(par is not None) + self.parent[v] = self.find(par) + return cast(int, self.parent[v]) + + def join(self, a: int, b: int): + a, b = self.find(a), self.find(b) + if a == b: + return a + if self.size[a] < self.size[b]: + a, b = b, a + self.parent[b] = a + self.size[a] += self.size[b] + +def optimize_for_inference( + model: torch.nn.Module, + pass_config: Optional[Dict[str, Any]] = None, + tracer: Type[fx.Tracer] = fx.Tracer +) -> 
torch.nn.Module:
+    """
+    Performs a set of optimization passes to prepare a model for
+    inference. Specifically, the passes that are run are:
+    1. Conv/BN fusion
+    2. Dropout removal
+    3. MKL layout optimizations
+
+    The third optimization takes a function `use_mkl_heuristic` that's used
+    to determine whether a subgraph should be explicitly run in MKL layout.
+
+    Note: As FX does not currently handle aliasing, this pass assumes
+    nothing aliases. If that isn't true, use at your own risk.
+    """
+    default_pass_config = {
+        "conv_bn_fuse": True,
+        "remove_dropout": True,
+        "mkldnn_layout_optimize": {'heuristic': use_mkl_length},
+    }
+    if pass_config is None:
+        pass_config = {}
+    default_pass_config.update(pass_config)
+
+    if default_pass_config["conv_bn_fuse"]:
+        model = fuse(model)
+    if default_pass_config["remove_dropout"]:
+        model = remove_dropout(model)
+    if default_pass_config["mkldnn_layout_optimize"] is False:
+        return model
+    if not isinstance(default_pass_config["mkldnn_layout_optimize"], dict):
+        raise RuntimeError("mkldnn_layout_optimize config is not a dict")
+    if "heuristic" not in default_pass_config["mkldnn_layout_optimize"]:
+        raise RuntimeError("Heuristic not found in mkldnn_layout_optimize config")
+    use_mkl_heuristic = default_pass_config["mkldnn_layout_optimize"]["heuristic"]
+
+    cur_tracer = tracer()
+    fx_graph = cur_tracer.trace(copy.deepcopy(model))
+    fx_model = fx.GraphModule(cur_tracer.root, fx_graph)
+    modules: Dict[str, nn.Module] = dict(model.named_modules())
+
+    class MklSupport(Enum):
+        NO = 1
+        YES = 2
+        UNKNOWN = 3
+
+    # Inserts to_mkldnn and to_dense around every node we want to be an MKLDNN node.
+    # If the op is in `mkldnn_supported` then we always treat it as an MKLDNN node.
+    # However, if it's in `mkldnn_supported_unknown`, then we only treat it as
+    # an MKLDNN node if its inputs are MKLDNN nodes.
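+    # For instance (a sketch; names are illustrative), a supported call
+    #
+    #     y = self.linear(x)
+    #
+    # is rewritten into
+    #
+    #     x_mkl = x.to_mkldnn()
+    #     y = self.linear(x_mkl)
+    #     y_dense = y.to_dense()
+    #
+    # Adjacent MKLDNN regions can then be merged by the to_dense/to_mkldnn
+    # elision pass below.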
+    for node in list(fx_graph.nodes):
+        supports_mkldnn = MklSupport.NO
+        if node.op == 'call_module':
+            cur_module = modules[node.target]
+            if type(cur_module) in mkldnn_supported:
+                supports_mkldnn = MklSupport.YES
+                sample_parameter = next(cur_module.parameters(), None)
+                if sample_parameter is not None:
+                    assert(sample_parameter.dtype == torch.float), "this pass is only for torch.float modules"
+                    assert(sample_parameter.device == torch.device('cpu')), "this pass is only for CPU modules"
+        elif node.op == 'call_function':
+            if node.target in mkldnn_supported:
+                supports_mkldnn = MklSupport.YES
+            elif node.target in mkldnn_supported_unknown:
+                supports_mkldnn = MklSupport.UNKNOWN
+
+        if supports_mkldnn != MklSupport.NO:
+            if supports_mkldnn == MklSupport.UNKNOWN:
+                if not any(arg.target == 'to_dense' for arg in node.args):
+                    continue
+            with fx_graph.inserting_before(node):
+                mkldnn_args = fx.map_arg(node.args, lambda n: fx_graph.call_method('to_mkldnn', (n, )))
+
+            node.args = cast(Tuple[fx.node.Argument], mkldnn_args)
+
+            with fx_graph.inserting_after(node):
+                dense_x = fx_graph.create_node('call_method', 'to_dense', (node,))
+                node.replace_all_uses_with(dense_x)
+                dense_x.args = (node,)
+
+    # Does pre-conversion of all modules into MKLDNN (when possible)
+    old_modules = modules_to_mkldnn(list(fx_graph.nodes), modules)
+    fx_graph.old_modules = old_modules  # type: ignore[attr-defined]
+
+    # Optimizes all a -> to_dense -> to_mkldnn -> b patterns into a -> b
+    for node in fx_graph.nodes:
+        if node.op == 'call_method' and node.target == 'to_dense':
+            prv_node = node.args[0]
+            users = list(node.users)
+            for user in users:
+                if user.op == 'call_method' and user.target == 'to_mkldnn':
+                    user.replace_all_uses_with(prv_node)
+                    fx_graph.erase_node(user)
+            if len(node.users) == 0:
+                fx_graph.erase_node(node)
+
+
+    num_nodes = len(fx_graph.nodes)
+    uf = UnionFind(num_nodes)
+
+    def get_color(n):
+        if hasattr(n, 'color'):  # Current node is part of a MKL subgraph
+            return uf.find(n.color)
+        if hasattr(n, 'start_color'):  # Current node is input to MKL subgraph
+            return uf.find(n.start_color)
+        return None
+
+
+    # This code finds each MKLDNN subgraph. Each MKLDNN subgraph consists
+    # of input nodes (which are only `to_mkldnn` calls), output nodes
+    # (`to_dense` calls), and intermediate nodes, which are run entirely on
+    # MKLDNN layout tensors.
+    #
+    # Specifically, this code does a flood fill on a directed acyclic graph
+    # (DAG), starting from each possible "start node" (i.e.: `to_mkldnn` nodes).
+    # If every node only had one input, this would be sufficient. However, in
+    # the case that a node has multiple inputs coming from different start
+    # nodes (i.e. colors), we need to join these 2 colors into 1. That's done
+    # using a Disjoint Set Union.
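+    # A small illustration of the coloring (a sketch; colors are the node
+    # indices assigned below):
+    #
+    #     a = x.to_mkldnn()    # start node, gets color 0
+    #     b = y.to_mkldnn()    # start node, gets color 1
+    #     c = torch.add(a, b)  # sees colors {0, 1} -> uf.join(0, 1)
+    #     d = c.to_dense()     # end node, tagged with the merged color
+    #
+    # After the join, a, b, c and d all land in the same MklSubgraph.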
+    for cur_idx, node in enumerate(fx_graph.nodes):
+        if node.op == 'call_method' and node.target == 'to_mkldnn':
+            node.start_color = cur_idx
+            uf.make_set(cur_idx)
+        elif node.op == 'call_method' and node.target == 'to_dense':
+            assert(get_color(node.args[0]) is not None)
+            node.end_color = get_color(node.args[0])
+        else:
+            cur_colors = [get_color(i) for i in node.all_input_nodes if isinstance(i, fx.Node) if get_color(i) is not None]
+
+            if len(cur_colors) == 0:
+                continue
+            assert(not any(i is None for i in cur_colors))
+            cur_colors = sorted(cur_colors)
+            node.color = cur_colors[0]
+            for other_color in cur_colors[1:]:
+                uf.join(cur_colors[0], other_color)
+
+
+    mkldnn_graphs: Dict[int, MklSubgraph] = defaultdict(lambda: MklSubgraph(fx_graph))
+    for node in fx_graph.nodes:
+        if hasattr(node, 'color'):
+            mkldnn_graphs[uf.find(node.color)].nodes.append(node)
+        if hasattr(node, 'start_color'):
+            mkldnn_graphs[uf.find(node.start_color)].start_nodes.append(node)
+        if hasattr(node, 'end_color'):
+            mkldnn_graphs[uf.find(node.end_color)].end_nodes.append(node)
+
+
+    # Now that we have all the subgraphs, we need to decide which MKLDNN
+    # subgraphs we actually want to keep in MKLDNN.
+    for graph in mkldnn_graphs.values():
+        if not use_mkl_heuristic(graph):
+            for node in graph.start_nodes + graph.end_nodes:
+                prv = node.args[0]
+                node.replace_all_uses_with(prv)
+                fx_graph.erase_node(node)
+            reset_modules(graph.nodes, modules, old_modules)
+
+    mkldnn_conversions = 0
+    for node in fx_graph.nodes:
+        if node.target == 'to_mkldnn' or node.target == 'to_dense':
+            mkldnn_conversions += 1
+
+    logging.getLogger(__name__).info(f"mkldnn conversions: {mkldnn_conversions}")
+    fx_graph.lint()
+    result = fx.GraphModule(model, fx_graph)
+    return result
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/partitioner_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/partitioner_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d96c6b40667f334870a07ad4aa09d207f95080f4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/partitioner_utils.py
@@ -0,0 +1,317 @@
+from enum import Enum
+from typing import NamedTuple, Dict, List, Set
+
+from torch.fx.node import Node, map_arg
+
+
+class Partition:
+    """Partition class contains all the information about an individual partition.
+    It also provides necessary methods for manipulating the partition.
+ """ + + def __init__(self, partition_id: int) -> None: + self.nodes: Set[Node] = set() + self.partition_id = partition_id + self.parents: Set[Partition] = set() + self.children: Set[Partition] = set() + self.bfs_level: int = -1 + self.used_mem_bytes: int = 0 + self.logical_device_ids: List[int] = [] + + def __str__(self): + return str(self.partition_id) + + def recalculate_mem_size(self): + self.used_mem_bytes = 0 + for node in self.nodes: + self.used_mem_bytes += get_extra_size_of(node, self.nodes) + + def add_node(self, node): + input_nodes: Dict[Node, None] = {} + map_arg(node.args, input_nodes.setdefault) + map_arg(node.kwargs, input_nodes.setdefault) + # Add current node's input nodes if they are placeholder or constants + for n in input_nodes: + if n.op in {"placeholder", "get_attr"}: + self.nodes.add(n) + self.nodes.add(node) + self.recalculate_mem_size() + + def remove_node(self, node): + # Remove a node only if the node is in the partition + if node in self.nodes: + self.nodes.remove(node) + # Collect the node's input nodes + input_nodes: Dict[Node, None] = {} + map_arg(node.args, input_nodes.setdefault) + map_arg(node.kwargs, input_nodes.setdefault) + # Check if an input node is a placeholder or get_attr, + # and this input node is not used by some other nodes in this partition, + # the remove this input node + for input_node in input_nodes: + if all( + n not in self.nodes for n in input_node.users + ) and input_node.op in {"placeholder", "get_attr"}: + self.nodes.remove(input_node) + self.recalculate_mem_size() + + +class Device(NamedTuple): + name: str + available_mem_bytes: int + logical_id: int + + +class NodeLatency(NamedTuple): + # Latency due to the memory bandwidth + mem_latency_sec: float + # Latency due to the computation + computer_latency_sec: float + + +class PartitionLatency(NamedTuple): + # Sum of all nodes' memory latency on the critical path + mem_latency_sec: float + # Sum of all nodes' compute latency on the critical path + computer_latency_sec: float + # Latency of the critical path + overall_latency_sec: float + + +class PartitionMode(Enum): + size_based = 0 + sparse_nn = 1 + cost_aware = 2 + kl_based = 3 + aot_based = 4 + + +class PartitionerConfig(NamedTuple): + devices: List[Device] + mode: PartitionMode = PartitionMode.size_based + transfer_rate_bytes_per_sec: float = 0.0 + node_to_latency_mapping: Dict[Node, NodeLatency] = {} + node_to_partition_mapping: Dict[Node, int] = {} + partition_to_logical_device_mapping: Dict[int, List[int]] = {} + # Saturate host by replicating partitions to the remaining idle devices. + saturate_host: bool = False + + +def get_extra_size_of(node: Node, nodes: Set[Node]) -> int: + """Given a node and a set of nodes, + this function return the extra size that needed + if this node is included in this set. 
+ """ + # Find all its input nodes + input_nodes: Dict[Node, None] = {} + map_arg(node.args, input_nodes.setdefault) + map_arg(node.kwargs, input_nodes.setdefault) + # Calculate total size of related nodes + total_size_of_input_nodes = 0 + for n in input_nodes: + # Make sure this node hasn't been in this set yet + if n not in nodes: + size_bytes = getattr(n, "size_bytes", None) + if size_bytes: + total_size_of_input_nodes += size_bytes.output_size + else: + raise RuntimeError("node has no size_bytes attr") + # Don't forget the op node itself + size_bytes = getattr(node, "size_bytes", None) + if size_bytes: + total_size_of_input_nodes += size_bytes.total_size + else: + raise RuntimeError("node has no size_bytes attr") + return total_size_of_input_nodes + + +def get_latency_of_one_partition( + partition: Partition, node_to_latency_mapping: Dict[Node, NodeLatency] +) -> PartitionLatency: + """Given a partition and its nodes' latency, return a PartitionLatency for this partition""" + + def get_top_nodes(partition: Partition) -> List[Node]: + """Given a partition, return a list of nodes on the top bfs level""" + top_nodes: List[Node] = [] + for node in partition.nodes: + # Skip placeholder and get_attr nodes + if node.op in {"placeholder", "get_attr"}: + continue + input_nodes: Dict[Node, None] = {} + map_arg(node.args, input_nodes.setdefault) + map_arg(node.kwargs, input_nodes.setdefault) + # If a node has no input nodes in this partition, + # or its input nodes in this partition are placeholders and get_attrs + # this node is on the top bfs level in this partition + if not any( + n in partition.nodes and n.op not in {"placeholder", "get_attr"} + for n in input_nodes + ): + top_nodes.append(node) + return top_nodes + + def dfs_helper(node: Node, partition_latency) -> PartitionLatency: + """Given a top node of a partition, this function returns + the latency of the critical path in the partition + """ + node_latency = node_to_latency_mapping[node] + # Calculate the current overall latency of the partition + overall_latency_sec = partition_latency.overall_latency_sec + max( + node_latency.computer_latency_sec, node_latency.mem_latency_sec + ) + # Update the mem latency of this path + mem_latency_sec = ( + partition_latency.mem_latency_sec + node_latency.mem_latency_sec + ) + # Update the compute latency of this path + computer_latency_sec = ( + partition_latency.computer_latency_sec + node_latency.computer_latency_sec + ) + # Get all users of this node that are in this partition + users = set(node.users).intersection(partition.nodes) + if users: + max_latency = PartitionLatency( + mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0 + ) + for n in users: + # Get new partition latency recursively + new_partition_latency = dfs_helper( + n, + PartitionLatency( + mem_latency_sec, computer_latency_sec, overall_latency_sec + ), + ) + if ( + new_partition_latency.overall_latency_sec + > max_latency.overall_latency_sec + ): + max_latency = new_partition_latency + return max_latency + # If there is no user, the node is at bottom of the partition + return PartitionLatency( + mem_latency_sec, computer_latency_sec, overall_latency_sec + ) + + # Main part starts + # Get all top level nodes of this partition + top_nodes = get_top_nodes(partition) + critical_path_latency = PartitionLatency( + mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0 + ) + # Go through all top nodes and find the largest latency (critical pass latency) + for node in top_nodes: + partition_latency = 
+            node,
+            PartitionLatency(
+                mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
+            ),
+        )
+        if (
+            partition_latency.overall_latency_sec
+            > critical_path_latency.overall_latency_sec
+        ):
+            critical_path_latency = partition_latency
+    return critical_path_latency
+
+
+def get_partition_to_latency_mapping(
+    partitions: List[Partition], node_to_latency_mapping: Dict[Node, NodeLatency]
+) -> Dict[Partition, PartitionLatency]:
+    """Given all the partitions and node_to_latency_mapping dictionary,
+    return a mapping dictionary of each partition to its overall latency
+    """
+    partition_to_latency_mapping: Dict[Partition, PartitionLatency] = {}
+    # Go through each partition and get its latency
+    for partition in partitions:
+        partition_latency = get_latency_of_one_partition(
+            partition, node_to_latency_mapping
+        )
+        partition_to_latency_mapping[partition] = partition_latency
+    return partition_to_latency_mapping
+
+
+def get_comm_latency_between(
+    parent_partition: Partition,
+    child_partition: Partition,
+    transfer_rate_bytes_per_sec: float,
+):
+    """Given two partitions (parent and child),
+    calculate the communication latency between the two.
+    """
+    # If two partitions are on the same device, the comm latency is 0.
+    if (
+        parent_partition.logical_device_ids != []
+        and child_partition.logical_device_ids != []
+        and parent_partition.logical_device_ids == child_partition.logical_device_ids
+    ):
+        return 0.0
+    # Keep track of the communication size between parent and child
+    comm_size = 0
+    # Keep track of all the nodes that have already been counted
+    visited_nodes = set()
+    # Go through all nodes in the child partition
+    # If a node has input nodes from the parent partition,
+    # the output size of those input nodes will be counted
+    # and added to comm_size
+    for node in child_partition.nodes:
+        input_nodes: Dict[Node, None] = {}
+        map_arg(node.args, input_nodes.setdefault)
+        map_arg(node.kwargs, input_nodes.setdefault)
+        for n in input_nodes:
+            if n in parent_partition.nodes and n not in visited_nodes:
+                size_bytes = getattr(n, "size_bytes", None)
+                if size_bytes is not None:
+                    comm_size += size_bytes.output_size
+                visited_nodes.add(n)
+    return comm_size / transfer_rate_bytes_per_sec
+
+
+def get_latency_of_partitioned_graph(
+    partitions: List[Partition],
+    partition_to_latency_mapping: Dict[Partition, PartitionLatency],
+    transfer_rate_bytes_per_sec: float,
+):
+    """Given all partitions in a graph, find the critical path among all partitions
+    and return its latency as the latency of the whole graph
+    """
+
+    def dfs_helper(partition: Partition, latency_so_far_sec: float) -> float:
+        """This function helps to recursively get the latency of a path of partitions"""
+        # Update latency by adding current partition's latency
+        latency_so_far_sec += partition_to_latency_mapping[
+            partition
+        ].overall_latency_sec
+        if partition.children:
+            max_latency_sec = 0.0
+            for child in partition.children:
+                # Communication latency between this partition and the child
+                comm_latency_sec = get_comm_latency_between(
+                    partition, child, transfer_rate_bytes_per_sec
+                )
+                new_latency_sec = dfs_helper(
+                    child, latency_so_far_sec + comm_latency_sec
+                )
+                if new_latency_sec > max_latency_sec:
+                    max_latency_sec = new_latency_sec
+            return max_latency_sec
+        return latency_so_far_sec
+
+    def get_top_partitions(partitions: List[Partition]) -> List[Partition]:
+        """Return all the partitions without parents
+        as the starting points of all the paths
+        """
+        top_partitions = []
+        for partition
in partitions: + # If a partition has no parents, then it is a top partition + if len(partition.parents) == 0: + top_partitions.append(partition) + return top_partitions + + top_partitions = get_top_partitions(partitions) + critical_path_latency_sec = 0.0 + for partition in top_partitions: + latency_sec = dfs_helper(partition, 0.0) + if latency_sec > critical_path_latency_sec: + critical_path_latency_sec = latency_sec + return critical_path_latency_sec diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..e1c2ca45b6ed34eac848c395d2d400a5fe88d170 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py @@ -0,0 +1,924 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import contextlib +import functools +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +import torch +import torch.utils._pytree as pytree +from torch.fx import Tracer, GraphModule +from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode, unset_fake_temporarily, is_fake +from torch._dispatch.python import enable_python_dispatcher, enable_pre_dispatch +import torch.fx as fx +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from contextlib import contextmanager, nullcontext +import inspect +from dataclasses import dataclass +import weakref +import operator +from torch.utils._stats import count +import logging + +from torch.overrides import TorchFunctionMode + +from torch.utils._python_dispatch import ( + TorchDispatchMode, + _pop_mode, + _push_mode, +) + +from .sym_node import SymNode +from ._sym_dispatch_mode import SymDispatchMode +from torch.fx import Proxy +import torch.fx.traceback as fx_traceback +from torch import SymInt, SymFloat, SymBool +from torch.utils.weak import WeakTensorKeyDictionary + +__all__ = ["PythonKeyTracer", "dispatch_trace", "make_fx", "DecompositionInterpreter", "py_sym_types", "get_innermost_proxy_mode"] +aten = torch.ops.aten +prim = torch.ops.prim + +log = logging.getLogger(__name__) +not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented") + +CURRENT_DECOMPOSITION_TABLE: Dict[torch._ops.OperatorBase, Callable] = {} + +CONSTANT_NUMEL_LIMIT = 1 + +# We currently convert all SymInt to proxies before we use them. +# This could plausibly be handled at the Dynamo level. 
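+#
+# The registration below makes pytree treat torch.Size as a container rather
+# than a leaf. A sketch of the effect:
+#
+#   leaves, spec = pytree.tree_flatten(torch.Size([2, 3]))  # leaves == [2, 3]
+#   pytree.tree_unflatten(leaves, spec)                     # == (2, 3)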
+pytree.register_pytree_node(torch.Size, lambda x: (list(x), None), lambda xs, _: tuple(xs)) + +def fake_signature(fn, nargs): + """FX gets confused by varargs, de-confuse it""" + argnames = ",".join(f"arg{i}" for i in range(nargs)) + return eval(f"lambda {argnames}: fn({argnames})", {"fn": fn}) + +@contextmanager +def decompose(decomposition_table): + global CURRENT_DECOMPOSITION_TABLE + old_decomposition_table = CURRENT_DECOMPOSITION_TABLE + CURRENT_DECOMPOSITION_TABLE = decomposition_table + try: + yield CURRENT_DECOMPOSITION_TABLE + finally: + CURRENT_DECOMPOSITION_TABLE = old_decomposition_table + +# ensure we cannot collide with other properties +proxy_slot = object() +no_default = object() + +py_sym_types = (SymInt, SymFloat, SymBool) +def is_sym_node(node): + assert hasattr(node, 'meta'), "All nodes traced with proxy_tensor should have meta" + return "val" in node.meta and isinstance(node.meta['val'], py_sym_types) + +def set_proxy_slot(obj, tracer, proxy): + if isinstance(obj, torch.Tensor): + # We DO want to clobber proxies whenever we run an inplace operation + # on a tensor, and it affects the metadata on the proxy. + tracer.tensor_tracker[obj] = proxy + else: + # NB: Never clobber pre-existing proxy. Although the proxies + # are in principle equivalent, when we do graph partitioning + # we need there not to be spurious dependencies on tangent inputs. + # This works because primals get their SymInts set first, and + # THEN later we allocate tangent inputs. Make sure if a SymInt + # is derivable from a primal that we use that. + assert isinstance(obj, SymNode), type(obj) + if obj not in tracer.symnode_tracker: + tracer.symnode_tracker[obj] = proxy + +def has_proxy_slot(obj, tracer): + assert isinstance(obj, (torch.Tensor, SymNode)), type(obj) + return get_proxy_slot(obj, tracer, False, lambda _: True) + +# the default argument is what to return if the slot is not set. +# the transform argument is handy if you need to extract a subfield from +# the successfully looked up result (but NOT the default.) +def get_proxy_slot(obj, tracer, default=no_default, transform=lambda x: x): + if isinstance(obj, torch.Tensor): + tracker = tracer.tensor_tracker + else: + assert isinstance(obj, SymNode), type(obj) + tracker = tracer.symnode_tracker + + if obj not in tracker: + if default is no_default: + raise RuntimeError(f"{obj} is not tracked with proxy for {tracer}") + return default + return transform(tracker[obj]) + +def snapshot_fake(val): + return val.detach() + +def extract_val(val): + if is_fake(val): + return snapshot_fake(val) + elif isinstance(val, py_sym_types): + return val + elif isinstance(val, (list, tuple)): + return val.__class__([extract_val(x) for x in val]) + elif isinstance(val, torch.Tensor): + if not val.is_sparse: + # NB: Kinda hacky, but we should try to get val as the metadata + # everywhere + # TODO: This doesn't properly track storages. A more robust + # approach would be to maintain a per-trace FakeTensorMode and + # from_real_tensor to create fake values (don't forget to + # snapshot_fake) + fake_tensor_mode = FakeTensorMode(allow_fallback_kernels=True) + with fake_tensor_mode: + return torch.empty_strided(val.shape, val.stride(), device=val.device, dtype=val.dtype) + else: + return None + elif isinstance(val, (int, float, bool)): + return val + +# What invariants do we have for the 'val' set on the FX node? It has accurate +# metadata... 
but only for metadata that exists "below" all other subsystems +# (most notably autograd, but also vmap, functorch transforms, etc). This means +# you can get the dtype, shape, stride, storage, but you CANNOT get requires_grad, +# grad_fn, _base (_base actually may be set due to recursive call to +# ADInplaceOrView, but you shouldn't rely on it.) +def set_meta(proxy, val): + proxy.node.meta['val'] = extract_val(val) + # Best effort tensor_meta setting; prefer using val! + if is_fake(val): + proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(val) + elif isinstance(val, torch.Tensor) and not val.is_sparse: + proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(val) + return proxy + +def thunkify(f, *args, **kwargs): + """ + Delays computation of f until it's called again + Also caches the result + """ + return functools.lru_cache(1)(functools.partial(f, *args, **kwargs)) + +def track_tensor(tensor, proxy, *, constant, tracer): + def try_set_proxy_slot(outer_s, proxy_callable, *args): + assert callable(proxy_callable) + if isinstance(outer_s, SymInt): + inner_s = outer_s.node + set_proxy_slot(inner_s, tracer, thunkify(proxy_callable, outer_s, *args)) + + # The basic idea is that we need to associate each tensor/SymInt + # with a Proxy. How do we setup this association? We just store + # the proxy on the proxy slot of the object, keyed on the tracer + # (so that if we have multiple tracers at the same time, they + # don't clobber each other.) + for i, s in enumerate(tensor.shape): + try_set_proxy_slot(s, lambda x, i: set_meta(torch.ops.aten.sym_size.int(proxy, i), x), i) + + for i, s in enumerate(tensor.stride()): + try_set_proxy_slot(s, lambda x, i: set_meta(torch.ops.aten.sym_stride.int(proxy, i), x), i) + + try_set_proxy_slot(tensor.numel(), lambda x: set_meta(torch.ops.aten.sym_numel.default(proxy), x)) + try_set_proxy_slot(tensor.storage_offset(), lambda x: set_meta(torch.ops.aten.sym_storage_offset.default(proxy), x)) + set_proxy_slot(tensor, tracer, _ProxyTensor(proxy, constant)) + +def track_tensor_tree(inner_res, proxy_res, *, constant, tracer): + def wrap_with_proxy(e, proxy, constant): + if isinstance(e, torch.Tensor): + track_tensor(e, proxy, tracer=tracer, constant=constant) + set_meta(proxy, e) + elif isinstance(e, py_sym_types): + # NB: eagerly set meta here, so that the numbering is in order + set_meta(proxy, e) + set_proxy_slot(e.node, tracer, lambda: proxy) + elif isinstance(e, (tuple, list)): + if isinstance(proxy, fx.Proxy): + set_meta(proxy, e) + + # example use case: allreduce_ returns ([tensor], work) + for idx, ee in enumerate(e): + wrap_with_proxy(ee, proxy[idx], get_constant(idx)) + elif isinstance(e, dict): + # In theory we could support const-prop when proxy-tensor-tracing + # operators that returns dicts of tensors, but we have no use case + # for it today (since the only op we currently trace that can + # return a dict is triton_kernel_wrapper_functional/mutation, + # which does not participate in const-prop) + assert constant is None + + if isinstance(proxy, fx.Proxy): + set_meta(proxy, e) + + # example use case: triton_kernel_wrapper takes arguments as kwargs + for key, val in e.items(): + wrap_with_proxy(val, proxy[key], None) + else: + # intentionally pass on primitives + pass + + + def get_constant(idx): + if constant is None: + return None + else: + return constant[idx] + + wrap_with_proxy(inner_res, proxy_res, constant) + + return inner_res + + +def maybe_disable_fake_tensor_mode(): + # TODO: figure out if this API generally makes sense and bake 
it into the + # library + return unset_fake_temporarily() + + +@dataclass +class _ProxyTensor: + proxy: Proxy + constant: Optional[torch.Tensor] + + +def fetch_sym_proxy(tracer): + def inner(e): + n = e.node + if n.constant is not None: + return n.constant + else: + # NB: we REQUIRE all symints to be tracked + return get_proxy_slot(n, tracer)() + return inner + + +def fetch_tensor_proxy(tracer): + return lambda t: get_proxy_slot(t, tracer, t) + +HANDLED_TYPES = (torch.Tensor, torch.nn.Parameter, FakeTensor) + +def proxy_call(proxy_mode, func, pre_dispatch, args, kwargs): + unrecognized_types = [] + + def can_handle_tensor(x): + r = type(x) in HANDLED_TYPES or has_proxy_slot(x, proxy_mode.tracer) + if proxy_mode._allow_fake_constant: + r = r or type(x) in (torch._subclasses.FakeTensor,) + if not r: + unrecognized_types.append(type(x)) + return r + + # If there are any tensor subclasses, we need to handle those tensor subclasses first + # TODO: we could use types to test this + if not pytree.tree_all_only(torch.Tensor, can_handle_tensor, (args, kwargs)): + not_implemented_log.debug("ProxyTensorMode tensors without proxy had unrecognized subclasses: %s", unrecognized_types) + return NotImplemented + + r = maybe_handle_decomp(proxy_mode, func, args, kwargs) + if r is not NotImplemented: + return r + + # For pre-autograd tracing, we do not want to run CompositeImplicit decomps. + if not pre_dispatch and func not in [ + torch.ops.aten.size.default, torch.ops.aten.stride.default, torch.ops.aten.storage_offset.default + ]: + with proxy_mode: + r = func.decompose(*args, **kwargs) + if r is not NotImplemented: + return r + + tracer = proxy_mode.tracer + f_args, f_kwargs = pytree.tree_map_only(torch.Tensor, fetch_tensor_proxy(tracer), (args, kwargs)) + + # If there are SymInts, we also should not consider this constant. + # However, fake tensor handling of SymInts is sufficiently broken that + # I couldn't write a test for this case + all_constant = ( + pytree.tree_all_only(_ProxyTensor, lambda t: t.constant is not None, (f_args, f_kwargs)) + # TODO: maybe constant SymInts should also be allowed? Not sure if + # this can happen + and pytree.tree_all_only((SymInt, SymFloat, SymBool), lambda _: False, (args, kwargs)) + ) + + if torch.Tag.data_dependent_output in func.tags: + # Check if all of the Tensor inputs are constants + if all_constant: + const_args, const_kwargs = pytree.tree_map_only( + _ProxyTensor, lambda t: t.constant, (f_args, f_kwargs) + ) + with maybe_disable_fake_tensor_mode(): + return func(*const_args, **const_kwargs) + # If any of the Tensor inputs are "real" (not FakeTensor), we may + # incorrectly burn in constants by allowing this access. Raise + # an error in this case + if proxy_mode._error_on_data_dependent_ops and pytree.tree_all_only(torch.Tensor, lambda t: not is_fake(t), (args, kwargs)): + raise RuntimeError( + f"It appears that you're trying to get value out of a tracing tensor with {func} - erroring out! " + "It's likely that this is caused by data-dependent control flow or similar. " + "It may be possible to trace this with dynamic shapes; try setting tracing_mode='symbolic' " + "in your make_fx call." + ) + proxy_args, proxy_kwargs = pytree.tree_map_only( + (SymInt, SymFloat, SymBool), + fetch_sym_proxy(proxy_mode.tracer), + pytree.tree_map_only(_ProxyTensor, lambda e: e.proxy, (f_args, f_kwargs)) + ) + + # When we trace through a torch.tensor invocation, you never actually + # see a torch.ops.aten.tensor call. 
Instead, the way this function is
+    # implemented internally is that we allocate a plain tensor (this is
+    # *guaranteed* to be a plain tensor, we disable all modes when doing
+    # so), and then call at::lift_fresh on it (to give modes a chance to do
+    # their stuff). Furthermore, the tensor argument to lift_fresh is guaranteed
+    # to be freshly allocated, so we want lift_fresh to be a no-op (directly
+    # returning the input argument).
+    #
+    # Here is the basic problem: when we trace this sequence of executions
+    # into an FX graph, what happens to this call sequence? Traditionally,
+    # tensor constants get interned as buffers on the FX GraphModule. But
+    # this is dangerous. Consider:
+    #
+    #       x = torch.tensor(1)
+    #       x.add_(2)
+    #
+    # Naively, this traces into:
+    #
+    #       t = self._tensor_constant0  # initialized to torch.tensor(1)
+    #       x = torch.ops.aten.lift_fresh(t)
+    #       x.add_(2)
+    #
+    # If lift_fresh returns t directly, the subsequent add_ call will
+    # modify the tensor constant. Really, the problem is we've violated
+    # the invariant that the argument to lift is fresh. So we should
+    # preserve the invariant by replacing lift_fresh with lift_fresh_copy:
+    #
+    #       t = self._tensor_constant0  # initialized to torch.tensor(1)
+    #       x = torch.ops.aten.lift_fresh_copy(t)
+    #       x.add_(2)
+    #
+    # This is what the overload modification does.
+    if func is torch.ops.aten.lift_fresh.default:
+        func = torch.ops.aten.lift_fresh_copy.default
+
+    proxy_out = proxy_mode.tracer.create_proxy('call_function', func, proxy_args, proxy_kwargs,
+                                               name=proxy_mode.tracer.graph._target_to_str(func.overloadpacket.__name__))
+
+    # This makes DCE marginally less likely to DCE inplace operations.
+    # It is not strictly necessary.
+    # Kind of a hacky way to test if an op is in-place or not
+    if func.overloadpacket.__name__[-1] == "_" and func.overloadpacket.__name__[0] != "_":
+        if isinstance(args[0], List):
+            # e.g., c10d::allreduce_ returns a list of tensors as the first element
+            # in the output.
+            for i, a in enumerate(args[0]):
+                a.proxy = proxy_out[0][i]
+        else:
+            args[0].proxy = proxy_out
+
+    out = func(*args, **kwargs)
+
+    # In some circumstances, we will be tracing in a situation where a tensor
+    # is *statically* known to be a constant (currently, this only happens if
+    # you run torch.tensor; deterministic factory functions like torch.arange
+    # don't get this treatment). When the tensor in question is small, it's
+    # helpful to do constant propagation in case we call item() (in which
+    # case we can return the constant value that is known, rather than give
+    # an error.) The logic here tests if constant propagation is possible
+    # (because all of the inputs are constant). If so, we disable fake tensor
+    # mode (if it is on) and do true compute on the constant.
+    #
+    # It's worth highlighting that we're making a policy decision here.
+    # There is a potential that the tensor is actually quite large, and we
+    # don't actually want to run the compute. The tensor being quite large
+    # is one of the reasons why factory functions don't get this treatment
+    # (since they can be quite large; if a parameter is initialized to a
+    # constant value it will be!) Similarly, there is also a potential
+    # to run an operator that blows up the size of a small tensor; we don't
+    # protect against this case, but we could force, e.g., only single
+    # element constant computation by testing the numel of the result before
+    # propagating const-ness. Similarly, we don't require the constant to
+    # live on CPU, but we could.
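+    # A concrete sketch of the policy this implements:
+    #
+    #     x = torch.tensor(1)   # lift_fresh_copy path; constant is recorded
+    #     y = x + 1             # small, all-constant inputs -> propagated
+    #     y.item()              # returns 2 during tracing instead of erroring
+    #
+    # By contrast, torch.arange(1) is a factory call: no constant is recorded,
+    # so calling .item() on its output would hit the data-dependent-ops error
+    # when tracing with fake tensors.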
+ any_constant = pytree.tree_any_only(_ProxyTensor, lambda t: t.constant is not None, (f_args, f_kwargs)) + + constant = None + + # If this is a lift, the input tensor is guaranteed to be a + # constant, so we keep a copy of the original argument along so + # we can query it if we're asked to item() it at some later point + if func is torch.ops.aten.lift_fresh_copy.default and out.numel() <= CONSTANT_NUMEL_LIMIT: + with maybe_disable_fake_tensor_mode(): + constant = args[0].clone() + elif ( + torch.Tag.nondeterministic_seeded not in func.tags + and all_constant + and any_constant + and pytree.tree_all_only(torch.Tensor, lambda t: t.numel() <= CONSTANT_NUMEL_LIMIT, out) + ): + # NB: do NOT include factories as constants + with maybe_disable_fake_tensor_mode(): + const_args, const_kwargs = pytree.tree_map_only( + _ProxyTensor, lambda t: t.constant, (f_args, f_kwargs) + ) + constant = func(*const_args, **const_kwargs) + else: + constant = None + + track_tensor_tree(out, proxy_out, constant=constant, tracer=tracer) + return out + + +class PythonKeyTracer(Tracer): + def __init__(self): + super().__init__(autowrap_modules=()) + self.tensor_tracker = WeakTensorKeyDictionary() + self.symnode_tracker = weakref.WeakKeyDictionary() # type: ignore[var-annotated] + + # In general, we don't want to make modules leaves. In principle, users of + # this tracer might want to override this in order to turn a couple specific + # modules into leaves in the traced graph. + def call_module( + self, m: torch.nn.Module, forward: Callable[..., Any], args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Any: + return forward(*args, **kwargs) + + # We don't want to turn getattr calls into proxies. So we just return the actual value. + def getattr(self, attr, attr_val, parameter_proxy_cache): + return attr_val + + def create_arg(self, a: Any): + if isinstance(a, torch.nn.Parameter): + for n, p in self.root.named_parameters(): + if a is p: + return self.create_node('get_attr', n, (), {}) + qualname: Optional[str] = None + + if not qualname: + i = 0 + while True: + qualname = f'_param_constant{i}' + if not hasattr(self.root, qualname): + break + i += 1 + setattr(self.root, qualname, a) + + return self.create_node('get_attr', qualname, (), {}) + elif isinstance(a, (SymInt, SymFloat, SymBool)): + assert a.node.constant is not None + return a.node.constant + return super().create_arg(a) + + def unwrap_proxy(self, e): + if isinstance(e, torch.Tensor): + return get_proxy_slot(e, self, e, lambda e: e.proxy) + elif isinstance(e, (torch.SymInt, torch.SymFloat, torch.SymBool)): + return get_proxy_slot(e.node, self, e, lambda e: e()) + else: + return e + + +@torch._disable_dynamo +def dispatch_trace( + root: Union[torch.nn.Module, Callable], + tracer: Tracer, + concrete_args: Optional[Tuple[Any, ...]] = None, +) -> GraphModule: + graph = tracer.trace(root, concrete_args) + name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__ + return GraphModule(tracer.root, graph, name) + + +@contextlib.contextmanager +def _pop_proxy_mode_temporarily(dk): + # This is a shim around the existng per-dispatch-key-mode logic. 
+ # I'll delete the per-dispatch-key-mode logic in a followup PR + if dk is not None: + # During pre_dispatch, pop off of the PreDispatch mode stack + old = _pop_mode(dk) + try: + yield old + finally: + _push_mode(old, dk) + else: + # During normal tracing, pop off of the dedicated proxy mode stack + old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY) + try: + yield old + finally: + torch._C._set_dispatch_mode(old) + +def wrap_key(f, tensors, tracer, pre_dispatch: bool): + flat_tensors, tensors_spec = pytree.tree_flatten(tensors) + dk = torch._C.DispatchKey.PreDispatch if pre_dispatch else None + + @functools.wraps(f) + def wrapped(*proxies): + flat_proxies, proxies_spec = pytree.tree_flatten(proxies) + assert len(flat_proxies) == len(flat_tensors) + with _pop_proxy_mode_temporarily(dk) as m: + assert isinstance(m, ProxyTorchDispatchMode) + track_tensor_tree(flat_tensors, flat_proxies, constant=None, tracer=tracer) + + out = f(*tensors) + out = pytree.tree_map_only( + torch.Tensor, + lambda t: get_proxy_slot(t, tracer, t, lambda x: x.proxy), + out + ) + out = pytree.tree_map_only( + (SymInt, SymFloat, SymBool), + lambda t: get_proxy_slot(t.node, tracer)(), + out + ) + return out + + return wrapped + +ORIGINAL_ATEN = None +@contextmanager +def set_original_aten_op(func): + global ORIGINAL_ATEN + if ORIGINAL_ATEN is None and fx_traceback.has_preserved_node_meta(): + ORIGINAL_ATEN = func + fx_traceback.current_meta['original_aten'] = func + try: + yield + finally: + ORIGINAL_ATEN = None + fx_traceback.current_meta['original_aten'] = None + else: + yield + + + +# This mode is **only** used for pre_dispatch tracing. +# In particular, we need to make sure that autograd/autocast API's +# that do not desugar into dispatcher operators stay in the graph. +class PreDispatchTorchFunctionMode(TorchFunctionMode): + + def __init__(self, tracer): + self.tracer = tracer + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + pre_dispatch_ops = [ + torch._C._set_grad_enabled, + torch.amp._enter_autocast, + torch.amp._exit_autocast, + ] + if func in pre_dispatch_ops: + return self.tracer.create_node("call_function", func, args, {}) + # Don't actually run the function! We just want to trace the calls + # into a graph. We don't actualy want to change global autograd state. + return func(*args, **kwargs) + +class ProxyTorchDispatchMode(TorchDispatchMode): + def __init__(self, tracer, tracing_mode, pre_dispatch=False, _allow_fake_constant=False, _error_on_data_dependent_ops=True): + dk = torch._C.DispatchKey.PreDispatch if pre_dispatch else None + super().__init__(dk) + self.tracer = tracer + self.tracing_mode = tracing_mode + self.enable_tracing = True + self.pre_dispatch = pre_dispatch + self._allow_fake_constant = _allow_fake_constant + self._error_on_data_dependent_ops = _error_on_data_dependent_ops + self.sym_mode = ProxySymDispatchMode(tracer) + self.trace_state = {} + self._managers = [] + # Indicates to our torch_dispatch dispatching infra that + # this is an "infra" mode with lower dispatching precedence. + self._mode_key = torch._C._TorchDispatchModeKey.PROXY + # Every time we enter a mode, we maintain a stack telling us what the previous + # ProxyTorchDispatchMode state was (if there was any). + # This lets us properly reset the state on exit. 
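+        # A sketch of the resulting nesting behavior: if mode A is active and
+        # mode B is entered, B stashes A on its own enter_stack; when B exits,
+        # A is re-installed. Only one proxy mode is ever "active" in the
+        # dispatcher at a time, but nesting still behaves like a stack.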
+ self.enter_stack: List[Optional[ProxyTorchDispatchMode]] = [] + + @count + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + with self.sym_mode.enable(False), set_original_aten_op(func): + return self.inner_torch_dispatch(func, types, args, kwargs) + + def __enter__(self): + # sym mode first, then us... + m = self.sym_mode.enable(True) + self._managers.append(m) + m.__enter__() + # Stash and store the previous proxy mode (there may or may not be one) + maybe_prev_proxy_mode = torch._C._unset_dispatch_mode(self._mode_key) + self.enter_stack.append(maybe_prev_proxy_mode) + return super().__enter__() + + def __exit__(self, exc_type, exc_value, traceback): + m = self._managers.pop() + # ...exit us first, then sym mode + b = super().__exit__(exc_type, exc_value, traceback) + + # Re-enable the previous proxy mode, if there was one. + mb_previous_proxy_mode = self.enter_stack.pop() + if mb_previous_proxy_mode is not None: + torch._C._set_dispatch_mode(mb_previous_proxy_mode) + + if not b: + return m.__exit__(exc_type, exc_value, traceback) + else: + return m.__exit__(None, None, None) + + + def inner_torch_dispatch(self, func, types, args=(), kwargs=None): + if not self.enable_tracing: + return func(*args, **kwargs) + + if func in [prim.device.default]: + return func(*args, **kwargs) + + return proxy_call(self, func, self.pre_dispatch, args, kwargs) + + +class ProxySymDispatchMode(SymDispatchMode): + def __init__(self, tracer): + super().__init__() + self.tracer = tracer + # When false, we don't trace operations. If you do this, you MUST + # call track_tensor/track_tensor_tree on all results of the operation + # to ensure we can adequately track the results + self.enable_tracing = True + + @contextmanager + def enable(self, b): + old = self.enable_tracing + self.enable_tracing = b + try: + yield + finally: + self.enable_tracing = old + + def _compute_proxy(self, func, args, out: Union[SymInt, SymFloat, SymBool]): + n_args = tuple( + get_proxy_slot(a.node, self.tracer)().node if isinstance(a, py_sym_types) else a + for a in args + ) + + # func doesn't have a __torch_function__ that Proxy can interpose, so + # we gotta do it manually + n_out = self.tracer.create_node("call_function", func, n_args, {}) + p_out = fx.Proxy(n_out, self.tracer) + set_meta(p_out, out) + return p_out + + def __sym_dispatch__(self, func, types, args, kwargs): + if not self.enable_tracing: + return func(*args, **kwargs) + + # Peephole optimize multiply by one + # NB: be careful not to trigger guards here! + if func == operator.mul: + if isinstance(args[1], int) and args[1] == 1: + return args[0] + elif isinstance(args[0], int) and args[0] == 1: + return args[1] + + # For speed, we assume there are no nested data structures + # (otherwise we could use tree_map) + # We also assume there are no keyword arguments. + assert not kwargs + out = func(*args, **kwargs) + + # If func returned a constant, we don't need to trace; we have + # determined that the result is constant (no matter if the inputs + # were symbolic) and it is no longer necessary to trace the + # computation. This could occur if func triggered some guards. 
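+        # For example (a sketch): evaluating s0 == s0 may be simplified by the
+        # guard machinery to a plain Python bool, which is not a SymBool, so
+        # the branch below is skipped and no call_function node is recorded.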
+ if isinstance(out, py_sym_types): + # Delays tracing out the proxies on this op until we actually need it + p_out_thunk = thunkify(self._compute_proxy, func=func, args=args, out=out) + set_proxy_slot(out.node, self.tracer, p_out_thunk) + + return out + + +# TODO: I'm not sure what the point of this class is; you can just +# make_fx through a regular Interpreter +class DecompositionInterpreter(torch.fx.Interpreter): + def __init__(self, module: torch.fx.GraphModule, new_graph: torch.fx.Graph, decomposition_table=None, **kwargs): + super().__init__(module, **kwargs) + self.new_graph = new_graph + self.tracer = torch.fx.proxy.GraphAppendingTracer(self.new_graph) + # Blegh + self.tracer.tensor_tracker = WeakTensorKeyDictionary() # type: ignore[attr-defined] + self.tracer.symnode_tracker = weakref.WeakKeyDictionary() # type: ignore[attr-defined] + self.decomposition_table = decomposition_table + if self.decomposition_table is None: + self.decomposition_table = {} + self.mode = ProxyTorchDispatchMode(self.tracer, tracing_mode="real") + + def placeholder(self, target, args, kwargs): + out = super().placeholder(target, args, kwargs) + proxy = torch.fx.Proxy(self.new_graph.placeholder(target), self.tracer) + track_tensor_tree(out, proxy, constant=None, tracer=self.tracer) + # TODO handle case where the first character of target is '*' + return out + + def get_attr(self, target, args, kwargs): + out = super().get_attr(target, args, kwargs) + proxy = torch.fx.Proxy(self.new_graph.get_attr(target), self.tracer) + track_tensor_tree(out, proxy, constant=None, tracer=self.tracer) + return out + + # call_function, call_method, call_module get traced automatically by the outer mode. + + def output(self, target, args, kwargs): + out = super().output(target, args, kwargs) + + def unwrap(e): + return get_proxy_slot(e, self.tracer, e, lambda x: x.proxy.node) + self.new_graph.output(pytree.tree_map(unwrap, out)) + return out + + def run(self, *args, **kwargs): + # Should enter the mode at least once for being able to restore it later + # See: https://github.com/pytorch/pytorch/pull/82549#discussion_r934782025 + with decompose(self.decomposition_table), self.mode: + return super().run(*args, **kwargs) + + +def wrapper_and_args_for_make_fx(func, args, kwargs): + # make_fx doesn't support kwargs, so we need to do this flattening + # and then unflatten the args before calling func + flat_args, spec = pytree.tree_flatten((args, kwargs)) + + def wrapped(flat_args): + fn_args, fn_kwargs = pytree.tree_unflatten(flat_args, spec) + return func(*fn_args, **fn_kwargs) + return wrapped, flat_args + +@contextmanager +def disable_autocast_cache(): + old_value = torch.is_autocast_cache_enabled() + torch.set_autocast_cache_enabled(False) + try: + yield + finally: + torch.set_autocast_cache_enabled(old_value) + + +def make_fx(f, + decomposition_table=None, + tracing_mode="real", + _allow_non_fake_inputs=False, + *, + pre_dispatch=False, + _allow_fake_constant=False, + _error_on_data_dependent_ops=True): + assert tracing_mode in ["real", "fake", "symbolic"] + + if decomposition_table is None: + decomposition_table = {} + + @functools.wraps(f) + def wrapped(*args): + # Avoid importing sympy at a module level + from .symbolic_shapes import ShapeEnv + + phs = pytree.tree_map(lambda _: fx.PH, args) # type: ignore[attr-defined] + fx_tracer = PythonKeyTracer() + fake_tensor_mode: Any = nullcontext() + if tracing_mode == "real": + fake_tensor_mode = nullcontext() + elif tracing_mode == "fake": + import torch._dynamo + fake_tensor_mode 
= torch._dynamo.utils.detect_fake_mode(args) + if fake_tensor_mode is None: + fake_tensor_mode = FakeTensorMode( + allow_fallback_kernels=True, + allow_non_fake_inputs=_allow_non_fake_inputs, + shape_env=ShapeEnv(), + static_shapes=True, + ) + elif tracing_mode == "symbolic": + import torch._dynamo + fake_tensor_mode = torch._dynamo.utils.detect_fake_mode(args) + if fake_tensor_mode is None: + shape_env = ShapeEnv() + fake_tensor_mode = FakeTensorMode( + allow_fallback_kernels=False, + allow_non_fake_inputs=_allow_non_fake_inputs, + shape_env=shape_env) + else: + shape_env = fake_tensor_mode.shape_env + assert shape_env is not None, "shape_env should be set if tracing with 'symbolic'" + + else: + raise AssertionError(f"Unexpected tracing type: {tracing_mode}") + + python_dispatcher_mode: Any = nullcontext() + pre_dispatch_mode: Any = nullcontext() + # pre-autograd tracing uses per-dispatch-key modes, + # which requires the python dispatcher + if tracing_mode == "symbolic" or pre_dispatch: + python_dispatcher_mode = enable_python_dispatcher() + if pre_dispatch: + pre_dispatch_mode = enable_pre_dispatch() + + proxy_function_mode: Any = nullcontext() + if pre_dispatch: + proxy_function_mode = PreDispatchTorchFunctionMode(fx_tracer) + + proxy_mode = ProxyTorchDispatchMode(fx_tracer, + tracing_mode, + pre_dispatch=pre_dispatch, + _allow_fake_constant=_allow_fake_constant, + _error_on_data_dependent_ops=_error_on_data_dependent_ops) + + arg_count = 0 + + def wrap_fake(x): + nonlocal arg_count + # TODO: it would be nice to line these up with the names + # FX will choose for the placeholders, but we don't + # actually know what the names will be at this point yet + # NB: the Source here is actually meaningless + from torch._dynamo.source import ConstantSource + source = ConstantSource(f"input{arg_count}") + if isinstance(x, torch.Tensor): + arg_count += 1 + return fake_tensor_mode.from_tensor(x, source=source) # type: ignore[attr-defined] + # NB: don't match on bools + elif type(x) is int and tracing_mode == "symbolic": + return shape_env.create_symintnode(shape_env.create_symbol(x, source, positive=None), hint=x, source=source) + + return x + + sym_mode = proxy_mode.sym_mode + + wrap_fn_map = { + "real": lambda x: x, + "fake": wrap_fake, + "symbolic": wrap_fake, + } + args = pytree.tree_map(wrap_fn_map[tracing_mode], args) + + if not hasattr(inspect.unwrap(f), '__code__') or inspect.unwrap(f).__code__.co_flags & inspect.CO_VARARGS: + # FX doesn't support varargs, so we gotta fake up a wrapper + # TODO: Would be nice to fix this at the source... + func = fake_signature(f, len(phs)) + else: + func = f + + # We disable the autocast cache as the autocast cache causes type conversions on parameters to + # check a cache, which introduces untracked tensors into the graph + # + # We also disable tracing by any other tensor proxy-based tracers except the current. The + # purpose of `make_fx` is to produce graphmodules as a side effect; its internal execution is + # thus irrelevant to any external functional trace. 
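+        # For reference, a typical top-level call looks like this (a sketch;
+        # `f` and the input are illustrative):
+        #
+        #     def f(x):
+        #         return torch.sin(x) + x
+        #
+        #     gm = make_fx(f, tracing_mode="symbolic")(torch.randn(3))
+        #     print(gm.graph)  # an aten-level FX graph with symbolic shapes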
+ with decompose(decomposition_table), fake_tensor_mode, python_dispatcher_mode, pre_dispatch_mode, proxy_function_mode, \ + sym_mode, proxy_mode, disable_autocast_cache(): + t = dispatch_trace(wrap_key(func, args, fx_tracer, pre_dispatch), tracer=fx_tracer, concrete_args=tuple(phs)) + + # TODO: kind of a bad way to do it, should maybe figure out a better way + if tracing_mode == "symbolic": + t.shape_env = shape_env # type: ignore[assignment] + return t + + return wrapped + + +def get_torch_dispatch_modes(): + return torch.utils._python_dispatch._get_current_dispatch_mode_stack() + + +def get_innermost_proxy_mode(): + return torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY) + + +@contextlib.contextmanager +def disable_proxy_modes_tracing(enable_current=False): + # enable_current=True is now a no-op, since only one proxy mode + # can live on the stack at a time. + # We should kill this API in a future PR. + maybe_old = None + if not enable_current: + # Only one proxy_mode can be "active" at a time. + # So we simply remove our active mode. + maybe_old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY) + try: + yield + finally: + if maybe_old is not None: + torch._C._set_dispatch_mode(maybe_old) + + +def maybe_handle_decomp(proxy_mode, op, args, kwargs): + if op in CURRENT_DECOMPOSITION_TABLE: + with proxy_mode: + return CURRENT_DECOMPOSITION_TABLE[op](*args, **kwargs) + return NotImplemented + + +def get_isolated_graphmodule(func, args, kwargs, tracing_mode="real"): + """A helper function used to get the GraphModule for the given func. + + It's expected to be used in the ProxyTensor tracing context. + It detaches the args and kwargs from the current tracer so that the trace of + the current graph module can be created without any side-effects. + """ + wrapped, all_args = wrapper_and_args_for_make_fx(func, args, kwargs) + + with disable_proxy_modes_tracing(): + gm = make_fx(wrapped, tracing_mode=tracing_mode)(all_args) + return gm diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/recording.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/recording.py new file mode 100644 index 0000000000000000000000000000000000000000..9fdc9e1e310795799bfd6da52c85a134f7c158d7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/recording.py @@ -0,0 +1,453 @@ +import functools +import itertools +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.utils._pytree as pytree + + +__all__ = [ + "ShapeEnvEvent", + "record_shapeenv_event", + "replay_shape_env_events", + "FakeTensorMeta", + "shape_env_check_state_equal", + "NotEqualError", +] + +# [Note: Recording ShapeEnv Events] +# ================================= +# +# What is a ShapeEnv event? +# ------------------------- +# We consider a ShapeEnv event every function call (ShapeEnv method or +# independent function) that modifies the state of the ShapeEnv instance. +# Such calls are recorded alongside their positional and keyword arguments, +# so that it may be replayed over a different ShapeEnv instance. +# +# See [Note: ShapeEnv State Equality] for what is considered the state +# of a ShapeEnv instance. +# +# What is it for? +# --------------- +# ShapeEnv events recording is used for reconstructing the ShapeEnv in an +# arbitrary state in time. +# +# Being able to arbitrarily replay events like so is useful, mainly for +# translation validation bisection. i.e. 
if a ValidationException has been +# raised, find the earliest point in time where the translation validation +# fails. +# +# Besides that, it also allows us to inspect the given instance and, +# for example, check the guards that would actually be issued at that point. +# +# What kind of arguments can be stored in an event? +# ------------------------------------------------- +# There's no specific rule for what cannot be used as an argument. +# That said, pay special attention to the following cases: +# +# 1. Tensor inputs: there are some tests that check whether the inputs +# were garbage collected after execution. These will fail if there's +# an event that is holding a reference to those inputs. +# +# 2. ShapeEnv arguments: if there is an argument of ShapeEnv type, that +# will be automatically replaced by the new given ShapeEnv instance. +# +# 3. SymTypes arguments: they also hold references to ShapeEnv. So, +# whenever we see them, we create a new instance, replacing the +# ShapeEnv reference. +# +# 4. FX nodes: specifically, FX nodes from the FX graph for symbolic +# shapes. That argument must be replaced when replaying the event at +# ShapeEnvEvent.run, since it has to reference a node from the given +# instance, and not from the recorded instance. + + +# Event class for reconstructing ShapeEnv at arbitrary time. +# +# Represents a method call that mutates ShapeEnv in a way that affects the +# issued guards, when ShapeEnv.produce_guards is called. +@dataclass +class ShapeEnvEvent: + # ShapeEnv method. + f: Callable + + # Arguments and keyword arguments called with. + args: Optional[List[Any]] = None + kwargs: Optional[Dict[str, Any]] = None + + # List of tracked_fakes at the time the method was called. + tracked_fakes: Optional[List[Any]] = None + + # Name of the captured event. + # Used for special handling of particular methods. + name: Optional[str] = None + + # Replay itself, but using shape_env as self. + def run(self, shape_env=None) -> Any: + from torch.fx.experimental.symbolic_shapes import ShapeEnv, SymTypes + + # Special handling for the constructor event. + if self.f is ShapeEnv: + assert shape_env is None and self.args is None and self.kwargs is not None + return ShapeEnv(**self.kwargs) + + assert shape_env is not None + args = list(self.args or list()) + kwargs = dict(self.kwargs or dict()) + + # Replace any argument of type ShapeEnv by the given one. + args, kwargs = pytree.tree_map_only( + ShapeEnv, lambda _: shape_env, (args, kwargs) + ) + + # Replace any argument of type SymTypes by a new instance, + # replacing its ShapeEnv reference. + args, kwargs = pytree.tree_map_only( + SymTypes, + lambda a: type(a)(a.node.with_shape_env(shape_env)), + (args, kwargs), + ) + + # Converts FX nodes using the mapping argument. + def maybe_convert_node(x: Any) -> Any: + if not isinstance(x, torch.fx.Node): + # Don't do anything to x if it's not an FX node. + return x + # If, at some point, we created an FX node, it means that translation validation is on. + # It also means we are building an FX graph for symbolic shapes at shape_env.graph, and + # we are tracking node names at shape_env.name_to_node. + assert hasattr(shape_env, "name_to_node") + name_to_node = shape_env.name_to_node # type: ignore[attr-defined] + assert x.name in name_to_node + return name_to_node[x.name] + + # Replaces the value of an specific argument by the result of fn. 
+
+
+# Event class for reconstructing ShapeEnv at arbitrary time.
+#
+# Represents a method call that mutates ShapeEnv in a way that affects the
+# issued guards, when ShapeEnv.produce_guards is called.
+@dataclass
+class ShapeEnvEvent:
+    # ShapeEnv method.
+    f: Callable
+
+    # Arguments and keyword arguments called with.
+    args: Optional[List[Any]] = None
+    kwargs: Optional[Dict[str, Any]] = None
+
+    # List of tracked_fakes at the time the method was called.
+    tracked_fakes: Optional[List[Any]] = None
+
+    # Name of the captured event.
+    # Used for special handling of particular methods.
+    name: Optional[str] = None
+
+    # Replay itself, but using shape_env as self.
+    def run(self, shape_env=None) -> Any:
+        from torch.fx.experimental.symbolic_shapes import ShapeEnv, SymTypes
+
+        # Special handling for the constructor event.
+        if self.f is ShapeEnv:
+            assert shape_env is None and self.args is None and self.kwargs is not None
+            return ShapeEnv(**self.kwargs)
+
+        assert shape_env is not None
+        args = list(self.args or list())
+        kwargs = dict(self.kwargs or dict())
+
+        # Replace any argument of type ShapeEnv by the given one.
+        args, kwargs = pytree.tree_map_only(
+            ShapeEnv, lambda _: shape_env, (args, kwargs)
+        )
+
+        # Replace any argument of type SymTypes by a new instance,
+        # replacing its ShapeEnv reference.
+        args, kwargs = pytree.tree_map_only(
+            SymTypes,
+            lambda a: type(a)(a.node.with_shape_env(shape_env)),
+            (args, kwargs),
+        )
+
+        # Convert FX nodes using the name_to_node mapping of the new shape_env.
+        def maybe_convert_node(x: Any) -> Any:
+            if not isinstance(x, torch.fx.Node):
+                # Don't do anything to x if it's not an FX node.
+                return x
+            # If, at some point, we created an FX node, it means that translation validation is on.
+            # It also means we are building an FX graph for symbolic shapes at shape_env.graph, and
+            # we are tracking node names at shape_env.name_to_node.
+            assert hasattr(shape_env, "name_to_node")
+            name_to_node = shape_env.name_to_node  # type: ignore[attr-defined]
+            assert x.name in name_to_node
+            return name_to_node[x.name]
+
+        # Replaces the value of a specific argument by the result of fn.
+        def replacearg(index: int, key: str, fn: Callable):
+            if index < len(args):
+                args[index] = fn(args[index])
+            if key in kwargs:
+                kwargs[key] = fn(kwargs[key])
+
+        if self.is_create_fx_call_function():
+            # ShapeEnv.create_fx_call_function:
+            # "args" parameter is a tuple of FX nodes from the FX graph of the old ShapeEnv.
+            # They must be replaced, since a "call_function" FX node with this tuple as argument
+            # will be added to the FX graph of the new shape_env.
+            replacearg(
+                index=2,
+                key="args",
+                fn=lambda args: tuple(maybe_convert_node(a) for a in args),
+            )
+        if self.is_evaluate_expr() or self.is_defer_runtime_assert():
+            # ShapeEnv.evaluate_expr and ShapeEnv.defer_runtime_assert:
+            # "fx_node" parameter is an (optional) FX node that represents the evaluate expression.
+            # It must be replaced, since it will be part of a "call_function" FX node for
+            # torch._assert, which will be added to the FX graph of the new shape_env.
+            replacearg(index=3, key="fx_node", fn=maybe_convert_node)
+
+        # Actually call the method with the converted arguments.
+        return self.f(*args, **kwargs)
+
+    def __str__(self) -> str:
+        name = self.name if self.name is not None else self.f.__name__
+        return f"event: {name} ({self.args}, {self.kwargs})"
+
+    def is_create_fx_call_function(self) -> bool:
+        return self.name == "create_fx_call_function"
+
+    def is_evaluate_expr(self) -> bool:
+        return self.name == "evaluate_expr"
+
+    def is_defer_runtime_assert(self) -> bool:
+        return self.name == "defer_runtime_assert"
+
+
+# Extracts a ShapeEnv instance from args and kwargs.
+# Specifically, it looks for:
+#   1. ShapeEnv arguments
+#   2. SymInt, SymFloat, or SymBool arguments
+# If we find more than one object of any of the above types, we
+# also check that the ShapeEnv instance is the same for all of them.
+def _extract_shape_env_and_assert_equal(args, kwargs):
+    from torch.fx.experimental.symbolic_shapes import ShapeEnv, SymTypes
+
+    def assert_equal(old: Optional[ShapeEnv], new: ShapeEnv) -> ShapeEnv:
+        if old is not None:
+            assert old is new, "call with different ShapeEnv"
+        return new
+
+    shape_env = None
+    for val in itertools.chain(args, kwargs.values()):
+        if isinstance(val, ShapeEnv):
+            shape_env = assert_equal(shape_env, val)
+        if isinstance(val, SymTypes):
+            shape_env = assert_equal(shape_env, val.node.shape_env)
+
+    return shape_env
+
+
+# Decorator for recording the given function as a replayable event.
+#
+# This decorator should be used on every function that mutates the state of
+# ShapeEnv in some way that affects the resulting issued guards (i.e. when
+# ShapeEnv.produce_guards is called).
+#
+# save_tracked_fakes: saves a snapshot of the TrackedFake list.
+# This is used when calling ShapeEnv.produce_guards at arbitrary points in time.
+#
+# When to save the list of TrackedFake?
+# =====================================
+# We should save the list of TrackedFake whenever the translation validation
+# bisection may actually stop and call the produce_guards method at the moment
+# right after the recorded function was played. In other words, since the
+# bisection bisects through torch._assert calls, we should save in all methods
+# that add a torch._assert call to the symbolic shapes FX graph.
+#
+# At the moment, there are 2 methods that save the list:
+#   - ShapeEnv.evaluate_expr
+#   - ShapeEnv.defer_runtime_assert
+def record_shapeenv_event(*, save_tracked_fakes: bool = False) -> Callable:
+    def decorator(fn: Callable) -> Callable:
+        assert callable(fn)
+        name = fn.__name__
+
+        @functools.wraps(fn)
+        def wrapper(*args, **kwargs):
+            from torch.fx.experimental.symbolic_shapes import ShapeEnv
+
+            if isinstance(args[0], ShapeEnv) and args[0].is_recording:  # type: ignore[has-type]
+                # If ShapeEnv is already recording an event, call the wrapped
+                # function directly.
+                #
+                # NB: here, we skip the check of whether all ShapeEnv instances
+                # are equal, in favor of a faster dispatch.
+                return fn(*args, **kwargs)
+
+            # Retrieve an instance of ShapeEnv.
+            # Assumption: the collection of args and kwargs must not reference
+            # different ShapeEnv instances.
+            self = _extract_shape_env_and_assert_equal(args, kwargs)
+
+            # If we are calling this function without any ShapeEnv instance
+            # alive in its arguments, we don't record anything and just call
+            # the original function.
+            if self is None:
+                return fn(*args, **kwargs)
+
+            # Otherwise, start recording and call the function.
+            with self.recording():
+                # Take a snapshot of the current tracked_fakes.
+                tracked_fakes = (
+                    self.snapshot_tracked_fakes() if save_tracked_fakes else None
+                )
+                # Record the event for 'fn'.
+                event = ShapeEnvEvent(
+                    fn, list(args), kwargs, tracked_fakes, name=fn.__name__
+                )
+                self.events.append(event)
+                # Play the event on this ShapeEnv.
+                return event.run(self)
+
+        return wrapper
+
+    return decorator
+
+
+# Replays the ShapeEnvEvents list.
+# It assumes the first event is the constructor call.
+#
+# Each event replays itself through ShapeEnvEvent.run, which also converts
+# any recorded FX nodes into nodes of the newly created ShapeEnv.
def replay_shape_env_events(events):
+    from torch.fx.experimental.symbolic_shapes import ShapeEnv
+
+    constructor_event = events[0]
+    assert constructor_event.f == ShapeEnv
+
+    # Constructs the new ShapeEnv.
+    shape_env = constructor_event.run()
+
+    for event in events[1:]:
+        try:
+            # Actually replays each event.
+            # Node conversion has to happen for every event, since the node
+            # list might change after each event is replayed.
+            event.run(shape_env)
+        except Exception as e:
+            raise RuntimeError(f"failed when running event: {event}") from e
+
+    return shape_env
+
+
+# FakeTensor metadata.
+# This is to be used in place of FakeTensor placeholders when calling
+# ShapeEnv.produce_guards.
+@dataclass
+class FakeTensorMeta:
+    tensor_size: Tuple[Union[int, torch.SymInt], ...]
+    tensor_stride: Tuple[Union[int, torch.SymInt], ...]
+    tensor_storage_offset: Union[int, torch.SymInt]
+    is_nested: bool
+
+    def size(self) -> Tuple[Union[int, torch.SymInt], ...]:
+        return self.tensor_size
+
+    def stride(self) -> Tuple[Union[int, torch.SymInt], ...]:
+        return self.tensor_stride
+
+    def storage_offset(self) -> Union[int, torch.SymInt]:
+        return self.tensor_storage_offset
+
+    def dim(self) -> int:
+        return len(self.tensor_size)
+
+    @staticmethod
+    def from_fake(fake) -> "FakeTensorMeta":
+        return FakeTensorMeta(
+            fake.size(), fake.stride(), fake.storage_offset(), fake.is_nested
+        )
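+
+
+# Illustrative sketch (not part of the original file): FakeTensorMeta only
+# mirrors the small subset of the tensor API that produce_guards consumes
+# (size/stride/storage_offset/dim), so any tensor-like object works as input.
+def _example_fake_tensor_meta():
+    t = torch.empty(2, 3)
+    meta = FakeTensorMeta.from_fake(t)
+    assert meta.size() == (2, 3)
+    assert meta.stride() == (3, 1)
+    assert meta.dim() == 2
+    return meta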
+
+
+# [Note: ShapeEnv State Equality]
+# ===============================
+#
+# What is considered ShapeEnv state?
+# ----------------------------------
+# We consider the state of a ShapeEnv instance to be everything that is
+# not in the inlined tuple inside the remove_nonstate_variables function.
+# That is: the fields within ShapeEnv that modify the flow of execution
+# of the program.
+#
+# So, for example: the replacements field might influence how an
+# expression is simplified. That, in turn, may result in a guard being
+# statically known (i.e. not added).
+#
+# On the other hand, var_to_stack only changes what is printed on the
+# screen, i.e. it is used only for debugging purposes. Therefore, we
+# should not consider it when comparing states.
+#
+# What to do on NotEqualError?
+# ----------------------------
+# Here are a few possible causes for getting a NotEqualError raised:
+#
+#   1. New field that does not belong in the ShapeEnv state.
+#      For example: log field of type ShapeEnvLoggerAdapter. Different
+#      ShapeEnv instances will always have different ShapeEnvLoggerAdapter
+#      instances, i.e. equality comparison would fail.
+#      Solution: add it to the inlined tuple inside the
+#      remove_nonstate_variables function inside the check_equal method.
+#
+#   2. New field that is not directly comparable across instances.
+#      For example: guards field of type List[ShapeGuard]. More specifically,
+#      the ShapeGuard type holds an expression and stack information
+#      for debugging purposes. When replaying the event on a new ShapeEnv
+#      instance, the stack would be different, which would trigger this error.
+#      Solution: add a special case to the map_value function inside the
+#      check_equal function.
+#
+#   3. Mutation of ShapeEnv inside a function that is not recorded.
+#      If a mutation of the state of ShapeEnv happens inside a function
+#      that is not recorded (or that no caller in the stack is recorded),
+#      then the replayed ShapeEnv won't catch that.
+#      Solution: decorate the function with record_shapeenv_event.
+
+
+# Checks whether the states of two ShapeEnvs are equal w.r.t. the guards
+# returned by ShapeEnv.produce_guards.
+def shape_env_check_state_equal(env1, env2, non_state_variable_names, map_value):
+    # Collect and remove variables that don't necessarily represent the state
+    # of a ShapeEnv. Note: we copy the dictionary so that we don't modify the
+    # instance itself.
+    env1_vars = vars(env1).copy()
+    env2_vars = vars(env2).copy()
+
+    for v in non_state_variable_names:
+        if v in env1_vars:
+            env1_vars.pop(v)
+        if v in env2_vars:
+            env2_vars.pop(v)
+
+    # Function for transforming the mismatched values into strings.
+    # Needed since dict and set entry order might not be the same every time.
+    def value_to_str(value: Any) -> str:
+        if isinstance(value, dict):
+            return (
+                "{"
+                + ", ".join(f"{k}: {value[k]}" for k in sorted(value.keys(), key=str))
+                + "}"
+            )
+        if isinstance(value, set):
+            return "{" + ", ".join(f"{v}" for v in sorted(value)) + "}"
+        return str(value)
+
+    # Compares env1_vars with env2_vars.
+    # Here, we allow the value of each field to be mapped, so that we
+    # appropriately compare the two values.
+    def compare_vars(
+        map_value: Callable[[str, Any], Any]
+    ) -> List[Tuple[str, str, str]]:
+        env1_set, env2_set = set(env1_vars), set(env2_vars)
+
+        # First, compare the set of keys in each vars dictionary.
+        if env1_set != env2_set:
+            raise NotEqualError(
+                "field set mismatch:",
+                [
+                    (
+                        "found unique fields:",
+                        str(sorted(env1_set - env2_set)),
+                        str(sorted(env2_set - env1_set)),
+                    ),
+                ],
+            )
+
+        # Then, sort the keys, and compare the mapped values of each key.
+        sorted_keys = list(env1_set)
+        sorted_keys.sort()
+
+        mapped_dict = [
+            (k, map_value(k, env1_vars[k]), map_value(k, env2_vars[k]))
+            for k in sorted_keys
+        ]
+
+        # Return a list of tuples representing the fields that did not match
+        # alongside their respective mapped values.
+ return [ + (f"{k}: values don't match.", value_to_str(val1), value_to_str(val2)) + for k, val1, val2 in mapped_dict + if val1 != val2 + ] + + # Accumulate the mismatching fields. + errors = compare_vars(map_value) + + if len(errors) > 0: + raise NotEqualError("field values don't match:", errors) + + +class NotEqualError(Exception): + def __init__( + self, + msg: str, + mismatched: List[Tuple[str, str, str]], + ) -> None: + details = "\n".join( + [ + "\n".join( + [ + f"==> {inner_msg}", + f" > Left: {str1}", + f" > Right: {str2}", + ] + ) + for inner_msg, str1, str2 in mismatched + ] + ) + + super().__init__( + f"""\ +ShapeEnv not equal: {msg} + +{details} +""" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/refinement_types.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/refinement_types.py new file mode 100644 index 0000000000000000000000000000000000000000..762e4340f12b49d1a9f2628ce1e011e38b8d23a1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/refinement_types.py @@ -0,0 +1,16 @@ +class Equality: + def __init__(self, lhs, rhs): + self.lhs = lhs + self.rhs = rhs + + def __str__(self): + return f'{self.lhs} = {self.rhs}' + + def __repr__(self): + return f'{self.lhs} = {self.rhs}' + + def __eq__(self, other): + if isinstance(other, Equality): + return self.lhs == other.lhs and self.rhs == other.rhs + else: + return False diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/rewriter.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..c4abe52c8c279bf93cd05659423eaceddf023b55 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/rewriter.py @@ -0,0 +1,121 @@ +import ast +import inspect +import textwrap +import copy +import functools +from types import FunctionType +from typing import cast, Union, Callable, Dict, Optional, Any +from torch.fx._symbolic_trace import Tracer +from torch.fx.graph import Graph +from torch._sources import normalize_source_lines +import torch + +class AST_Rewriter(ast.NodeTransformer): + """ + Take a FunctionType object representing a `forward` method, then + perform an AST rewrite to swap out nodes that are not symbolically + traceable with a callsite to the FX alternative. + + To support swapping out an AST node, define a new `visit` method on + that node. 
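+    For example, `visit_Assert` below replaces Python `assert` statements
+    with calls to the symbolically-traceable `torch._assert`.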
+    For more details, see:
+    https://docs.python.org/3/library/ast.html#ast.NodeTransformer
+    """
+
+    def rewrite(self, fn: FunctionType):
+
+        # Normalize the source lines
+        sourcelines, _ = inspect.getsourcelines(fn)
+        sourcelines = normalize_source_lines(sourcelines)
+        source = ''.join(sourcelines)
+        normalized_str = textwrap.dedent(source)
+
+        # Rewrite the original AST
+        source_ast = ast.parse(normalized_str)
+        dest_ast = ast.fix_missing_locations(self.visit(source_ast))
+
+        # Pull out the compiled function from the newly-created Module
+        code = compile(dest_ast, "", "exec")
+        globals_dict = copy.copy(fn.__globals__)
+        keys_before = set(globals_dict.keys())
+        exec(code, globals_dict)
+        new_keys = list(set(globals_dict.keys()) - keys_before)
+        assert len(new_keys) == 1
+        fn_compiled = globals_dict[new_keys[0]]
+
+        # Return the compiled function with the original globals
+        def change_func_globals(f, globals):
+            """Based on https://stackoverflow.com/a/13503277/2988730 (@unutbu)"""
+            # __globals__ is a private member of the function class,
+            # so we have to copy the function, f, and all of its members,
+            # except f.__globals__
+            g = FunctionType(
+                f.__code__,
+                globals,
+                name=f.__name__,
+                argdefs=f.__defaults__,
+                closure=f.__closure__,
+            )
+            g = functools.update_wrapper(g, f)
+            g.__kwdefaults__ = copy.copy(f.__kwdefaults__)
+            return g
+        # Return the correct FunctionType object
+        return change_func_globals(fn_compiled, globals=fn.__globals__)
+
+    def visit_Assert(self, node):
+        """
+        Swap out the Assert node (Python's `assert`) with a callsite to the
+        symbolically-traceable torch._assert function
+        """
+        # Create the Call node
+        n = ast.parse('torch._assert()', mode='eval')
+        assert isinstance(n, ast.Expression)
+        call_node = n.body
+        assert isinstance(call_node, ast.Call)
+        msg = node.msg if node.msg else ast.Constant(value="", kind=None)
+        call_node.args = [node.test, msg]
+
+        # Ensure that the new node conforms to the Python AST grammar
+        expr_wrapper = ast.Expr(value=call_node)
+
+        # Return the new Call node to signify that we want to use it as
+        # a replacement for the original Assert node
+        return ast.copy_location(expr_wrapper, node)
+
+    def visit_AnnAssign(self, node):
+        """
+        Swap out Python's AnnAssign with an Assign node where the annotation function is called.
+        Example:
+            Original:
+                y: Tensor_Type(1,2,3, Dyn) = f2(x)
+            Output:
+                y = annotate(f2(x),Tensor_Type((1,2,3,Dyn)))
+        """
+        return ast.Assign(targets=[node.target], value=ast.Call(
+            func=ast.Name(id='annotate', ctx=ast.Load()),
+            args=[node.value, node.annotation], keywords=[]))
+
+
+class RewritingTracer(Tracer):
+    def trace(self, root: Union[torch.nn.Module, Callable], concrete_args: Optional[Dict[str, Any]] = None) -> Graph:
+        return super().trace(_rewrite(root), concrete_args)
+
+
+def _rewrite(fn: Union[torch.nn.Module, Callable]) -> Union[torch.nn.Module, Callable]:
+    if isinstance(fn, torch.nn.Module):
+        # Rewrite this module's `forward` as well as the `forward`s of
+        # all of this module's recursive descendants. Return the new,
+        # rewritten module hierarchy.
+ def rewrite_module(m : torch.nn.Module): + class RewrittenModule(torch.nn.Module): + def __init__(self, orig): + super().__init__() + for k, v in orig.__dict__.items(): + if isinstance(v, torch.nn.Module): + self.__dict__[k] = copy.copy(rewrite_module(v)) + else: + self.__dict__[k] = copy.copy(v) + RewrittenModule.forward = AST_Rewriter().rewrite(cast(FunctionType, m.forward)) + return RewrittenModule(m) + return rewrite_module(fn) + else: + # Rewrite this single free function + return AST_Rewriter().rewrite(cast(FunctionType, fn)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/schema_type_annotation.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/schema_type_annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..a2a840408618a1cf4b1be4a2be136935f964ba2a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/schema_type_annotation.py @@ -0,0 +1,111 @@ +import torch +import torch.fx +import inspect +from typing import Any, Dict, Optional, Tuple +from torch.fx.node import Argument, Target +from torch._jit_internal import boolean_dispatched +from torch.fx.operator_schemas import _torchscript_type_to_python_type + +from torch.fx import Transformer + +class AnnotateTypesWithSchema(Transformer): + """ + Use Python function signatures to annotate types for `Nodes` within an FX graph. + This pulls out Python function signatures for: + + 1. Standard `torch.nn` Module calls + 2. `torch.nn.functional` calls + 3. Attribute fetches via `get_attr` + + Example usage: + + m = torchvision.models.resnet18() + + traced = torch.fx.symbolic_trace(m) + + traced = AnnotateTypesWithSchema(traced).transform() + + """ + def __init__(self, module : torch.nn.Module, annotate_functionals : bool = True, + annotate_modules : bool = True, annotate_get_attrs : bool = True): + super().__init__(module) + self.annotate_functionals = annotate_functionals + self.annotate_modules = annotate_modules + self.annotate_get_attrs = annotate_get_attrs + + def call_function(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]): + python_ret_type = None + if self.annotate_functionals and target.__module__ == 'torch.nn.functional': + target_for_analysis = target + if target in boolean_dispatched: + # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have + # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false` + # branches of the dispatch have exactly the same signature. If they do, use the `true` + # branch signature for analysis. Otherwise, leave this un-normalized + assert not isinstance(target, str) + dispatched = boolean_dispatched[target] + if_true, if_false = dispatched['if_true'], dispatched['if_false'] + # TODO: can we emit the union of these? What are the implications on TorchScript + # compilation? 
+                if inspect.signature(if_true).return_annotation != inspect.signature(if_false).return_annotation:
+                    return super().call_function(target, args, kwargs)
+                target_for_analysis = if_true
+
+            python_ret_type = self._extract_python_return_type(target_for_analysis)
+
+        return_proxy = super().call_function(target, args, kwargs)
+        return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
+        return return_proxy
+
+    def call_module(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
+        python_ret_type = None
+        assert isinstance(target, str)
+        submod = self.fetch_attr(target)
+        if self.annotate_modules and hasattr(submod.__class__, '__name__'):
+            classname = submod.__class__.__name__
+            if getattr(torch.nn, classname, None) == submod.__class__:
+                python_ret_type = self._extract_python_return_type(submod.forward)
+        return_proxy = super().call_module(target, args, kwargs)
+        return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
+        return return_proxy
+
+    def get_attr(self, target : torch.fx.node.Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
+        attr_proxy = super().get_attr(target, args, kwargs)
+
+        if self.annotate_get_attrs:
+            module_itr = self.module
+            assert isinstance(target, str)
+            atoms = target.split('.')
+            for i, atom in enumerate(atoms):
+                if not hasattr(module_itr, atom):
+                    raise RuntimeError(f'Node referenced nonexistent target {".".join(atoms[:i])}!')
+                module_itr = getattr(module_itr, atom)
+
+            maybe_inferred_ts_type = torch._C._jit_try_infer_type(module_itr)
+            if maybe_inferred_ts_type.success():
+                python_type = _torchscript_type_to_python_type(maybe_inferred_ts_type.type())
+                attr_proxy.node.type = python_type if not attr_proxy.node.type else attr_proxy.node.type
+
+        return attr_proxy
+
+    def _extract_python_return_type(self, target : Target) -> Optional[Any]:
+        """
+        Given a Python call target, try to extract the Python return annotation
+        if it is available, otherwise return None
+
+        Args:
+
+            target (Callable): Python callable to get return annotation for
+
+        Returns:
+
+            Optional[Any]: Return annotation from the `target`, or None if it was
+                not available.
+        """
+        assert callable(target)
+        try:
+            sig = inspect.signature(target)
+        except (ValueError, TypeError):
+            return None
+
+        return sig.return_annotation if sig.return_annotation is not inspect.Signature.empty else None
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/sym_node.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/sym_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..369f6ba4b8e2ce5de0c1c463f5ef8dceda40bf5b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/sym_node.py
@@ -0,0 +1,1145 @@
+"""
+This file does three things:
+- Contains the definition of SymNode
+- Installs all the magic methods into SymBool, SymInt, and SymFloat at import time
+- Does not depend on sympy at import time
+
+As this file is imported from within torch/__init__.py we do not want it to depend on SymPy
+to avoid having to load SymPy at import time, as doing so is *very* slow.
+"""
+
+import builtins
+import itertools
+import logging
+import math
+import operator
+import sys
+from functools import lru_cache
+from typing import Optional, Type, TYPE_CHECKING, Union
+
+# NB: The sym_* functions are used via getattr() and must be imported here.
+from torch import ( # noqa: F401 + sym_float, + sym_ite, + sym_max, + sym_min, + sym_not, + sym_sqrt, + SymBool, + SymFloat, + SymInt, +) + +from torch.fx.experimental._sym_dispatch_mode import ( + handle_sym_dispatch, + sym_function_mode, +) + +if TYPE_CHECKING: + from torch.fx.experimental.symbolic_shapes import ShapeEnv + +log = logging.getLogger(__name__) + + +__all__ = ["SymNode", "method_to_operator", "magic_methods", "sym_sqrt"] + + +SymTypes = (SymInt, SymFloat, SymBool) + + +def _to_symtype(t): + if t is bool: + return SymBool + if t is int: + return SymInt + if t is float: + return SymFloat + return t + + +# TODO: An incomplete list +# 1. Set variables to be equal when we do equality +# 2. Specialize on 0/1 when we do subtraction +class SymNode: + """ + This is a type erased SymInt/SymFloat which we use to do actual operations. + End users don't touch this. Magic methods are NOT defined on this object. + """ + + def __init__( + self, + expr, + shape_env, + pytype, + hint: Optional[Union[int, float, bool]], + constant=None, + fx_node=None, + ): + self._expr = expr + self.shape_env = shape_env + self.pytype = pytype + # What's the difference between hint and constant? + # + # - A constant is known to be invariant across invocations of the model; + # it will always be this value. We only really know this when we + # encounter an honest-to-goodness literal (when wrapping it into + # a SymNode, we set constant.) Most of the time, constant is None + # + # - A hint is a *particular* value from the particular run we are + # tracing, but it may vary the next time around. It's useful to + # keep this around, as if we need a concrete value from a SymNode, + # we will return the hint and guard on the expression that produced + # it giving the same hint next time around. The hint is not + # guaranteed to be set either: if you have an unbacked SymNode, + # there won't be any hint; it was the result of some tensor-dependent + # computation, but we don't know what it actually is because we + # haven't actually run the tensor computation. + # + # If _hint is None, we will query maybe_evaluate_static(compute_hint=True) + # in hopes that we've learned enough about the unbacked symints to + # discharge the hint; otherwise, you're likely to just error out. + # + # (A previous version of this system had some optimizations to only + # recompute when it was possible we had learned enough about the + # unbacked symint that a hint was now possible, but as we added more + # potential refinements to unbacked symints this got harder to keep + # in sync, so we've deleted it for now.) + if hint is not None: + assert type(hint) is pytype or type(hint) is _to_symtype(pytype), ( + "Cannot create SymNode of type " + f"{pytype} with incompatible hint of type {type(hint)}" + ) + self._hint = hint + self.constant: Optional[Union[int, float, bool]] = constant + + # Record the FX node of the current node if we are doing translation + # validation. They will be used for building the input assertions for + # the translation validation problem. 
+ self.fx_node = ( + fx_node if self.shape_env._translation_validation_enabled else None + ) + + def with_shape_env(self, shape_env: "ShapeEnv") -> "SymNode": + return SymNode( + self._expr, shape_env, self.pytype, self._hint, self.constant, self.fx_node + ) + + @property + def expr(self): + return self.shape_env.replace(self._expr) + + # Recompute the hint and see if we've got it now + # Precondition: self._hint is None + def _update_hint(self): + r = self.shape_env._maybe_evaluate_static(self.expr, compute_hint=True) + if r is not None: + self._hint = self.pytype(r) if not isinstance(r, SymTypes) else r + + @property + def hint(self): + if self._hint is None: + self._update_hint() + return self._hint + + def has_hint(self): + if self._hint is None: + self._update_hint() + return self._hint is not None + + def require_hint(self, fallback=None): + if self._hint is None: + self._update_hint() + if self._hint is None: + if fallback is not None: + return fallback + # NB: we expect this to raise + return self.shape_env.size_hint(self.expr) + return self._hint + + def maybe_as_int(self): + if self.expr.is_number: + return int(self.expr) + else: + return None + + def is_int(self): + return self.pytype is int + + def is_float(self): + return self.pytype is float + + def is_bool(self): + return self.pytype is bool + + def wrap_int(self, num): + assert type(num) is int + import sympy + + return SymNode( + sympy.Integer(num), self.shape_env, int, num, constant=num, fx_node=num + ) + + def wrap_float(self, num): + assert type(num) is float + import sympy + + return SymNode( + sympy.Float(num), self.shape_env, float, num, constant=num, fx_node=num + ) + + def wrap_bool(self, num): + assert type(num) is bool + import sympy + + return SymNode( + sympy.true if num else sympy.false, + self.shape_env, + bool, + num, + constant=num, + fx_node=num, + ) + + def clone(self): + return self + + def str(self): + return f"{self.expr}" + + def __str__(self): + return self.str() + + def __repr__(self): + return self.str() + + # These methods call the metaprogrammed methods, they're hand written + # here so we get good stack traces + def abs(self) -> "SymNode": + return self._abs() # type: ignore[attr-defined] + + def add(self, other) -> "SymNode": + return self._add(other) # type: ignore[attr-defined] + + def sub(self, other) -> "SymNode": + return self._sub(other) # type: ignore[attr-defined] + + def mul(self, other) -> "SymNode": + return self._mul(other) # type: ignore[attr-defined] + + def mod(self, other) -> "SymNode": + return self._mod(other) # type: ignore[attr-defined] + + def pow(self, other) -> "SymNode": + return self._pow(other) # type: ignore[attr-defined] + + def and_(self, other) -> "SymNode": + return self._and_(other) # type: ignore[attr-defined] + + def or_(self, other) -> "SymNode": + return self._or_(other) # type: ignore[attr-defined] + + def truediv(self, other) -> "SymNode": + return self._truediv(other) # type: ignore[attr-defined] + + def floordiv(self, other) -> "SymNode": + return self._floordiv(other) # type: ignore[attr-defined] + + def lshift(self, other) -> "SymNode": + return self._lshift(other) # type: ignore[attr-defined] + + def rshift(self, other) -> "SymNode": + return self._rshift(other) # type: ignore[attr-defined] + + def sym_not(self) -> "SymNode": # noqa: F811 + return self._sym_not() # type: ignore[attr-defined] + + def eq(self, other) -> "SymNode": + return self._eq(other) # type: ignore[attr-defined] + + def ne(self, other) -> "SymNode": + return self._ne(other) # type: 
ignore[attr-defined] + + def gt(self, other) -> "SymNode": + return self._gt(other) # type: ignore[attr-defined] + + def lt(self, other) -> "SymNode": + return self._lt(other) # type: ignore[attr-defined] + + def le(self, other) -> "SymNode": + return self._le(other) # type: ignore[attr-defined] + + def ge(self, other) -> "SymNode": + return self._ge(other) # type: ignore[attr-defined] + + def floor(self) -> "SymNode": + return self._floor() # type: ignore[attr-defined] + + def sym_float(self) -> "SymNode": # noqa: F811 + return self._sym_float() # type: ignore[attr-defined] + + def sym_int(self) -> "SymNode": + return self._sym_int() # type: ignore[attr-defined] + + def ceil(self) -> "SymNode": + return self._ceil() # type: ignore[attr-defined] + + def neg(self) -> "SymNode": + return self._neg() # type: ignore[attr-defined] + + def sym_min(self, other) -> "SymNode": # noqa: F811 + return self._sym_min(other) # type: ignore[attr-defined] + + def sym_max(self, other) -> "SymNode": # noqa: F811 + return self._sym_max(other) # type: ignore[attr-defined] + + def sym_ite(self, then_val, else_val) -> "SymNode": + return self._sym_ite(then_val, else_val) # type: ignore[attr-defined] + + def sym_sqrt(self) -> "SymNode": + return self._sym_sqrt() # type: ignore[attr-defined] + + def is_contiguous(self, sizes, strides) -> "SymNode": + return self._is_contiguous(sizes, strides) # type: ignore[attr-defined] + + def is_channels_last_contiguous_2d(self, sizes, strides) -> "SymNode": + return self._is_channels_last_contiguous_2d(sizes, strides) # type: ignore[attr-defined] + + def is_channels_last_contiguous_3d(self, sizes, strides) -> "SymNode": + return self._is_channels_last_contiguous_3d(sizes, strides) # type: ignore[attr-defined] + + def is_channels_last_strides_2d(self, sizes, strides) -> "SymNode": + return self._is_channels_last_strides_2d(sizes, strides) # type: ignore[attr-defined] + + def is_channels_last_strides_3d(self, sizes, strides) -> "SymNode": + return self._is_channels_last_strides_3d(sizes, strides) # type: ignore[attr-defined] + + def is_non_overlapping_and_dense_indicator(self, sizes, strides) -> "SymNode": + return self._is_non_overlapping_and_dense_indicator(sizes, strides) # type: ignore[attr-defined] + + # Make C++ happy + def sym_or(self, other): + return self.or_(other) + + def sym_and(self, other): + return self.and_(other) + + def is_non_overlapping_and_dense(self, sizes, strides): + return self.is_non_overlapping_and_dense_indicator(sizes, strides).eq(to_node(self, 1)) # type: ignore[attr-defined] + + def int_(self): + return self.guard_int("", 0) # NB: uses Python backtrace + + # You can manually trigger a guard with this function + def guard_int(self, file, line): + # TODO: use the file/line for some useful diagnostic on why a + # guard occurred + r = self.shape_env.evaluate_expr(self.expr, self.hint, fx_node=self.fx_node) + try: + return int(r) + except Exception: + log.warning("Failed to convert to int: %s", r) + raise + + def guard_float(self, file, line): + # TODO: use the file/line for some useful diagnostic on why a + # guard occurred + r = self.shape_env.evaluate_expr( + self.expr, self.hint, fx_node=self.fx_node, expect_rational=False + ) + try: + return float(r) + except Exception: + log.warning("Failed to convert to float: %s", r) + raise + + def guard_bool(self, file, line): + # TODO: use the file/line for some useful diagnostic on why a + # guard occurred + r = self.shape_env.evaluate_expr(self.expr, self.hint, fx_node=self.fx_node) + try: + return bool(r) 
+ except Exception: + log.warning("Failed to convert to bool: %s", r) + raise + + def expect_true(self, file, line): + if self.has_hint(): + # OK to generate guards + return self.guard_bool(file, line) + # Generate a deferred runtime assert (this might actually end up doing + # a regular guard if we can!) + # TODO: file/line here is very important, because the assert has been + # deferred so you can't backtrace easily + return self.shape_env.defer_runtime_assert( + self.expr, f"{file}:{line}", fx_node=self.fx_node + ) + + def expect_size(self, file, line): + from torch.fx.experimental.symbolic_shapes import _advise_is_size + + b = self.ge(self.wrap_int(0)) + # Generate a deferred runtime assert + r = b.expect_true(file, line) + # Refine compile time range, but only if it's unbacked. + # If you refine range for hinted variables, you can end up making + # improper deductions since compile time reasoning may be + # incompatible with runtime reasoning. + if r and not self.has_hint(): + _advise_is_size(SymInt(self)) + return r + + def bool_(self): + return self.guard_bool("", 0) + + def is_symbolic(self): + return True + + def singleton_int(self): + return None + + def is_constant(self): + return False + + +# TODO: this probably needs the sizes-strides eval functions +METHOD_TO_OPERATOR = { + "abs": operator.abs, + "add": operator.add, + "and": operator.and_, + "ceil": math.ceil, + "eq": operator.eq, + "floor": math.floor, + "floordiv": operator.floordiv, + "ge": operator.ge, + "gt": operator.gt, + "le": operator.le, + "lshift": operator.lshift, + "lt": operator.lt, + "mod": operator.mod, + "mul": operator.mul, + "ne": operator.ne, + "neg": operator.neg, + "or": operator.or_, + "pow": operator.pow, + "rshift": operator.rshift, + "sub": operator.sub, + "sym_float": sym_float, + "sym_ite": sym_ite, + "sym_max": sym_max, + "sym_min": sym_min, + "sym_not": sym_not, + "sym_sqrt": sym_sqrt, + "truediv": operator.truediv, +} + +unary_magic_methods = { + "abs", + "sym_float", + "ceil", + "floor", + "neg", + "sym_sqrt", + "sym_not", +} + +# Most methods are only registered on SymInt and SymFloat +# Some methods are only be registered on SymBool +only_bool_magic_methods = {"and", "or", "sym_not", "sym_ite"} +# Methods that implicitly convert SymBool into SymInt +bool_becomes_int_magic_methods = {"add", "sub", "mul"} +# Methods that are also on SymBool, in addition to on SymInt and SymFloat +also_bool_magic_methods = {"eq"} +bool_magic_methods = only_bool_magic_methods | also_bool_magic_methods + + +magic_methods_on_operator_with_trailing_underscore = {"and", "or"} + + +always_float_magic_methods = {"truediv", "sym_float", "sym_sqrt", "pow"} +always_int_magic_methods = {"ceil", "floor"} +always_bool_magic_methods = { + "eq", + "ne", + "gt", + "lt", + "le", + "ge", + "and", + "or", + "sym_not", + "is_non_overlapping_and_dense", +} + +# Methods that have a `__foo__` as well as `__rfoo__` + + +def _sympy_truediv(a, b): + from torch.utils._sympy.functions import TrueDiv + + return TrueDiv(a, b) + + +def _sympy_floordiv(a, b): + from torch.utils._sympy.functions import FloorDiv + + return FloorDiv(a, b) + + +def _sympy_mod(a, b): + from torch.utils._sympy.functions import Mod + + return Mod(a, b) + + +def _sympy_pow(a, b): + from torch.utils._sympy.functions import Pow + + return Pow(a, b) + + +def _sympy_and(a, b): + import sympy + + return sympy.And(a, b) + + +def _sympy_or(a, b): + import sympy + + return sympy.Or(a, b) + + +def _sympy_lshift(a, b): + from torch.utils._sympy.functions import LShift + + 
return LShift(a, b) + + +def _sympy_rshift(a, b): + from torch.utils._sympy.functions import RShift + + return RShift(a, b) + + +reflectable_magic_methods = { + "add": lambda a, b: a + b, + "sub": lambda a, b: a - b, + "mul": lambda a, b: a * b, + "mod": _sympy_mod, + "pow": _sympy_pow, + "and": _sympy_and, + "or": _sympy_or, + "truediv": _sympy_truediv, + "floordiv": _sympy_floordiv, + "lshift": _sympy_lshift, + "rshift": _sympy_rshift, +} + + +def _floor_ceil_helper(a, fn): + import sympy + + if isinstance(a, sympy.Mul): + aa = a.args + if len(aa) == 2 and isinstance(aa[0], sympy.Float) and aa[1].is_integer: + coef = sympy.Integer(aa[0]) + if aa[0] == coef: # structural equality test + return coef * aa[1] + if ( + isinstance(a, sympy.Float) + and a == sympy.Integer(a) + or isinstance(a, sympy.Integer) + ): + return sympy.Integer(a) + return fn(a) + + +def _sympy_floor(a): + import sympy + + return _floor_ceil_helper(a, sympy.floor) + + +def _sympy_ceil(a): + import sympy + + return _floor_ceil_helper(a, sympy.ceiling) + + +def _sympy_eq(a, b): + import sympy + + return sympy.Eq(a, b) + + +def _sympy_ne(a, b): + import sympy + + return sympy.Ne(a, b) + + +def _sympy_gt(a, b): + import sympy + + return sympy.Gt(a, b) + + +def _sympy_lt(a, b): + import sympy + + return sympy.Lt(a, b) + + +def _sympy_le(a, b): + import sympy + + return sympy.Le(a, b) + + +def _sympy_ge(a, b): + import sympy + + return sympy.Ge(a, b) + + +def _sympy_min(a, b): + import sympy + + return sympy.Min(a, b) + + +def _sympy_max(a, b): + import sympy + + return sympy.Max(a, b) + + +def _sympy_ite(a, t, f): + import sympy + + return sympy.Piecewise((t, a), (f, True)) + + +def _sympy_sqrt(a): + import sympy + + return sympy.sqrt(a) + + +def _sympy_abs(a): + import sympy + + return sympy.Abs(a) + + +def _sympy_sym_float(a): + # Cannot use sympy.Float(a) here, coz it expects python literals + # Multiply by 1.0 to cast to float. This is needed when the input + # is a SymInt which has the assumption that it is integer and + # SymPy will otherwise assume that return value cannot be a float. + return a * 1.0 + + +magic_methods = { + **reflectable_magic_methods, + "sym_not": lambda a: ~a, + "eq": _sympy_eq, + "ne": _sympy_ne, + "gt": _sympy_gt, + "lt": _sympy_lt, + "le": _sympy_le, + "ge": _sympy_ge, + "floor": _sympy_floor, + "sym_float": _sympy_sym_float, + "ceil": _sympy_ceil, + "neg": lambda a: -a, + "sym_min": _sympy_min, + "sym_max": _sympy_max, + "sym_ite": _sympy_ite, + "sym_sqrt": _sympy_sqrt, + "abs": _sympy_abs, +} + + +def sympy_is_contiguous(sizes, strides): + dim = len(sizes) + return sympy_is_contiguous_generic(sizes, strides, list(range(dim - 1, -1, -1))) + + +def sympy_is_contiguous_generic(sizes, strides, dim_order): + import sympy + + dim = len(sizes) + + if len(dim_order) != dim: + return sympy.false + + is_contiguous = sympy.true + z = sympy.Integer(1) + # Contiguous if the strides make sense (or the dim is size 1) + for d in dim_order: + is_contiguous &= sympy.Eq(sizes[d], sympy.Integer(1)) | sympy.Eq(strides[d], z) + z *= sizes[d] + # OR if any size is zero + for d in range(dim): + is_contiguous |= sympy.Eq(sizes[d], sympy.Integer(0)) + return is_contiguous + + +# NB: There is a TODO in C++ to allow omitting the batch dim. 
If that +# happens you will need to refactor this + + +def sympy_is_channels_last_contiguous_2d(sizes, strides): + return sympy_is_contiguous_generic(sizes, strides, [1, 3, 2, 0]) + + +def sympy_is_channels_last_contiguous_3d(sizes, strides): + return sympy_is_contiguous_generic(sizes, strides, [1, 4, 3, 2, 0]) + + +def sympy_is_channels_last_strides_generic(sizes, strides, dim_order): + import sympy + + dim = len(sizes) + + if dim != len(dim_order): + return sympy.false + + m = sympy.Integer(0) + r = sympy.true + + # special case for trivial C dimension. default to NCHW + r &= sympy.Ne(strides[1], 0) + + for d in dim_order: + r &= sympy.Ne(sizes[d], 0) & (strides[d] >= m) + # Fallback to NCHW as default layout for ambiguous cases + # This is the flaw of implicit memory_format from strides. + # N111 tensor with identical strides for size 1 dimension; + # Two cases could lead us here: + # a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1]) + # b. N11W contiguous Tensor sliced on the W-dimension. + # ([N,1,1,1]@[W,W,W,W]) + if d == 0: + r &= sympy.Ne(m, strides[1]) + # This is necessary to: + # 1. distinguish the memory_format of N1H1; + # [H, 1, 1, 1] channels_last stride + # [H, H, 1, 1] contiguous stride + # 2. permutation of 1C1W: + # [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3) + # [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as + # channels_last + m = strides[d] * sympy.Max(sizes[d], 1) + + return r + + +def sympy_is_channels_last_strides_2d(sizes, strides): + return sympy_is_channels_last_strides_generic(sizes, strides, [1, 3, 2, 0]) + + +def sympy_is_channels_last_strides_3d(sizes, strides): + return sympy_is_channels_last_strides_generic(sizes, strides, [1, 4, 3, 2, 0]) + + +def _sympy_is_non_overlapping_and_dense_indicator(sizes, strides): + from torch.utils._sympy.functions import IsNonOverlappingAndDenseIndicator + + return IsNonOverlappingAndDenseIndicator(*sizes, *strides) + + +sizes_strides_methods = { + # TODO: These could also be done with indicators, maybe it is better + # for reasoning to do it that way + "is_contiguous": sympy_is_contiguous, + "is_channels_last_contiguous_2d": sympy_is_channels_last_contiguous_2d, + "is_channels_last_contiguous_3d": sympy_is_channels_last_contiguous_3d, + "is_channels_last_strides_2d": sympy_is_channels_last_strides_2d, + "is_channels_last_strides_3d": sympy_is_channels_last_strides_3d, + "is_non_overlapping_and_dense_indicator": _sympy_is_non_overlapping_and_dense_indicator, +} + +alternate_impl_if_hinted_methods = { + "sym_min": builtins.min, + "sym_max": builtins.max, +} + + +def to_node(self, num): + if isinstance(num, SymTypes): + return num.node + elif type(num) is bool: + return self.wrap_bool(num) + elif type(num) is int: + return self.wrap_int(num) + elif type(num) is float: + return self.wrap_float(num) + else: + # NotImplemented is important so that Python tries the + # other magic method + return NotImplemented + + +def wrap_node(x): + # TODO: let C++ also take advantage of this + if isinstance(x, SymNode) and x.constant is not None: + return x.constant + if x.is_int(): + return SymInt(x) + elif x.is_float(): + return SymFloat(x) + elif x.is_bool(): + return SymBool(x) + else: + raise AssertionError(f"unrecognized return type {x}") + + +def method_to_operator(method): + return METHOD_TO_OPERATOR[method] + + +def _make_node_magic(method, func): + func = lru_cache(256)(func) + + if method in magic_methods_on_operator_with_trailing_underscore: + method_attr = f"{method}_" + else: + method_attr = method + + def 
binary_magic_impl(self, other): + from torch.fx.experimental.symbolic_shapes import safe_expand + + op = method_to_operator(method) + + out_hint = None + if self.hint is not None and other.hint is not None: + out_hint = op(self.hint, other.hint) + + alternate_impl = alternate_impl_if_hinted_methods.get(method) + if alternate_impl and out_hint is not None: + return to_node(self, alternate_impl(wrap_node(self), wrap_node(other))) + + if sym_function_mode(): + return to_node( + self, handle_sym_dispatch(op, (wrap_node(self), wrap_node(other)), {}) + ) + assert isinstance(other, SymNode) + # TODO: consider constant prop here + try: + out = func(self.expr, other.expr) + except Exception: + log.warning("failed to eval %s(%s, %s)", method, self.expr, other.expr) + raise + out = safe_expand(out) + pytype: Type + # This is not strictly correct. In Python, a**b may return complex when + # a < 0 and b is a float: (-1)**2.1. Same for sympy.sqrt(-3.14). This + # returns a float while both arguments are ints: 2**(-1). Also, max and + # min do not type promote. To avoid having data-dependent control flow + # here, we just set the type to float if one of the args is a float. In + # case of a type mismatch, we assume that it will be detected during + # evaluation. + if method in always_float_magic_methods: + pytype = float + elif method in always_bool_magic_methods: + pytype = bool + elif self.pytype is float or other.pytype is float: + pytype = float + else: + pytype = self.pytype + + if ( + pytype is not None + and out_hint is not None + and not isinstance(out_hint, SymTypes) + ): + out_hint = pytype(out_hint) + + # Create a FX node that corresponds to the operation being applied to + # this node. + fx_node, _ = self.shape_env.create_fx_call_function( + op, (self.fx_node, other.fx_node) + ) + return SymNode(out, self.shape_env, pytype, out_hint, fx_node=fx_node) + + def unary_magic_impl(self): + from torch.fx.experimental.symbolic_shapes import safe_expand + + op = method_to_operator(method) + if sym_function_mode(): + return to_node(self, handle_sym_dispatch(op, (wrap_node(self),), {})) + # TODO: consider constant prop here + expr = self.expr + if method == "floor" or method == "ceiling": + expr = self.shape_env._simplify_floor_div(expr) + + try: + out = func(expr) + except Exception: + log.warning("failed to eval %s(%s)", method, expr) + raise + + out_hint = None + if self.hint is not None: + out_hint = op(self.hint) + out = safe_expand(out) + pytype: Type + if method in always_int_magic_methods: + pytype = int + elif method in always_float_magic_methods: + pytype = float + else: + pytype = self.pytype + + fx_node, _ = self.shape_env.create_fx_call_function(op, (self.fx_node,)) + return SymNode(out, self.shape_env, pytype, out_hint, fx_node=fx_node) + + if method in unary_magic_methods: + setattr(SymNode, f"_{method_attr}", unary_magic_impl) + elif method == "sym_ite": + + def sym_ite_impl(pred_node, then_node, else_node): + from torch.fx.experimental.symbolic_shapes import safe_expand + + out_hint = then_node.hint if pred_node.hint else else_node.hint + if sym_function_mode(): + return to_node( + pred_node, + handle_sym_dispatch( + sym_ite, + ( + wrap_node(pred_node), + wrap_node(then_node), + wrap_node(else_node), + ), + {}, + ), + ) + + try: + out = func(pred_node.expr, then_node.expr, else_node.expr) + except Exception: + log.warning( + "failed to eval %s(%s, %s, %s)", + method, + pred_node.expr, + then_node.expr, + else_node.expr, + ) + raise + + out = safe_expand(out) + fx_node, _ = 
pred_node.shape_env.create_fx_call_function( + sym_ite, (pred_node.fx_node, then_node.fx_node, else_node.fx_node) + ) + return SymNode( + out, pred_node.shape_env, then_node.pytype, out_hint, fx_node=fx_node + ) + + setattr(SymNode, f"_{method_attr}", sym_ite_impl) + else: + setattr(SymNode, f"_{method_attr}", binary_magic_impl) + + +def _make_node_sizes_strides(method, func): + # NB: don't LRU cache, lots of arguments + + def sizes_strides_impl(self, sizes, strides): + op = getattr(sys.modules[__name__], method) + if sym_function_mode(): + return to_node( + self, + handle_sym_dispatch( + op, + ([wrap_node(s) for s in sizes], [wrap_node(s) for s in strides]), + {}, + ), + ) + size_exprs = [s.expr for s in sizes] + stride_exprs = [s.expr for s in strides] + try: + out = func(size_exprs, stride_exprs) + except Exception: + log.warning("failed to eval %s(%s, %s)", method, size_exprs, stride_exprs) + raise + # bool is never expandable + + size_hints = [] + out_hint = None + for s in sizes: + if s.hint is None: + break + size_hints.append(s.hint) + else: + stride_hints = [] + for s in strides: + if s.hint is None: + break + stride_hints.append(s.hint) + else: + out_hint = op(size_hints, stride_hints) + + # NB: This is the indicator function, not the actual bool! + pytype: Type + if method.endswith("_indicator"): + pytype = int + else: + pytype = bool + return SymNode(out, self.shape_env, pytype, out_hint) + + setattr(SymNode, f"_{method}", sizes_strides_impl) + + # TODO: This is technically hotpath, but in the ideal end state + # guards on this will resolve at a higher level so you never + # spend time in this code + def sizes_strides_user(sizes, strides): + import sympy + + from torch.fx.experimental.symbolic_shapes import ( + eval_is_non_overlapping_and_dense, + ) + + for a in itertools.chain(sizes, strides): + if isinstance(a, SymInt): + return wrap_node( + getattr(a.node, method)( + [to_node(a.node, b) for b in sizes], + [to_node(a.node, b) for b in strides], + ) + ) + if method == "is_non_overlapping_and_dense_indicator": + return eval_is_non_overlapping_and_dense(sizes, strides) + else: + # TODO: this is an awful implementation + return bool( + func( + [sympy.sympify(a) for a in sizes], + [sympy.sympify(a) for a in strides], + ) + ) + + # Skip for is_non_overlapping_and_dense_indicator + if not hasattr(sys.modules[__name__], method): + setattr(sys.modules[__name__], method, sizes_strides_user) + + +for method, func in magic_methods.items(): + _make_node_magic(method, func) + +for method, func in sizes_strides_methods.items(): + _make_node_sizes_strides(method, func) + + +def _make_user_magic(method, user_type): + # User magic takes care of wrapping the other operand into a node, + # so that our internal logic can assume everything is nodes + + if method in magic_methods_on_operator_with_trailing_underscore: + method_attr = f"{method}_" + else: + method_attr = method + + def get_constant(x: Union[SymInt, int, SymFloat, float, SymBool, bool]): + if isinstance(x, (int, float, bool)): + return x + if isinstance(x, SymBool): + return x.node.guard_bool("", 0) + raise AssertionError("expect to be called with constant SymBools") + + def is_constant(x): + if isinstance(x, (int, float, bool)): + return True + if isinstance(x, (SymInt, SymFloat, SymBool)): + return x.node.is_constant() + return False + + if method in bool_becomes_int_magic_methods: + + def promote(x): + """Implements True+True=2, which works in python but not sympy""" + if isinstance(x, SymBool): + return 
SymInt(x.node.wrap_int(int(x))) + return x + + else: + + def promote(x): + return x + + # Before and after performing the operation, check if any operands are constant. + # If so, extract out the constant values first. If `self` itself is a + # constant, then "redispatch" by calling back into the operator. Sometimes + # this means that operations involving SymBool return plain bools. + # Alternatively, we could also rewrap into constant Symbool (i.e. by + # implementing wrap_bool in ConstantSymNodeImpl), but we're not doing that + # today for no particular reason. + def unary_magic_impl(self): + self = promote(self) + if is_constant(self): + return (method_to_operator(method))(get_constant(self)) + return wrap_node(getattr(self.node, method_attr)()) + + def binary_magic_impl(self, other): + self = promote(self) + other = promote(other) + if is_constant(self): + return (method_to_operator(method))(get_constant(self), other) + if is_constant(other): + other = get_constant(other) + other_node = to_node(self.node, other) + if other_node is NotImplemented: + return NotImplemented + ret = wrap_node(getattr(self.node, method_attr)(other_node)) + return get_constant(ret) if is_constant(ret) else ret + + def rbinary_magic_impl(self, other): + self = promote(self) + other = promote(other) + if is_constant(self): + return (method_to_operator(method))(get_constant(self), other) + if is_constant(other): + other = get_constant(other) + other_node = to_node(self.node, other) + if other_node is NotImplemented: + return NotImplemented + ret = wrap_node(getattr(other_node, method_attr)(self.node)) + return get_constant(ret) if is_constant(ret) else ret + + if method in unary_magic_methods: + setattr(user_type, f"__{method}__", unary_magic_impl) + elif method == "sym_ite": + + def sym_ite_magic_impl(pred, then_val, else_val): + pred_node = pred.node + then_node = to_node(pred_node, then_val) + else_node = to_node(pred_node, else_val) + if then_node is NotImplemented or else_node is NotImplemented: + return NotImplemented + assert ( + isinstance(then_node, SymNode) + and isinstance(else_node, SymNode) + and then_node.pytype == else_node.pytype + ) + ret = wrap_node(getattr(pred.node, method_attr)(then_node, else_node)) + return get_constant(ret) if ret.node.is_constant() else ret + + setattr(user_type, f"__{method}__", sym_ite_magic_impl) + else: + setattr(user_type, f"__{method}__", binary_magic_impl) + if method in reflectable_magic_methods: + setattr(user_type, f"__r{method}__", rbinary_magic_impl) + + +for method, func in magic_methods.items(): # type: ignore[assignment] + if method in only_bool_magic_methods: + _make_user_magic(method, SymBool) + continue + if method in also_bool_magic_methods or method in bool_becomes_int_magic_methods: + _make_user_magic(method, SymBool) + _make_user_magic(method, SymInt) + _make_user_magic(method, SymFloat) + +del method +del func diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/symbolic_shapes.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/symbolic_shapes.py new file mode 100644 index 0000000000000000000000000000000000000000..da5897365dc74969aeb1b895eeafd6a73b6989a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/symbolic_shapes.py @@ -0,0 +1,3776 @@ +import builtins +import collections +import functools +import inspect +import itertools +import logging +import math +import operator +import re +import sys +import threading +import traceback +from collections import defaultdict +from 
contextlib import contextmanager +from dataclasses import dataclass, field +from enum import Enum +from functools import lru_cache +from typing import Any, cast, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, Iterable + +import torch +import torch.fx +import torch.fx.traceback as fx_traceback +from torch.fx.experimental import _config as config + +from torch.fx.experimental.recording import ( + FakeTensorMeta, + ShapeEnvEvent, + record_shapeenv_event, + replay_shape_env_events, + shape_env_check_state_equal +) +from torch.fx.experimental.sym_node import SymNode, SymTypes + +# NB: The sym_* functions are used via getattr() and must be imported here. +from torch import SymBool, SymFloat, SymInt +from torch._guards import ShapeGuard, Source, TracingContext +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from torch.utils._sympy.functions import FloorDiv, Mod, IsNonOverlappingAndDenseIndicator +from torch.utils._sympy.solve import try_solve +from torch.utils._sympy.value_ranges import bound_sympy, SymPyValueRangeAnalysis, ValueRanges, ValueRangeError +from torch.utils._sympy.singleton_int import SingletonInt +from torch.utils._traceback import format_frame, CapturedTraceback +from torch._utils_internal import signpost_event + +from torch._logging import LazyString + +InputList = List +DimList = List + +log = logging.getLogger(__name__) + +class GuardOnDataDependentSymNode(RuntimeError): + pass + +import sympy +from sympy.printing.str import StrPrinter +from sympy.printing.precedence import precedence, PRECEDENCE + +aten = torch._ops.ops.aten # type: ignore[has-type] + +__all__ = [ + "has_symbolic_sizes_strides", "create_contiguous", "ShapeEnv", "is_concrete_int", + "guard_int", "guard_float", "guard_scalar", "canonicalize_bool_expr", + "hint_int", "SYMPY_INTERP", "free_symbols", "is_symbol_binding_fx_node", + "is_concrete_bool", "SHAPEENV_EVENT_KEY", "CURRENT_NODE_KEY", + "has_free_symbols", "sym_eq", "SymbolicContext", "StatelessSymbolicContext", "StatefulSymbolicContext" +] + +# FX node metadata keys for symbolic shape FX graph. +SHAPEENV_EVENT_KEY = "shapeenv_event" +CURRENT_NODE_KEY = "current_node" + +# These are modules that contain generic code for interacting with ShapeEnv +# which are unlikely to identify a particular interesting guard statement +@lru_cache(None) +def uninteresting_files(): + import torch._inductor.sizevars + import torch._library.abstract_impl + mods = [ + sys.modules[__name__], + torch.fx.experimental.recording, + torch.fx.experimental.sym_node, + torch, + torch._inductor.sizevars, + torch._library.abstract_impl, + ] + return {inspect.getfile(m) for m in mods} + +# We don't bother with the metaclass as all of the dispatching logic happens +# entirely from Python +# +# Didn't bother with ancestors for now, unlikely to have multiple modes for +# symints right now + +class ConstraintViolationError(RuntimeError): + pass + +def has_symbolic_sizes_strides(elem): + return elem._has_symbolic_sizes_strides + +def create_contiguous(shape): + strides = [1] + for dim in reversed(shape[:-1]): + strides.append(dim * strides[-1]) + return list(reversed(strides)) + +def hint_int(a, fallback=None): + """ + Retrieve the hint for an int (based on the underlying real values as observed + at runtime). If no hint is available (e.g., because data dependent shapes), + if fallback is not None, use that instead (otherwise raise an error). 
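+
+    Example (illustrative; ``s`` stands for some SymInt produced during
+    tracing)::
+
+        hint_int(5)              # -> 5; plain ints pass through
+        hint_int(s, fallback=1)  # -> the runtime hint of s, or 1 if s
+                                 #    is unbacked and has no hint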
+ """ + if isinstance(a, torch.SymInt): + return a.node.require_hint(fallback) + assert type(a) is int, a + return a + +def has_hint(a): + if isinstance(a, SymTypes): + return a.node.has_hint() + return True + +def is_concrete_int(a: Union[int, SymInt]): + r""" Utility to check if underlying object + in SymInt is concrete value. Also returns + true if integer is passed in. + + Args: + a (SymInt or int): Object to test if it int + """ + assert isinstance(a, (SymInt, int)) + + if isinstance(a, int): + return True + + if isinstance(a.node.expr, sympy.core.numbers.Integer): + return True + + return False + +def canonicalize_bool_expr(expr: sympy.Expr): + r""" Canonicalize a boolean expression by transforming it into a lt / le + inequality and moving all the non-constant terms to the rhs. + We canonicalize And / Ors / Not via cnf and then canonicalize their subexpr + recursively + nb. sympy.Rel.canonical is not good enough https://github.com/sympy/sympy/issues/25924 + + Args: + expr (sympy.Expr): Expression to canonicalize + """ + # Canonicalise an inequality by transforming it into a lt / le + # inequality and moving all the non-constant terms to the rhs + # We canonicalise And / Ors / Not via cnf + # nb. Relational.canonical in sympy is broken + # https://github.com/sympy/sympy/issues/25924 + + if not isinstance(expr, (sympy.Rel, sympy.And, sympy.Or, sympy.Not, sympy.Eq, sympy.Ne)): + return expr + + if isinstance(expr, (sympy.And, sympy.Or, sympy.Not)): + expr = sympy.logic.boolalg.to_cnf(expr) + return _canonicalize_bool_expr_impl(expr) + +def _canonicalize_bool_expr_impl(expr: sympy.Expr): + if isinstance(expr, (sympy.And, sympy.Or)): + return type(expr)(*map(canonicalize_bool_expr, expr.args)) + + opposite = {sympy.Gt: sympy.Lt, sympy.Ge: sympy.Le} + if isinstance(expr, tuple(opposite.keys())): + lhs = expr.rhs - expr.lhs + t = opposite[type(expr)] + else: + assert isinstance(expr, (sympy.Lt, sympy.Le, sympy.Eq, sympy.Ne)) + lhs = expr.lhs - expr.rhs + t = type(expr) + rhs = 0 + if isinstance(lhs, sympy.Add): + cts = [] + variables = [] + for term in lhs.args: + if term.is_number: + cts.append(term) + else: + variables.append(term) + lhs = sympy.Add(*variables) + rhs = -sympy.Add(*cts) + return t(lhs, rhs) + +def is_concrete_bool(a: Union[bool, SymBool]): + r""" Utility to check if underlying object + in SymBool is concrete value. Also returns + true if integer is passed in. 
+ Args: + a (SymBool or bool): Object to test if it is a bool + """ + assert isinstance(a, (SymBool, bool)) + + if isinstance(a, bool): + return True + + if isinstance(a.node.expr, (sympy.logic.boolalg.BooleanTrue, sympy.logic.boolalg.BooleanFalse)): + return True + + return False + +def _iterate_exprs(val: Union[SymInt, torch.Tensor]) -> Iterable[sympy.Basic]: + if isinstance(val, SymTypes): + # This also applies to the jagged layout NestedTensor case, as + # singleton ints are not symbolic + if is_symbolic(val): + yield val.node.expr + elif isinstance(val, sympy.Basic): + yield val + elif isinstance(val, (int, float, bool)): + pass + elif isinstance(val, torch.Tensor): + yield from _iterate_exprs(val.size()) + yield from _iterate_exprs(val.stride()) + yield from _iterate_exprs(val.storage_offset()) + elif isinstance(val, (tuple, list)): + for s in val: + yield from _iterate_exprs(s) + else: + raise AssertionError(f"cannot extract sympy expressions from {val} {type(val)}") + +def free_symbols(val: Union[SymInt, torch.Tensor]) -> Set[sympy.Symbol]: + itr = _iterate_exprs(val) + # we need at least 1 to call union, so we hand code the identity + try: + first_expr = next(itr) + except StopIteration: + return set() + + return first_expr.free_symbols.union(*(e.free_symbols for e in itr)) + +def has_free_symbols(val: Union[SymInt, torch.Tensor]) -> bool: + """Faster version of bool(free_symbols(val))""" + return not all(e.is_number for e in _iterate_exprs(val)) + +# Like free_symbols, but filtered to only report unbacked symbols +def free_unbacked_symbols(x): + # NB: keep synced with is_unbacked_symint + return {s for s in free_symbols(x) if s.name.startswith(("i", "f"))} + +# WARNING: Don't use this on Dynamo produced graphs, they don't have meta +# setup! +def is_symbol_binding_fx_node(node) -> Optional[sympy.Symbol]: + if ( + node.op == "placeholder" and + "val" in node.meta and + isinstance(node.meta["val"], torch.SymInt) and + isinstance(node.meta["val"].node.expr, sympy.Symbol) + ): + return node.meta["val"].node.expr + return None + +def find_symbol_binding_fx_nodes(graph): + return { + node.meta["val"].node.expr: node + for node in graph.nodes + if is_symbol_binding_fx_node(node) + } + +def definitely_true(a): + """ + Returns True only if we can tell that a is True, possibly introducing + a guard in the process. If a depends on some unbacked SymInt, we may + return False even though there may exist a possible value of the SymInt + that would cause the expression to return True. + + When is it appropriate to use definitely_true? First, if you can use + a higher level combinator like parallel_or/parallel_and, prefer using + those instead; they are definitely safe (modulo short-circuiting). + Second, it can be used if the program would behave equivalently if + definitely_true always returned False (parallel_or/parallel_and are + examples of this pattern, modulo short-circuiting). Finally, it may even + be OK if the program wouldn't behave equivalently, so long as the + change is semantics preserving. It can be semantics preserving if + the program errors in more cases than it did previously (but otherwise + behaves identically), or if it changes some quantity in a way that + doesn't matter (e.g., strides often fall in this bucket.) + """ + if isinstance(a, SymBool): + if a.node.has_hint(): + return guard_bool(a) + else: + return False + return bool(a) + +def definitely_false(a): + """ + Returns True only if we can tell that a is False, possibly introducing + a guard in the process.
If a depends on some unbacked SymInt, we may + return False even though there may exist a possible value of the SymInt + that would cause the expression a to be False. See definitely_true + for more usage guidance. + """ + if isinstance(a, SymBool): + if a.node.has_hint(): + return not guard_bool(a) + else: + return False + return not bool(a) + +# TODO: could improve parallel_or/parallel_and by avoiding guards +# if there exists a quantity that can be handled un-guardedly. However, +# for backed SymInts, avoiding guards doesn't really matter in practice, +# so I chose not to do it. + +def parallel_or(*args): + """ + Evaluate the logical OR of several arguments, avoiding guarding on + unbacked SymInts if another argument is definitely True. + """ + if any(definitely_true(a) for a in args): + return True + return any(args) + +def parallel_and(*args): + """ + Evaluate the logical AND of several arguments, avoiding guarding on + unbacked SymInts if another argument is definitely False. + """ + if any(definitely_false(a) for a in args): + return False + return all(args) + +def sym_eq(x, y): + """ + Like ==, but when run on list/tuple, it will recursively test equality + and use sym_and to join the results together, without guarding. + """ + if (isinstance(x, tuple) and isinstance(y, tuple)) or (isinstance(x, list) and isinstance(y, list)): + if len(x) != len(y): + return False + return functools.reduce(operator.and_, map(sym_eq, x, y), True) + elif isinstance(x, (int, torch.SymInt)) and isinstance(y, (int, torch.SymInt)): + return x == y + else: + raise AssertionError(f"unexpected sym_eq between {type(x)} {type(y)}") + +def guard_scalar(a): + if isinstance(a, (SymBool, bool)): + return guard_bool(a) + elif isinstance(a, (SymInt, int)): + return guard_int(a) + elif isinstance(a, (SymFloat, float)): + return guard_float(a) + else: + raise AssertionError(f"unrecognized scalar {a}") + + +@record_shapeenv_event() +def _constrain_symbol_range(shape_env, s: sympy.Symbol, compiler_min: int, compiler_max: int, runtime_min: int, runtime_max: int): + log.debug("_constrain_symbol_range %s [%s, %s] [%s, %s]", s, compiler_min, compiler_max, runtime_min, runtime_max) + if r := shape_env.var_to_range.get(s, None): + shape_env.var_to_range[s] = ValueRanges( + builtins.max(r.lower, compiler_min), builtins.min(r.upper, compiler_max) + ) + else: + shape_env.var_to_range[s] = ValueRanges(compiler_min, compiler_max) + + if r := shape_env.runtime_var_to_range.get(s, None): + shape_env.runtime_var_to_range[s] = ValueRanges( + builtins.max(r.lower, runtime_min), builtins.min(r.upper, runtime_max) + ) + else: + shape_env.runtime_var_to_range[s] = ValueRanges(runtime_min, runtime_max) + + +def _advise_is_size(a): + """ + Don't use this directly; use torch._check_is_size instead. + + This is a softer version of _constrain_range_for_size (with min=0, + max=Inf). Instead of forcibly constraining a variable (and erroring if we + failed to constrain it), it will simply advise us that a size is + constrained in some way. We will always defer a runtime assert for this + constraint if we cannot prove it at compile-time, but we only + *sometimes* learn useful extra information at compile-time with this + information. This is in contrast to constrain_range_for_size, where if + you don't call that on a fresh unbacked symint, chances are we will choke. + + TODO: Make Dynamo handle this appropriately if this is seen in Dynamo-ed + code.
Right now this is only really used in code with AOTAutograd trace + through, so it is not a big problem that this isn't supported, but in + principle all of this code should be Dynamo'able too. + + TODO: I didn't support min/max because I didn't have a use case where this + actually helped. In principle we can support it, it just makes the + implementation below more complicated. + """ + + # This must always succeed, because the sole allowed caller _check_is_size + # was responsible for expect_true'ing this + assert a >= 0 + + # NB: it's important not to constrain range for size for *hinted* SymInts, + # because it is not only unsound, it will immediately trip our asserts + # that hints have to be consistent with static analysis! If you somehow + # have an unbounded SymInt that later constrains to 1, this will be + # inconsistent with the range + if ( + isinstance(a, SymInt) + and isinstance(a.node, SymNode) + and not a.node.has_hint() + and isinstance(a.node.expr, sympy.Symbol) + ): + _constrain_range_for_size(a) + +@record_shapeenv_event() +def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] = None): + """ + This function is NOT INTENDED to be used by itself. + """ + + if isinstance(a, (SymFloat, SymBool)): + raise ValueError("Constraining SymFloat/SymBool is nyi") + + assert isinstance(a, SymInt), "can only constrain range for SymInt" + assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI" + + if min is None: + min = 0 + if max is None: + max = sympy.oo + + if max <= 2: + raise ValueError(f"Maximum value to constrain_as_size must be greater than 2, but was {max}") + + if max < min: + raise ValueError( + "Maximum value to constrain_as_size can't be less than the specified min value, " + f"received min={min} and max={max}" + ) + + compiler_min = 2 if min < 2 else min + + _constrain_symbol_range( + a.node.shape_env, + a.node.expr, + compiler_min=compiler_min, + compiler_max=max, + runtime_min=min, + runtime_max=max + ) + + +# inclusive both ways +@record_shapeenv_event() +def constrain_range(a, *, min: Optional[int], max: Optional[int] = None): + """ + Applies a constraint that the passed in SymInt must lie between min-max + inclusive-inclusive, WITHOUT introducing a guard on the SymInt (meaning + that it can be used on unbacked SymInts). If min/max are None, we assume + that the dimension is unbounded in that direction. Repeated application + of constrain_range intersects the ranges. This is a fairly low level API + that doesn't have a lot of safety guarantees (TODO: provide higher level + APIs). + + Currently, we use this API in the following circumstance: when we allocate + an unbacked SymInt, denoting an integer quantity which is data dependent, + we ordinarily do not know anything about what values it may take. This + means that any sort of guard on it will immediately fail. However, in + many cases, we know something about the unbacked SymInt: for example, we + know that nonzero(x).size(0) must be >= 0. We use constrain_range to + narrow the possible range, declaring that negative symbols are impossible. + This permits us to definitely answer True to queries like 'nnz >= 0', even if + we don't know what the actual (hinted) value of 'nnz' is.
In fact, we + actually use constrain_range to unsoundly discharge common guards: for an + unbacked SymInt produced by nonzero, we will also assume that it is not + equal to 0/1 (even though these are perfectly possible values at runtime), + because we generally expect graphs that are valid for N=2 to also be valid + for N=1. + + .. warning:: + If you use constrain_range in the context of tracing, we do NOT check + that the constraint was actually valid at runtime! In fact, we + cannot (easily) do so, as we currently unsoundly assume that unbacked + SymInt can never be zero/one, even if it may actually take on these + values at runtime (we assume that a graph that is valid for N=2 will + also be valid for N=1). + """ + if min is None: + min = -sympy.oo + if max is None: + max = sympy.oo + + if max < min: + raise ValueError( + "Maximum value to constrain_range can't be less than the specified min value, " + f"received min={min} and max={max}" + ) + + if isinstance(a, int): + if not (min <= a <= max): + raise ValueError(f"Invalid value {a} for range [{min}:{max}]") + return + + if isinstance(a.node.expr, sympy.Integer): + if not (min <= int(a.node.expr) <= max): + raise ValueRangeError(f"Invalid value {int(a.node.expr)} for range [{min}:{max}]") + return + assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI" + + # TODO: Shouldn't we install a guard if the symbol is backed? Or is the + # semantics that this is an "unchecked" assert (but is this actually + # something useful? Might be better to restrict only for unbacked + # SymInt). + _constrain_symbol_range( + a.node.shape_env, + a.node.expr, + compiler_min=min, + compiler_max=max, + runtime_min=min, + runtime_max=max + ) + + +@record_shapeenv_event() +def constrain_unify(a, b): + """ + Given two SymInts, constrain them so that they must be equal. NB: + this will not work with SymInts that represent nontrivial expressions + (yet!) + """ + # TODO: Maybe dedupe this with _maybe_guard_eq? + if not isinstance(a, SymInt): + if not isinstance(b, SymInt): + assert a == b + else: + assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI" + shape_env = b.node.shape_env + shape_env.replacements[b.node.expr] = sympy.Integer(a) + else: + # TODO: Actually, we can support this as long as one of them is a symbol. + # NB: We can't actually do "unification" as our operators are not + # injective + assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI" + shape_env = a.node.shape_env + if not isinstance(b, SymInt): + shape_env.replacements[a.node.expr] = sympy.Integer(b) + else: + assert a.node.shape_env is b.node.shape_env + assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI" + new_var = shape_env._find(a.node.expr) + shape_env.replacements[b.node.expr] = new_var + +# Assume that a boolean is true for the purposes of subsequent symbolic +# reasoning. This will keep track of corresponding runtime checks to verify +# that the result is upheld: either as a regular guard, or as a special set +# of asserts which are triggered when an unbacked SymInt is allocated. +# +# DO NOT use this function for these cases: +# +# - This is inappropriate for "branching" conditions (where both +# true and false result in valid programs). We will always assume +# the condition evaluates true, and so it will never be possible +# to trace the false condition when you use it.
For true branching +# on unbacked SymInts, you must use torch.cond; if you incorrectly +# use expect_true in this case, you will make the false branch +# unreachable (as we will simply assume that only the true branch +# is ever exercised). +# +# - This is inappropriate for situations where you know some other system +# invariant guarantees that this property holds, since you don't +# really need to insert a runtime check in that case. Use something +# like constrain_range in that case. +# +# This API has a hitch. To avoid having to reimplement error reporting +# capabilities, this function CAN return False. The invariant is that +# the surrounding code must raise an error when this function returns +# False. This is quite low level, so we recommend using other functions +# like check() which enforce this in a more intuitive way. +# +# By the way, this name is a nod to the __builtin_expect macro, +# which is used similarly (but unlike __builtin_expect, you MUST fail +# in the unlikely branch.) (I think expect is a good name; in recent +# versions of C++, this is replaced with [[likely]], which is weaker +# and not accurate for this function!) +def expect_true(a, skip: int = 0): + if isinstance(a, SymBool): + # TODO: check perf implications of this + frame = inspect.currentframe() + for _ in range(skip + 1): # always run this loop at least once + frame = frame.f_back + return a.node.expect_true(frame.f_code.co_filename, frame.f_lineno) + assert type(a) is bool, a + return a + +def guard_bool(a): + if isinstance(a, SymBool): + return a.node.guard_bool("", 0) # NB: uses Python backtrace + assert type(a) is bool, a + return a + +def guard_int(a): + if isinstance(a, SymInt): + return a.node.guard_int("", 0) # NB: uses Python backtrace + assert type(a) is int, a + return a + +def guard_float(a): + if isinstance(a, SymFloat): + return a.node.guard_float("", 0) # NB: uses Python backtrace + assert isinstance(a, float), a + return a + +# Given a GraphModule, return all the FakeTensors for all the placeholders +def fx_placeholder_vals(gm): + return [n.meta['val'] for n in gm.graph.nodes if n.op == "placeholder"] + +def fx_placeholder_targets(gm): + return [n.target for n in gm.graph.nodes if n.op == "placeholder"] + +# Given a GraphModule and arguments to run it with, evaluate that the guards +# for its associated ShapeEnv are satisfied by the passed arguments. This +# WILL check for duck sizing. +def eval_guards(gm, *args, ignore_static=True): + return gm.shape_env.evaluate_guards_for_args(fx_placeholder_vals(gm), args, ignore_static=ignore_static) + +def bind_symbols(gm, *args): + return gm.shape_env.bind_symbols(fx_placeholder_vals(gm), args) + +def _assert_bound_is_rational(expr: sympy.Expr, bound: ValueRanges): + """ + We assert that the bounds are either Boolean, or not finite, or can be computed + in exact precision via rational arithmetic. + The only exception to this is the rare case when the user calls `sqrt(s0)`; + sqrt is turned into sympy.Pow, so we just match for that (it matches more things, but still) + """ + assert bound.lower.is_rational or bound.lower.is_Boolean or not bound.lower.is_finite or expr.has(sympy.Pow), (bound, expr) + assert bound.upper.is_rational or bound.upper.is_Boolean or not bound.upper.is_finite or expr.has(sympy.Pow), (bound, expr) + +class DimDynamic(Enum): + """ + Controls how to perform symbol allocation for a dimension.
It is always + sound to default this to DYNAMIC, but the policies DUCK and STATIC can + result in better trace-time and compile-time performance, as they reduce + the number of allocated symbols and generally make your graph more static. + + NB: If we notice you've applied a constraint to the dimension, we will + force it to DYNAMIC for simplicity. + + DimDynamic is controlled by a variety of higher level UX features. + Currently: + + - In eager mode, the default policy is DUCK. + - The default is changed to STATIC with assume_static_by_default. + - An individual dim is marked DYNAMIC if you mark_dynamic_dim. + - In export mode, the default policy is STATIC. + - An individual dim is marked DYNAMIC if you mention it as dynamic_dim + in the constraints kwarg. + """ + # Treat the dimension symbolically + DYNAMIC = 0 + # Treat the dimension symbolically, but if its hint matches another + # dynamic dimension, unify the two symbols ("duck sizing") + DUCK = 1 + # Treat the dimension statically based on its hint + STATIC = 2 + + +# NB: These constraints affect both clients and backends: given some +# constraint C, the client must pass inputs that satisfy the constraint, +# while a backend must not introduce guards BEYOND this constraint. +# For clarity, we document the implications on both sides for both the client +# and the backend. +# +# NB: These constraints are on a *single* dimension. In principle, we could +# also have multi-dimension constraints, but our guess is that this is not +# actually useful and so we are not supporting it right now. +# +# NB: Strict constraints are typically only suitable for export, as in eager +# a backend like inductor may validly introduce extra, discretionary guards +# to improve performance of code. A StrictMinMaxConstraint would be brittle +# under future optimizations performed by inductor; we don't guarantee +# eager code with StrictMinMaxConstraint will keep working in the future! + +@dataclass(frozen=True) +class Constraint: + warn_only: bool + +@dataclass(frozen=True) +class StrictMinMaxConstraint(Constraint): + """ + For clients: the size at this dimension must be within 'vr' (which + specifies a lower and upper bound, inclusive-inclusive) AND it + must be non-negative and should not be 0 or 1 (but see NB below). + + For backends: there must not be any guards on this dimension which + are not implied by the given lower and upper bound. Regardless of + the lower bound, the backend can assume the size is non-negative + and that it is not 0 or 1. + + An unbounded StrictMinMaxConstraint can be thought of as a strict version + of "RelaxedUnspecConstraint". + + NB: Export will often unsoundly assume that a graph works for 0/1, even + though at trace time we assumed size is not 0 or 1. The idea is that + if we produce a graph that works for a range of values, it will be OK + for N=0/1 too. + """ + vr: ValueRanges + + def render(self, source: Source): + # TODO: better printing for -oo and oo + return f"{self.vr.lower} <= {source.name()} <= {self.vr.upper}" + +@dataclass(frozen=True) +class RelaxedUnspecConstraint(Constraint): + """ + For clients: no explicit constraint; constraint is whatever is implicitly + inferred by guards from tracing. + + For backends: there must exist at least TWO possible values for the + size at this dimension which satisfy the guards for this dimension. + + In other words, this constraint helps us distinguish between "we don't + care if this dimension specializes or not" versus "this dimension must be + unspecialized." 
However, this constraint doesn't say very much about what + specialization is permitted; for example, if we guard on a size being + even, this would still be acceptable under an unspec constraint. This + makes RelaxedUnspecConstraint useful for eager mode, where your backend compiler + may add constraints to otherwise dynamic dimensions; we can't assert that + there are NO guards as this is brittle because compilers should be able to + add extra constraints. If you want to assert that there are no guards, + use StrictMinMaxConstraint with an unbounded ValueRanges. + """ + def render(self, source: Source): + return f"RelaxedUnspecConstraint({source.name()})" + +# NB: None here indicates the client constraint is whatever is implicitly +# inferred by guards from tracing, and that a backend can add whatever guards +# it wants (including fully specializing the value). +DimConstraint = Union[StrictMinMaxConstraint, RelaxedUnspecConstraint, None] + +@dataclass(frozen=True) +class EqualityConstraint(Constraint): + """ + Given pairs of sources corresponding to pairs of dynamic dimensions that + are specified equal, represent them in a union-find data structure so that + we can efficiently check whether two such sources are transitively equal. + """ + source_pairs: List[Tuple[Source, Source]] + + def __post_init__(self): + object.__setattr__(self, "_parents", {}) + for source1, source2 in self.source_pairs: + self._union(self._find(source1), self._find(source2)) + + def _find(self, source): + if source in self._parents: + return self._find(self._parents[source]) + else: + return source + + def _union(self, root1, root2): + if root1 != root2: + self._parents[root1] = root2 + + def render(self): + buf = ", ".join( + f"{source1.name()} == {source2.name()}" + for (source1, source2) in self.source_pairs + ) + return "{" + buf + "}" + + def is_equal(self, source1, source2): + return self._find(source1) == self._find(source2) + + +def _assert_symbol_context(symbolic_context): + assert isinstance(symbolic_context, SymbolicContext), "Invalid symbolic_context object" + assert type(symbolic_context) is not SymbolicContext, "Illegal usage of symbolic_context ABC" + + +@dataclass(frozen=True) +class SymbolicContext: + """ + Data structure specifying how we should create symbols in + ``create_symbolic_sizes_strides_storage_offset``; e.g., should + they be static or dynamic. + + This is an abstract base class because we are probably going to add + another version of this that says "use exactly these SymInts, don't + allocate fresh symbols." + """ + pass + + +@dataclass(frozen=True) +class StatelessSymbolicContext(SymbolicContext): + """ + Create symbols in ``create_symbolic_sizes_strides_storage_offset`` via + a symbolic_context determination as given by ``DimDynamic`` and ``DimConstraint``. + This will cause fresh symbols to be allocated + """ + dynamic_sizes: DimList[DimDynamic] + constraint_sizes: DimList[DimConstraint] = None + # TODO: add storage offset and stride symbolic_context + + def __post_init__(self): + if self.constraint_sizes is None: + object.__setattr__(self, 'constraint_sizes', [None] * len(self.dynamic_sizes)) + + +# note [Tensor Fakification and Symbol Caching] +# +# As of the time of this note, dynamo creates a fresh fake tensor mode for backends. +# The reason we do this is because there are certain classes of operations, namely, +# metadata mutations, that change tensor size, stride, etc. 
This means that the fake tensor +# state at the end of a dynamo trace is different than the fake tensor state at the beginning +# of a trace. Backends like aot_autograd need a fresh fake tensor to correctly track metadata mutation, +# view relationships, etc. +# +# As we create a new fake mode, we also lose the memoization that comes with it. Rather than +# transfer the memoization cache, we instead transfer the shape env. However, with this +# comes nuance, as dynamo is selective in how it makes symbolic shapes. Due to strategies in +# automatic dynamic and constraints, the policy for which dims are dynamic is nuanced and varies across +# recompilations. +# +# In order to preserve the symbolic decisions made during dynamo tensor fakification, we pass +# a StatefulSymbolicContext at creation time. This object is tracked, per tensor, on the TracingContext. +# The lifecycle of this object should match the lifecycle of the original dynamo tracked tensor, and it is +# safe to reuse this object as many times as necessary to create a fake tensor. Fake tensors +# created with new fake modes should produce the same exact symbols as the original, provided the same shape_env +# is used. +# TODO(voz): Shape env validation +@dataclass(frozen=True) +class StatefulSymbolicContext(StatelessSymbolicContext): + """ + Create symbols in ``create_symbolic_sizes_strides_storage_offset`` via + a symbolic_context determination as given by a cache of Source:Symbol. A cache hit + will reuse a stored symbol, and a cache miss will write to this cache. + + This behaves like StatelessSymbolicContext, except the cache supersedes the + other values - dynamic_sizes and constraint_sizes will not be read on a cache + hit. + + It is the cache owner's responsibility to maintain the lifecycle of the cache + w/r/t different shape_envs, clearing, etc. + """ + tensor_source: Source = None + source_to_symint_node_cache: Dict["TensorPropertySource", SymInt] = None + + def __post_init__(self): + # The None default is annoying, but required because of dataclass limitations + assert self.tensor_source is not None + if not self.source_to_symint_node_cache: + object.__setattr__(self, 'source_to_symint_node_cache', {}) + + +def is_symbolic(val: Union[int, SymInt, float, SymFloat, bool, SymBool]) -> bool: + if isinstance(val, (int, float, bool)): + return False + return val.node.is_symbolic() + +IndicatorTypes = (IsNonOverlappingAndDenseIndicator,) + +@lru_cache(256) +def safe_expand(r): + if hasattr(r, 'expand'): + try: + return sympy.expand(r) + except RecursionError: + log.warning("RecursionError in sympy.expand(%s)", r) + return r + else: + return r + +def error(): + raise AssertionError("shouldn't be hit") + + +# TODO: Deduplicate this with torch/_prims_common/__init__.py +def eval_is_non_overlapping_and_dense(sizes, strides): + return int(guard_bool(_eval_is_non_overlapping_and_dense(sizes, strides))) + +def _eval_is_non_overlapping_and_dense(sizes, strides): + dim = len(sizes) + + # Short-circuits for tensors of rank one, which are + # non-overlapping and "dense" if their stride is one + # or it is a 0/1 element tensor + if dim == 1: + return strides[0] == 1 or sizes[0] < 2 + + # Checks that there exists a permutation of the strides s.t. the tensor would be contiguous + # Sorts (length, stride) pairs by stride + lengths_and_strides = sorted( + zip(sizes, strides), key=operator.itemgetter(1) + ) + + # Unlike the C++ code, we don't move the 0/1 size dimensions to the + # end. So we have to keep going for this code.
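+    # Walk the dims from smallest stride to largest, skipping size-1 dims:
+    # each remaining stride must equal the running product of the lengths
+    # seen so far, as it would in a contiguous tensor under some permutation.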
+ expected_stride = 1 + for length, stride in lengths_and_strides: + + if length == 1: + continue + + if stride != expected_stride: + return False + + expected_stride *= length + + return True + + +def cast_symbool_to_symint_guardless(symbool: torch.SymBool) -> torch.SymInt: + int_sym = sympy.Piecewise((1, symbool.node.expr), (0, True)) + return symbool.node.shape_env.create_symintnode(int_sym, hint=int(symbool.node.require_hint())) + +SYMPY_INTERP = { + 'Abs': operator.abs, + 'Eq': operator.eq, + 'Ne': operator.ne, + 'Gt': operator.gt, + 'Lt': operator.lt, + 'Le': operator.le, + 'Ge': operator.ge, + 'Min': min, + 'Max': max, + 'Mod': operator.mod, + 'FloorDiv': operator.floordiv, + 'TrueDiv': operator.truediv, + 'IsNonOverlappingAndDenseIndicator': eval_is_non_overlapping_and_dense, + 'floor': math.floor, + 'ceiling': math.ceil, + 'cast_symbool_to_symint_guardless': cast_symbool_to_symint_guardless, +} + + +def _lru_cache(fn, maxsize=None): + """ + Wrapper around lru_cache that clears when new info about shapes has been + updated. + + Use lru_cache if the output is always the same, regardless of the + constraints we know now (i.e. evaluate_expr) + + Use _lru_cache otherwise. + + Also note that this depends on _update_version_counter being called on the + shape environment whenever the constraints are updated, otherwise the cache + will not be cleared. + """ + fn_cache = lru_cache(maxsize)(fn) + prior_version = 0 + + if config.validate_shape_env_verison_key: + prior_key = None + + @functools.wraps(fn) + def wrapper(self, *args, **kwargs): + nonlocal prior_version, prior_key + if prior_key is None: + prior_key = self._get_key() + + if prior_version != self._version_counter: + fn_cache.cache_clear() + prior_version = self._version_counter + prior_key = self._get_key() + else: + assert prior_key == self._get_key(), \ + "ShapeEnv cache key changed without version being updated!" 
+ + return fn_cache(self, *args, **kwargs) + + else: + + @functools.wraps(fn) + def wrapper(self, *args, **kwargs): + nonlocal prior_version + if prior_version != self._version_counter: + fn_cache.cache_clear() + prior_version = self._version_counter + + return fn_cache(self, *args, **kwargs) + + wrapper.cache_clear = fn_cache.cache_clear + wrapper.cache_info = fn_cache.cache_info # type: ignore[attr-defined] + return wrapper + + +# This is pretty similar to ShapeGuard but it also comes with a message, +# and is exclusively used for things that MUST be true (unlike guards, +# which can evaluate False, in which case you just choose not to use +# a particular specialization) +@dataclass(frozen=True) +class RuntimeAssert: + expr: sympy.Expr + msg: str = field(repr=False) + stack: str = field(repr=False) + + +class ShapeGuardPrinter(StrPrinter): + def __init__( + self, + symbol_to_source, + source_ref, + var_to_sources, + ): + super().__init__() + self.symbol_to_source = symbol_to_source + self.source_ref = source_ref + self.var_to_sources = var_to_sources + + def _print_Not(self, expr): + return 'not %s' % (self.parenthesize(expr.args[0], PRECEDENCE["Not"])) + + def _print_And(self, expr): + return self.stringify(expr.args, " and ", PRECEDENCE["And"]) + + def _print_Or(self, expr): + return self.stringify(expr.args, " or ", PRECEDENCE["Or"]) + + def _print_Symbol(self, expr) -> str: + assert isinstance(expr, sympy.Symbol), str(type(expr)) + + def repr_symbol_to_source(): + return repr({ + symbol: [s.name() for s in sources] + for symbol, sources in self.symbol_to_source.items() + }) + + assert self.symbol_to_source.get(expr), ( + f"{expr} (could be from {[s.name() for s in self.var_to_sources[expr]]}) " + f"not in {repr_symbol_to_source()}. If this assert is failing, it could be " + "due to the issue described in https://github.com/pytorch/pytorch/pull/90665" + ) + return self.source_ref(self.symbol_to_source[expr][0]) + + +class LoggingShapeGuardPrinter(ShapeGuardPrinter): + def __init__(self, var_to_sources): + super().__init__(var_to_sources, lambda n: n.name(), var_to_sources) + + +class DynamicDimConstraintPrinter(StrPrinter): + """ + Printer for dynamic dim constraints. + - Instead of t.size()[d] it prints dynamic_dim(t, d) + - Instead of Eq(_, _), Mod(_, _), etc. it prints _ == _, _ % _, etc. + + We use this to suggest code for specifying dynamic dim constraints. + """ + def __init__(self, symbol_to_source, source_name_to_debug_name): + super().__init__() + self.symbol_to_source = symbol_to_source + self.source_name_to_debug_name = source_name_to_debug_name + + def print_source(self, source) -> str: + if self.source_name_to_debug_name: + return source.name() + return f"dynamic_dim({source.base.name()}, {source.idx})" + + def _print_Symbol(self, expr) -> str: + assert isinstance(expr, sympy.Symbol), str(type(expr)) + + return self.print_source(self.symbol_to_source[expr][0]) + + def _print_Relational(self, expr): + return '{} {} {}'.format( + self.parenthesize(expr.lhs, precedence(expr)), + expr.rel_op, + self.parenthesize(expr.rhs, precedence(expr)) + ) + + +class DimConstraints: + """ + Custom solver for a system of constraints on symbolic dimensions. + Solutions are "static" values or simplified "dynamic" constraints. + """ + + def __init__(self, symbol_to_source, var_to_val, marked_dynamic, source_name_to_debug_name): + # We try to solve systems of inequalities with 1 free variable. 
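+        # (keyed by that free variable; each value is a set of sympy
+        # relational expressions that mention only that symbol)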
+ self._univariate_inequalities: Dict[sympy.Symbol, Set[sympy.Expr]] = defaultdict(set) + # Among them, we prioritize solving for a free variable that has equalities. + # NOTE: _symbols_with_equalities is always a subset of _univariate_inequalities.keys() + # and removing a symbol from the former => removing it from the latter. + self._symbols_with_equalities: Set[sympy.Symbol] = set() + # A solution of a free variable with equalities becomes a substitution. + # We use these substitutions to simplify other constraints. + # NOTE: removing a symbol from _symbols_with_equalities => adding it to _substitutions. + self._substitutions: Dict[sympy.Symbol, sympy.Integer] = {} + + # In general, constraints may have // and % operations. + # Of course, // can be expressed in terms of / and %. + # Our inequality solver can handle / but not %. So we need to transform them away. + # We do so by using the values of variables as hints to evaluate %. + # For soundness we record additional congruence guards and solve them separately. + self._var_to_val: Dict[sympy.Symbol, sympy.Integer] = var_to_val + self._congruences: Set[sympy.Expr] = defaultdict(set) + + # We do not try to (directly) solve inequalities with > 1 free variables. + # NOTE: free variables in these inequalities cannot also be in _substitutions. + self._multivariate_inequalities: Set[sympy.Expr] = set() + + # We park external equalities between free variables here. + self._symbolic_equivalences: List[Tuple[Source, sympy.Expr]] = [] + + # Solutions come in two forms: + # - (static) specializations + # - (dynamic) inequalities / congruences + self._static_results: Set[str] = set() + self._dynamic_results: Set[str] = set() + + # printer for solutions + self._dcp = DynamicDimConstraintPrinter(symbol_to_source, source_name_to_debug_name) + + # inconsistencies found on substituting with concrete values / static solutions + self._inconsistencies: List[str] = [] + + # symbols that are marked dynamic + self._marked_dynamic = marked_dynamic + + def rewrite_with_congruences(self, s, expr): + """ + Eliminate expressions of the form b // d and b % d while adding congruences of the form b % d == k. + This leaves rational operators (in particular of the form b / d) that our inequality solver can handle. + We solve the added congruences separately (using our congruence solver, see below). + """ + def mod_handler(*args): + # Suppose that we have an expression of the form b % d with free variable s. + # Using the value of s as a "hint," we can evaluate b % d to a value k. + # Then we can rewrite b % d to k while adding the guard b % d == k. + + # NOTE(avik): This abstraction is provably sound but, in general, incomplete. It is complete IFF + # the original expression always evaluates to a constant value (i.e., it does not vary with s). + # In other words, + # - solutions of s with the rewritten expression are guaranteed to also be solutions of s with + # the original expression; + # - while it may be possible to find solutions of s with the original expression that are not + # solutions with the rewritten expression, in that case the original expression cannot evaluate + # to the same value for all solutions of s. + # + # Should we be worried about this incompleteness? No, because of the following reasons: + # 1. It unblocks dramatic simplification that would not be otherwise possible with current tech + # (i.e., "don't let perfect be the enemy of the good"). + # 2. We already have a tradition of using hints to add guards in the compiler for making progress. 
+ # 3. We have not yet seen a counterexample arise in practice! In particular, any congruence guards + # we generate (or simplify to) seem to be of the form b % d == k where k is a constant. + # + # Here's a theoretical counterexample: 3*s % (s + 1) == s - 2, that is satisfied by all s >= 2. + # With any hint (say) s = k, we'd rewrite this to: 3*s % (s + 1) == k - 2. But, substituting, we + # would then get k - 2 == s - 2, and thus s = k as the (only, constant) solution! + base, divisor = args + base, divisor = self.rewrite_with_congruences(s, base), self.rewrite_with_congruences(s, divisor) + mod_reduced = base.subs(self._var_to_val) % divisor.subs(self._var_to_val) + congruence = (base - mod_reduced) % divisor + if congruence != 0: + self._congruences[s].add(congruence) + return mod_reduced + + def floor_div_handler(*args): + # Suppose that we have an expression of the form b // d with free variable s. + # Using the value of s, we can evaluate b % d to a value k. + # Then we can rewrite b // d to (b - k) / d, while adding the guard b % d == k. + + # NOTE(avik): This is exactly equivalent to rewriting b // d as (b - (b % d)) / d + # and eliminating b % d as above. + base, divisor = args + base, divisor = self.rewrite_with_congruences(s, base), self.rewrite_with_congruences(s, divisor) + mod_reduced = base.subs(self._var_to_val) % divisor.subs(self._var_to_val) + congruence = (base - mod_reduced) % divisor + if congruence != 0: + self._congruences[s].add(congruence) + return (base - mod_reduced) / divisor + + if expr.has(Mod): + expr = expr.replace(Mod, mod_handler) + if expr.has(FloorDiv): + expr = expr.replace(FloorDiv, floor_div_handler) + return expr + + def add(self, expr) -> bool: + # Add an expression to the set of constraints. + # Return whether the expression is a trivial constraint (i.e., an obvious tautology). + if expr == sympy.true: + return True + orig_expr = expr + orig_reduced = orig_expr.subs(self._var_to_val) + # TODO(avik): https://github.com/pytorch/pytorch/issues/101093 + # It is possible that `expr` will fail the consistency check because of + # precision errors. Specifically, on substituting its free symbols with + # their concrete values, we might end up comparing floats. Until we have + # a fix for this issue, we delay raising such failures. See solve(). + if orig_reduced == sympy.false: + self._inconsistencies.append(f"{orig_expr} is inconsistent!") + free_symbols = expr.free_symbols + assert free_symbols, f"Did not expect constraint with no free variables: {expr}" + if len(free_symbols) > 1: + # multivariate: record and move on + self._multivariate_inequalities.add(expr) + else: + # univariate: can solve these immediately + s = next(iter(free_symbols)) + # eliminate // and % (see documentation of `rewrite_with_congruences` above) + expr = self.rewrite_with_congruences(s, expr) + if expr == sympy.true: + return True + reduced = expr.subs(self._var_to_val) + if reduced == sympy.false: + self._inconsistencies.append( + f"{expr}, obtained by rewriting {orig_expr} with congruences, " + "is inconsistent!" 
+ ) + if isinstance(expr, sympy.Eq): + # special status for symbols that have equalities (see `solve` below) + self._symbols_with_equalities.add(s) + self._univariate_inequalities[s].add(expr) + return False + + def add_equality(self, source, expr): + if expr.is_number: + # specialization, right here + self._static_results.add(f"{source.name()} == {expr}") + else: + # these will resolve to either specializations or dynamic equality constraints + self._symbolic_equivalences.append((source, expr)) + + def reduce_congruences(self): + reduced_congruences = {} + for s, congruences in self._congruences.items(): + remainder_modulus_pairs = [] + congruences_to_check = set() + for congruence in congruences: + base, divisor = congruence.args + # We are given a congruence of the form base % divisor == 0 with a free variable s. So: + # - we transform this into an equation of the form base = divisor * tmp; + # - we solve this equation for s to get a linear solution with free variable tmp. + tmp = sympy.Symbol("tmp", integer=True) + symbol, solution = sympy.solve_linear(base - divisor * tmp, symbols=[s]) + # See https://docs.sympy.org/latest/modules/solvers/solvers.html#sympy.solvers.solvers.solve_linear + # for how to interpret the results. + if s == symbol: + # This means the solution is of the form s = modulus*tmp + remainder. + modulus, remainder = sympy.polys.polytools.div(solution, tmp) + if isinstance(modulus, sympy.Integer) and isinstance(remainder, sympy.Integer): + # Make sure 0 <= remainder <= modulus. + remainder = remainder % modulus + remainder_modulus_pairs.append((remainder, modulus)) + continue + # This means that we did not get a unique solution to the equation. + # No problem, we will check it. + congruences_to_check.add(congruence) + # Finally we solve for a congruence s such that s = r_i mod m_i for each (r_i, m_i). + # The solution will be a congruence of the form s = r mod m. + # NOTE(avik): Since the given m_i may not be pairwise coprime, we can't just use CRT. 
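+            # NB: sympy's solve_congruence implements a generalized CRT that
+            # tolerates non-coprime moduli, which is what we rely on below.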
+ if remainder_modulus_pairs: + remainder, modulus = sympy.ntheory.modular.solve_congruence(*remainder_modulus_pairs) + reduced_congruences[s] = {(s - remainder) % modulus} + substitution = {s: modulus * sympy.Symbol("tmp", integer=True) + remainder} + reduced_congruences[s].update( + congruence for congruence in congruences_to_check + if not sympy.checksol(congruence, substitution) + ) + else: + reduced_congruences[s] = congruences_to_check + + return reduced_congruences + + def raise_inconsistencies(self): + if self._inconsistencies: + msg = "\n".join(self._inconsistencies) + self._inconsistencies.clear() + raise ValueError(f"The following inconsistencies were found:\n{msg}") + + def _force_specialization(self, s): + val = self._var_to_val[s] + self._static_results.add(f"{self._dcp.symbol_to_source[s][0].name()} == {val}") + self._substitutions[s] = val + + def specialize_divisor_symbols(self): + for expr in self._multivariate_inequalities: + for atom in expr.atoms(FloorDiv, Mod): + _, divisor = atom.args + for s in divisor.free_symbols: + self._force_specialization(s) + + multivariate_inequalities = self._multivariate_inequalities + self._multivariate_inequalities = set() + for expr in multivariate_inequalities: + self.add(expr.subs(self._substitutions)) + self.raise_inconsistencies() + self._univariate_inequalities = { + s: exprs + for s, exprs in self._univariate_inequalities.items() + if s not in self._substitutions + } + self._congruences = { + s: congruences + for s, congruences in self._congruences.items() + if s not in self._substitutions + } + + def solve(self, disable_congruences=True, disable_equivalences=True): + self.raise_inconsistencies() + # as long as there are symbols with equalities, solve for them + # NOTE(avik): this is guaranteed to terminate (#iterations <= #symbols) + while(self._symbols_with_equalities): + s = self._symbols_with_equalities.pop() + exprs = self._univariate_inequalities.pop(s) + solution = sympy.solvers.inequalities.reduce_inequalities(exprs, s) + if isinstance(solution, sympy.And): + solution = next((arg for arg in solution.args if isinstance(arg, sympy.Eq)), solution) + assert isinstance(solution, sympy.Eq), f"Expected an equality constraint for {s}, got {solution}" + symbol, val = solution.args + assert symbol == s, f"Expected a constraint on {s} instead of on {symbol}" + # because this is univariate, the solution is a specialization + self._static_results.add(f"{self._dcp.symbol_to_source[s][0].name()} == {val}") + # add this as a substitution to simplify other constraints + self._substitutions[s] = val + + # simplify multivariate inequalities: some of them will now become univariate! + multivariate_inequalities = self._multivariate_inequalities + self._multivariate_inequalities = set() + for expr in multivariate_inequalities: + self.add(expr.subs(s, self._substitutions[s])) + self.raise_inconsistencies() + + self.specialize_divisor_symbols() + + # solve linear congruences + # NOTE(avik): We do not need to solve them for symbols that have already been specialized. 
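+        # reduce_congruences() folds each symbol's congruences into a single
+        # canonical congruence where possible; whatever remains must be
+        # checked or turned into a dynamic constraint below.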
+ reduced_congruences = self.reduce_congruences() + for s, congruences in reduced_congruences.items(): + for congruence in congruences: + # any congruence that cannot be checked becomes a dynamic constraint as well + if s not in self._substitutions or not sympy.checksol(congruence, {s: self._substitutions[s]}): + if disable_congruences: + self._force_specialization(s) + self._univariate_inequalities.pop(s, None) + else: + self._dynamic_results.add(self._dcp.doprint(sympy.Eq(congruence, 0))) + + # remaining symbols have only pure inequalities (no equalities) + for s, exprs in self._univariate_inequalities.items(): + try: + solution = sympy.solvers.inequalities.reduce_inequalities(exprs, s) + # because this is univariate, the solution is a dynamic (range) constraint + if isinstance(solution, sympy.And): + for arg in solution.args: + self._dynamic_results.add(self._dcp.doprint(arg)) + else: + self._dynamic_results.add(self._dcp.doprint(solution)) + except NotImplementedError as e: + log.warning("Failed to reduce inequalities: %s", e) + for expr in exprs: + self._dynamic_results.add(self._dcp.doprint(expr)) + + # simplify symbolic equivalences: some of them will now become specializations! + symbolic_equivalences = self._symbolic_equivalences + self._symbolic_equivalences = [] + for source, expr in symbolic_equivalences: + if disable_equivalences and not isinstance(expr, sympy.Symbol): + for s in expr.free_symbols: + self._force_specialization(s) + sexpr = self._dcp._print_Symbol(s) + self._dynamic_results = {r for r in self._dynamic_results if sexpr not in r} + self.add_equality(source, expr.subs(self._substitutions)) + + # remaining symbolic equivalences become dynamic equality constraints + for source, expr in self._symbolic_equivalences: + self._dynamic_results.add(f"{self._dcp.print_source(source)} == {self._dcp.doprint(expr)}") + + def forced_specializations(self): + def debug_name(src): + name = src.name() + if self._dcp.source_name_to_debug_name: + return f"{self._dcp.source_name_to_debug_name[name]} = {name}" + else: + return name + + return { + debug_name(self._dcp.symbol_to_source[s][0]): val + for s, val in self._substitutions.items() + if s in self._marked_dynamic + } + + def remove_redundant_dynamic_results(self): + candidates_for_removal = [] + dynamic_results = set() + for dc in self._dynamic_results: + # Instead of 2 <= dynamic_dim(...) simply suggest dynamic_dim(...). + # There is no change in behavior since 2 is the default lower bound. 
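+            # e.g. "2 <= dynamic_dim(x, 0)" is rewritten to "dynamic_dim(x, 0)"
+            # (the argument name x here is purely illustrative)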
+ dc_ = re.sub(r"2 <= dynamic_dim(.+)", r"dynamic_dim\1", dc) + if dc != dc_: + candidates_for_removal.append(dc_) + else: + dynamic_results.add(dc_) + for dc in candidates_for_removal: + # remove dynamic_dim(t, 0) as a constraint when dynamic_dim(t, 0) also + # appears as part of another constraint + found = False + for other_dc in dynamic_results: + if dc in other_dc: + found = True + if not found: + dynamic_results.add(dc) + self._dynamic_results = dynamic_results + + def prettify_results( + self, + original_signature: inspect.Signature, + constraint_violation_error=None, + forced_specializations=None, + ): + if self._dcp.source_name_to_debug_name: + def transform(s): + for k, v in self._dcp.source_name_to_debug_name.items(): + s = s.replace(k, v) + return s + + results = defaultdict(dict) + + def flip(op): + if op == "<=": + return ">=" + if op == ">=": + return "<=" + if op == "<": + return ">" + if op == ">": + return "<" + assert op == "==" + return op + + def relation_with_digit(expr, op, digit): + if op == "<=": + results[expr]["max"] = digit + elif op == "<": + results[expr]["max"] = digit - 1 + elif op == ">=": + results[expr]["min"] = digit + elif op == ">": + results[expr]["min"] = digit + 1 + else: + assert op == "==" + results[expr]["eq"] = digit + + for s in self._static_results.union(self._dynamic_results): + t = transform(s) + if t == s: + continue + left, op, right = t.split(" ") + if op == "==" and left == right: + continue + if right.isdigit(): + relation_with_digit(left, op, int(right)) + elif left.isdigit(): + relation_with_digit(right, flip(op), int(left)) + else: + assert op == "==" + results[left]["eq"] = right + + buf = "" + debug_names = set() + if forced_specializations: + debug_names.update(k.split(" = ")[0] for k in forced_specializations.keys()) + buf += ( + f"Specializations unexpectedly required ({', '.join(debug_names)})! " + "For more information, run with TORCH_LOGS=dynamic.\n" + ) + for s, val in forced_specializations.items(): + buf += f" - {s} must be specialized to {val} because the guards generated for it are too complex.\n" + + dims = [] + others = [] + match = None + if constraint_violation_error: + match = re.search(r"Constraints violated \((.*)\)", constraint_violation_error.args[0]) + if match is not None: + debug_names.update(match.expand(r'\1').split(', ')) + + for k, c in results.items(): + if k not in debug_names: + continue + if "eq" in c: + other = c["eq"] + if isinstance(other, int): + others.append(f"{k} = None # {other}") + else: + others.append(f"{k} = {other}") + else: + min_ = c.get("min", None) + if min_ == 2: + min_ = None + max_ = c.get("max", None) + if min_ is not None and max_ is not None: + dims.append(f"{k} = Dim('{k}', min={min_}, max={max_})") + elif min_ is not None: + dims.append(f"{k} = Dim('{k}', min={min_})") + elif max_ is not None: + dims.append(f"{k} = Dim('{k}', max={max_})") + else: + dims.append(f"{k} = Dim('{k}')") + + buf += "\nSuggested fixes:\n " + buf += "\n ".join(dims + others) + + return buf + + # Note: Model inputs are wrapped as LocalSource in dynamo. + # LocalSource.name() wraps the name with L[""]. We use regular + # expression to do the replacement to avoid traversing up + # the source hierarchy manually. 
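+        # For example, a constraint on a (hypothetical) input named x,
+        #     "L['x'].size()[0] <= 16"
+        # is rewritten to "x.size()[0] <= 16" and grouped under "x".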
+ def extract_and_rewrite_local(dc): + match = re.search(r"L\['(.+?)'\]", dc) + if match is None: + return + arg = match.expand(r'\1') + dc = re.sub(r"L\['(.+?)'\]", r'\1', dc) + return arg, dc + + def group(results, args_index): + groups = defaultdict(list) + for dc in results: + local = extract_and_rewrite_local(dc) + if local is None: + # This can happen, e.g., with `assume_constant_result`. + # In that case, we drop the constraint. + # TODO(avik) Maybe we should generate an assertion here? + continue + arg, dc = local + if arg in args_index: + groups[args_index[arg]].append(dc) + else: + # This can happen, e.g., with decorators that change the signature. + # In that case, we drop the constraint. Seems hard to do better. :/ + # TODO(avik) Maybe warn that `arg` is not in `signature`? + continue + sorted_groups = [] + for idx, dcs in sorted(groups.items()): + _, arg = idx + sorted_groups.append((arg, sorted(dcs))) + return sorted_groups + + signature = original_signature.replace(return_annotation=inspect.Signature.empty) + args_index = {} + for i, arg in enumerate(signature.parameters.keys()): + args_index[arg] = (i, arg) + + def print_results(grouped, indent, result_fn): + nonlocal buf + + space = False + for arg, results in grouped: + if space: + buf += "\n" + else: + space = True + buf += f"\n{indent}# {arg}:" + for result in results: + buf += f"\n{indent}{result_fn(result)}" + + buf = "" + if forced_specializations: + buf += ( + "Some dynamic dimensions need to be specialized because " + "the constraints inferred for them are too complex to specify.\n" + ) + for s, val in forced_specializations.items(): + buf += f" - {s}, which was marked dynamic, must be specialized to {val}.\n" + indent = 4 * " " + if self._static_results: + grouped_static_results = group(self._static_results, args_index) + buf += "\nThe following dimensions have been specialized and CANNOT be dynamic." + buf += f"\n```\ndef specializations{str(signature)}:" + print_results( + grouped_static_results, + indent, + lambda result: f"assert {result}", + ) + buf += "\n```\n" + if self._dynamic_results: + grouped_dynamic_results = group(self._dynamic_results, args_index) + buf += "\nThe following dimensions CAN be dynamic." + buf += "\nPlease use the following code to specify the constraints they must satisfy:" + buf += f"\n```\ndef specify_constraints{str(signature)}:" + buf += f"\n{indent}return [" + print_results( + grouped_dynamic_results, + indent * 2, + lambda result: f"{result},", + ) + buf += f"\n{indent}]\n```\n" + return buf + + + +TLS = threading.local() + + +class ShapeEnv: + # This is a wrapper over the actual __init__ function. + # + # Where to add a new constructor parameter to ShapeEnv? + # ===================================================== + # This __init__ function should be used only for parameters related to event recording. + # These are parameters that we don't wish to pass down the road to new ShapeEnv instances + # created from replaying events. + # + # If you wish to add a parameter to the constructor of ShapeEnv, unrelated to event + # recording, do so in the _init function. + def __init__( + self, *, + should_record_events: Optional[bool] = None, + tracked_fakes: Optional[List[Any]] = None, + **kwargs + ) -> None: + self._init(**kwargs) + + # Disable event recording when replaying.
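+        # (otherwise a replayed ShapeEnv would try to re-record the very
+        # events it is replaying)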
+ kwargs["should_record_events"] = False + + from torch.fx.experimental.validator import translation_validation_enabled + self._translation_validation_enabled = translation_validation_enabled() + + # If not specified, enable event recording if both: + # - Translation validation is on + # - Translation validation bisection is not disabled + self.should_record_events = ( + should_record_events + if should_record_events is not None + else ( + self._translation_validation_enabled + and not config.translation_validation_no_bisect + ) + ) + + # Enable event recording check if both: + # - It should record events + # - The recording check is enabled + self.check_recorded_events = ( + self.should_record_events and config.check_shape_env_recorded_events + ) + + # This will make sure we only record the top-level function call. + self.is_recording = not self.should_record_events + # Keep track of the list of tracked fakes. + self.tracked_fakes = tracked_fakes + # List of events for reconstructing ShapeEnv at arbitrary points in time. + self.events: List[ShapeEnvEvent] = ( + [ShapeEnvEvent(ShapeEnv, kwargs=kwargs)] if self.should_record_events else [] + ) + + def _init( + self, *, + allow_scalar_outputs=True, + allow_dynamic_output_shape_ops=True, + # NB: These are legacy configuration that help us make good choices + # when the constraint/dynamic dims are not explicitly passed to us. + # Ideally we will fix all call sites to be explicit and not have + # implicit choices, but this apparently was pretty involved. + assume_static_by_default=False, + # Note - On 0/1 specialization + # + # The following options affect decisions we make about eager + # specialization. Disabling them will increase trace time (as we do + # more symbolic reasoning) and can also harm the quality of generated + # code (because inductor may not be able to specialize for bounds + # being equal--although if we later respecialize because of a guard, + # your code may be just as good as it was before.) + # + # When True, eagerly specialize input sizes which have 0/1. + specialize_zero_one=True, + # When True, assume input sizes which have the same size are + # symbolically equal. + duck_shape=True, + # For debugging + co_fields=None, + ): + # Not directly used by ShapeEnv; indirectly used by FakeTensor + self.allow_scalar_outputs = allow_scalar_outputs + self.allow_dynamic_output_shape_ops = allow_dynamic_output_shape_ops + self.guards: List[ShapeGuard] = [] + # Maps symbolic ints to their original concrete values + # Currently populated from tensors + self.var_to_val: Dict[sympy.Symbol, sympy.Integer] = {} + # Maps symbolic ints to their min/max range. These ranges + # are conservative: the int MUST fall in the range, but the + # range may contain ints which may not actually appear in + # practice + self.var_to_range: Dict[sympy.Symbol, ValueRanges] = {} + self.source_name_to_debug_name: Dict[str, str] = {} + # Maps symbolic ints to their min/max range for runtime checks. + # This is because we assume a graph generated with N=2 is general enough + # for N < 2. Therefore, it will be too strict to assert N=2 at runtime. + self.runtime_var_to_range: Dict[sympy.Symbol, ValueRanges] = {} + self.var_to_sources: Dict[sympy.Symbol, List[Source]] = {} + self.var_to_stack: Dict[sympy.Symbol, CapturedTraceback] = {} + # Maps symbolic ints to the guards that refine their lower/upper + # bound. If one of them is None, it means that there are no guards + # that refine that respective bound. 
+        self.var_to_guards: Dict[sympy.Symbol, Tuple[Optional[ShapeGuard], Optional[ShapeGuard]]] = {}
+        # Maps from sympy ints to expressions representing them
+        # Populated from equality guards (i.e. a.shape[0] == b.shape[0])
+        self.replacements: Dict[sympy.Symbol, sympy.Expr] = {}
+        # Set holding a % b expressions that evaluate to 0.
+        self.divisible: Set[sympy.Expr] = set()
+        # Duck-shaping says that if two input tensors have the same size,
+        # they get assigned the same symbolic variable
+        self.val_to_var: Dict[int, sympy.Expr] = {}
+        if specialize_zero_one:
+            self.val_to_var = {0: sympy.Integer(0), 1: sympy.Integer(1)}
+        self.unbacked_symfloat_counter = itertools.count()
+        self.unbacked_symint_counter = itertools.count()
+        # Similar to guards, but these MUST evaluate to true and can
+        # only be evaluated at runtime midway through (i.e., they always
+        # involve unbacked symints)
+        #
+        # For efficiency reasons, we index in the following way. Suppose you have
+        # a runtime assert i0 + i1 <= s1. We pick the most recently allocated
+        # symbol in the source expression and add the assert to the list for
+        # that symbol e.g., {i1: [i0 + i1 <= s1]}.
+        #
+        # We access the runtime asserts in two situations:
+        #
+        #   - When we are guarding on an expression, we will attempt to
+        #     statically evaluate it, in case the unbacked SymInts can
+        #     simplify away. If we have a runtime assert, we may be able
+        #     to discharge the guard entirely. We only need to attempt
+        #     runtime asserts that mention freevars of the expression in
+        #     question.
+        #
+        #   - When we are performing codegen (in Inductor for eager, or
+        #     when finalizing the export FX graph), we need to know what
+        #     extra runtime asserts to insert. Whenever an unbacked
+        #     SymInt comes into scope, all runtime asserts involving it
+        #     become eligible for insertion (so long as all of their other
+        #     free unbacked symbols are also in scope). We technically
+        #     can handle any choice of key by kicking inexpressible asserts
+        #     to the next unbacked symbol to wait on, but if we choose the
+        #     latest key, an assert will only show up at the moment when
+        #     we can actually codegen it.
+        self.deferred_runtime_asserts: Dict[sympy.Symbol, List[RuntimeAssert]] = {}
+        # This exists so we can efficiently invalidate the cache (it's used as
+        # part of the cache key); otherwise we'd have to iterate through
+        # deferred_runtime_asserts to compute its length
+        self.num_deferred_runtime_asserts = 0
+        self.assume_static_by_default = assume_static_by_default
+        self.specialize_zero_one = specialize_zero_one
+        self.duck_shape = duck_shape
+        self.log = log
+        self.log.info("create_env")
+        self.frozen = False
+        self.dim_constraints: Optional[DimConstraints] = None
+        self.counter = collections.Counter()
+        # A selection of important fields on co_field; solely used for
+        # signpost_event
+        self.co_fields = co_fields if co_fields else {}
+
+        # Version counter used to invalidate cached values
+        self._prev_cache_key = self._get_key()
+        self._version_counter = 0
+
+        # Cache for FX nodes.
+        # Maps a tuple of:
+        #   1. the node's target
+        #   2. its list of arguments
+        # to the already built node.
+        # This drastically reduces the size of the FX graph, avoiding
+        # duplicated nodes.
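+        # Illustrative sketch (hypothetical key): a node built for an equality
+        # test between symbols a and b would be cached under a key like
+        #   (operator.eq, (a_node, b_node))
+        # so requesting the same call again returns the existing FX node.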
+        self.fx_node_cache: Dict[Tuple[Callable, Tuple[Any, ...]], torch.fx.Node] = {}
+        self.source_to_symbol: Dict[str, sympy.Symbol] = {}
+
+        from torch.fx.experimental.validator import translation_validation_enabled
+        self._translation_validation_enabled = translation_validation_enabled()
+
+        if self._translation_validation_enabled:
+            from torch.fx.experimental.validator import TranslationValidator
+
+            self.validator = TranslationValidator()
+            self.graph = torch.fx.Graph()
+            # Create an output graph and start inserting before that.
+            # This is needed when 'deepcopy'-ing this object.
+            self.graph.inserting_before(self.graph.output(None))
+
+            # Mapping of each node name to the node itself.
+            #
+            # This is useful for matching an FX node from a recorded ShapeEnv.graph
+            # to the FX node of the ShapeEnv we are running the event on.
+            #
+            # Whenever you add a node to self.graph, you must add a mapping to this
+            # variable. Otherwise, the built FX graph on the replayed ShapeEnv will
+            # not be valid.
+            self.name_to_node: Dict[str, torch.fx.Node] = {}
+
+    def check_equal(self, other: "ShapeEnv") -> None:
+        # ShapeEnv fields that are not relevant for the outcome of
+        # ShapeEnv.produce_guards call:
+        #   - Debugging variables
+        #   - Translation validation related variables
+        #   - Events recording related variables
+        non_state_variable_names = (
+            "counter",
+            "log",
+            "var_to_stack",
+            "fx_node_cache",
+            "graph",
+            "validator",
+            "check_recorded_events",
+            "should_record_events",
+            "is_recording",
+            "tracked_fakes",
+            "events",
+            "source_name_to_debug_name",
+            "_prev_cache_key",
+            "_version_counter",
+        )
+
+        # Mapping of the value of each to-be-compared field into the values that
+        # should actually be compared.
+        #
+        # You should modify this if, for example, a field holds both state and
+        # debugging information. e.g. ShapeGuard holds the actual guard (sympy.Expr)
+        # and the stack when it was added to the set of guards. In order to compare
+        # it, we throw away the stack information.
+        def map_value(key: str, value: Any) -> Any:
+            if key in ("unbacked_symfloat_counter", "unbacked_symint_counter"):
+                from copy import copy
+
+                # For itertools.count(), we compare the next integer returned
+                # by the count iterators. Note that we need to copy the iterator
+                # first. Otherwise we are mutating the object.
+                return next(copy(value))
+            elif key == "guards":
+                # Transform the list of ShapeGuard into a list of expressions.
+                return [g.expr for g in value]
+            elif key == "var_to_guards":
+                # Transform the tuple of optional ShapeGuards of each entry into
+                # a tuple of optional expressions.
+                return {
+                    s: (
+                        lb.expr if lb is not None else None,
+                        ub.expr if ub is not None else None,
+                    )
+                    for s, (lb, ub) in value.items()
+                }
+            elif key == "deferred_runtime_asserts":
+                # Transform the list of RuntimeAsserts into a list of expressions.
+                return {s: [ra.expr for ra in ras] for s, ras in value.items()}
+            elif key == "name_to_node":
+                # Just compare whether the sets of keys are the same.
+                return set(value.keys())
+            return value
+
+        shape_env_check_state_equal(self, other, non_state_variable_names, map_value)
+
+    def snapshot_tracked_fakes(self) -> Optional[List[Any]]:
+        if self.tracked_fakes is None:
+            return None
+
+        from torch._dynamo.variables.builder import TrackedFake
+
+        def maybe_transform_fake(fake: TrackedFake):
+            inner_fake = fake.fake \
+                if isinstance(fake.fake, torch.SymInt) \
+                else FakeTensorMeta.from_fake(fake.fake)
+            # Even though TrackedFake accepts either a SymInt or a FakeTensor, here we give it a
+            # FakeTensorMeta for two reasons:
+            #   1. this is all the information we need when recording ShapeEnvEvents.
+            #   2. it works even if each TrackedFake changes its metadata.
+            return TrackedFake(inner_fake, fake.source, fake.constraint_dims)  # type: ignore[arg-type]
+
+        return [maybe_transform_fake(fake) for fake in self.tracked_fakes]
+
+    def inc_tracked_fakes_length(self) -> None:
+        self.tracked_fakes_length += 1
+
+    def set_tracked_fakes_length(self, i: int) -> None:
+        self.tracked_fakes_length = i
+
+    def last_event_index(self) -> int:
+        return len(self.events) - 1
+
+    @contextmanager
+    def recording(self):
+        self.is_recording = True
+        try:
+            yield
+        finally:
+            self.is_recording = False
+
+    @record_shapeenv_event()
+    def freeze(self):
+        self.frozen = True
+
+    def _create_symbol_for_source(self, source: Source) -> Optional[sympy.Symbol]:
+        if not self._translation_validation_enabled:
+            return None
+        srcname = source.name()
+        if srcname not in self.source_to_symbol:
+            self.source_to_symbol[srcname] = sympy.Symbol(srcname, integer=True)
+        return self.source_to_symbol[srcname]
+
+    def _add_z3var(self, symbol: sympy.Symbol, type: Type) -> None:
+        if self._translation_validation_enabled:
+            self.validator.add_var(symbol, type)
+
+    def _add_target_expr(self, expr) -> None:
+        if self._translation_validation_enabled:
+            self.validator.add_target_expr(expr)
+
+    def _add_assertion(self, expr) -> None:
+        if self._translation_validation_enabled:
+            self.validator.add_assertion(expr)
+
+    def _check_translation_validate(self) -> None:
+        if self._translation_validation_enabled:
+            self.validator.validate()
+
+    @record_shapeenv_event()
+    def create_fx_call_function(
+        self,
+        op: Callable,
+        args: Tuple,
+    ) -> Tuple[Optional[torch.fx.Node], bool]:
+        # Cache this tuple in order to avoid duplicated nodes.
+        node_key = (op, args)
+        # Flags whether the returned node was cached or not.
+        fresh = False
+
+        if self._translation_validation_enabled and node_key not in self.fx_node_cache:
+            from torch.fx.experimental.validator import z3op
+
+            # Presence of None in the arguments implies that we should ignore this operation.
+            if any(a is None for a in args):
+                # We check that we are not mixing SymNodes that should not be ignored
+                # (fx_node is not None) with those that should (fx_node is None).
+                assert all(not isinstance(a, torch.fx.Node) for a in args)
+                return None, fresh
+
+            fresh = True
+            lifted_op = z3op(op, self.validator)
+
+            # If translation validation is enabled, all arguments must have their
+            # own FX nodes.
+ assert all(a is not None for a in args), f"missing arg in FX graph ({op.__name__}): {args}" + node = self.fx_node_cache[node_key] = self.graph.call_function(lifted_op, args) + self.name_to_node[node.name] = node + + return self.fx_node_cache.get(node_key, None), fresh + + def create_fx_placeholder_and_z3var( + self, + symbol: sympy.Symbol, + type: Type, + ) -> Optional[torch.fx.Node]: + if not self._translation_validation_enabled: + return None + + node_key = (self.graph.placeholder, (symbol,)) + + # Check if we haven't added this symbol already. + # If so, skip the placeholder creation, as it + # generates invalid Python code. + if node_key not in self.fx_node_cache: + # Add a Z3 variable according to 'type'. + self._add_z3var(symbol, type) + # Create the FX placeholder out of a mangled name. + mangled_name = re.sub(r'[^a-zA-Z0-9]', '_', re.sub(r'[()]', '', symbol.name)) + node = self.fx_node_cache[node_key] = self.graph.placeholder(mangled_name) + self.name_to_node[node.name] = node + # Attach the 'symbol' to the placeholder so that we can retrieve + # the Z3 variable later. + node.meta["symbol"] = symbol + + return self.fx_node_cache[node_key] + + def remove_fx_node(self, node: Optional[torch.fx.Node]) -> None: + if self._translation_validation_enabled and node is not None: + self.name_to_node.pop(node.name) + self.graph.erase_node(node) + + def add_fx_node_metadata(self, node: torch.fx.Node) -> None: + from torch._dynamo.utils import get_current_node + + if self.should_record_events: + node.meta[SHAPEENV_EVENT_KEY] = self.last_event_index() + node.meta[CURRENT_NODE_KEY] = get_current_node() + + def _suppress_guards_tls(self): + return getattr(TLS, "suppress_guards", False) + + @record_shapeenv_event() + def suppress_guards_enter(self): + TLS.suppress_guards = True + + @record_shapeenv_event() + def suppress_guards_exit(self): + TLS.suppress_guards = False + + @contextmanager + def suppress_guards(self): + self.suppress_guards_enter() + try: + yield + finally: + self.suppress_guards_exit() + + def _get_key(self): + """ + Defines the current "state" of the guards we've accumulated in this ShapeEnv. 
+        Determines when we need to invalidate our cache
+        """
+        return (len(self.replacements), len(self.divisible), self.num_deferred_runtime_asserts)
+
+    def _update_version_counter(self):
+        # The shape environment is queried orders of magnitude more often than
+        # it is changed, so we summarise the cache key into a linearly
+        # increasing version counter which is cheaper to check in _lru_cache
+
+        # Only update version counter if the state actually changed
+        cur_key = self._get_key()
+        if self._prev_cache_key != cur_key:
+            self._prev_cache_key = cur_key
+            self._version_counter += 1
+
+    def _produce_dyn_sizes(self,
+                           ex_size: Sequence[int],
+                           source: Source,
+                           symbolic_context: SymbolicContext
+                           ) -> List[sympy.Expr]:
+        return self._produce_dyn_sizes_from_int_tuple(tuple(ex_size), source, symbolic_context)
+
+    def _produce_dyn_sizes_from_int_tuple(self,
+                                          tensor_size: Tuple[int],
+                                          source: Source,
+                                          symbolic_context: SymbolicContext,
+                                          ) -> List[sympy.Expr]:
+        assert all(not is_symbolic(val) for val in tensor_size), f"Expect size to be a plain tuple of ints but got {tensor_size}"
+        from torch._dynamo.source import TensorPropertySource, TensorProperty
+        _assert_symbol_context(symbolic_context)
+        dynamic_dims = symbolic_context.dynamic_sizes
+        constraint_dims = symbolic_context.constraint_sizes
+        size = []
+        for i, val in enumerate(tensor_size):
+            size.append(self.create_symbol(
+                val, TensorPropertySource(source, TensorProperty.SIZE, i), dynamic_dims[i], constraint_dims[i]
+            ))
+        return size
+
+    def create_symbolic_sizes_strides_storage_offset(
+        self,
+        ex: torch.Tensor,
+        source: Source,
+        *,
+        symbolic_context: Optional[SymbolicContext] = None,
+    ):
+        """
+        Returns a list of symbolic sizes and strides for the given tensor.
+        We try our best to express stride in terms of the sizes, so as to not
+        introduce new symbolic variables.
+        """
+
+        # Dynamo may want to wrap FakeTensors whose sizes are SymInts,
+        # e.g. via make_fx(opt_f(), tracing_mode="symbolic").
+        # We create symbols in shape_env using the backed hints behind SymInt.
+
+        # Case 1: when the SymInt is backed, dynamo can proceed with FakeTensors that have concrete shapes.
+        # produce_guards will trigger specializations on the outer stuff.
+
+        # Case 2: when the SymInt is unbacked, we will throw a data-dependent error in require_hint().
+        #
+        # It's probably good for now, but it's important to note that this approach has implications for
+        # the original shape_env when checking guards in a different order.
+
+        # Example:
+        # ---------
+        # Consider a function "opt_f" as shown below:
+
+        # @torch.compile()
+        # def opt_f(x: bool, y: Tensor):
+        #     if x == True:
+        #         return y + torch.randn([4])
+        #     else:
+        #         return y
+        # Depending on the sequence of calls, we might install two different sets of guards:
+
+        # 1. opt_f(False, y):
+        #     - "x == False" (always works for any size y)
+
+        # 2. opt_f(True, y):
+        #     - Triggers recompilation and results in guards like:
+        #         - "x == True and y.size(0) == 4"
+        #         - (or "y.size(0) == 4 and x == True")
+
+        # The order of checking the guards matters. In this specific example:
+        # if the True branch guard check precedes the False branch one, and within
+        # the True branch the y.size(0) check precedes the x == True check, we may
+        # end up with an unnecessary shape specialization for y.
+        assert not ex.is_nested
+
+        def maybe_specialize_sym_int_with_hint(maybe_sym) -> int:
+            assert isinstance(maybe_sym, (int, torch.SymInt))
+            if is_symbolic(maybe_sym):
+                assert maybe_sym.node.shape_env is not self, \
+                    "expect the symbol to be created from a shape env other than the current one."
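+                # require_hint() returns the concrete value backing the SymInt;
+                # for unbacked symbols it raises a data-dependent error instead.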
+ return maybe_sym.node.require_hint() + return maybe_sym + + ex_size = tuple(maybe_specialize_sym_int_with_hint(sz) for sz in ex.size()) + ex_stride = tuple(maybe_specialize_sym_int_with_hint(sd) for sd in ex.stride()) + ex_storage_offset = maybe_specialize_sym_int_with_hint(ex.storage_offset()) + + return self._create_symbolic_sizes_strides_storage_offset( + ex_size, + ex_stride, + ex_storage_offset, + [_is_dim_dynamic(ex, i) for i in range(ex.dim())], + source, + symbolic_context=symbolic_context, + ) + + @record_shapeenv_event() + def _create_symbolic_sizes_strides_storage_offset( + self, + ex_size: Sequence[int], + ex_stride: Sequence[int], + ex_storage_offset: int, + is_dim_dynamic: Sequence[bool], + source: Source, + *, + symbolic_context: Optional[SymbolicContext] = None, + ): + dim = len(ex_size) + + # Reimplement the legacy behavior + if symbolic_context is None: + constraint_dims = [None] * dim + dynamic_dims = [] + for i in range(dim): + # NB: This is encapsulation breaking! Legacy behavior was + # bad. + if is_dim_dynamic[i]: + r = DimDynamic.DYNAMIC + elif self.assume_static_by_default: + r = DimDynamic.STATIC + else: + r = DimDynamic.DUCK + dynamic_dims.append(r) + dynamic_dims = [DimDynamic.DUCK] * dim + # symbolic_context is None - set one + symbolic_context = StatelessSymbolicContext(dynamic_sizes=dynamic_dims, constraint_sizes=constraint_dims) + # We got a StatelessSymbolicContext + _assert_symbol_context(symbolic_context) + constraint_dims = symbolic_context.constraint_sizes + dynamic_dims = symbolic_context.dynamic_sizes + + # TODO: make this configurable from outside symbolic_context; we made a symbolic_context + # decision here where if all sizes are static, we are going to + # specialize all of the inner strides/offset too. We don't have to + # do this, and arguably we should ALWAYS allow for dynamic offset, + # this is cheap. 
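+        # Illustrative sketch of the stride derivation below (hypothetical values):
+        # for a contiguous tensor with ex_size = (3, 4) and ex_stride = (4, 1),
+        # stride[1] binds directly to 1, and stride[0] is then recovered from the
+        # `candidates` table as size[1] * stride[1], so no fresh stride symbol is
+        # allocated.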
+ # TODO: This should be DYNAMIC, using DUCK for BC + dynamic_strides_offset = DimDynamic.STATIC if all(r == DimDynamic.STATIC for r in dynamic_dims) else DimDynamic.DUCK + + assert len(dynamic_dims) == dim, f"{len(dynamic_dims)} != {dim}" + assert len(constraint_dims) == dim + + from torch._dynamo.source import TensorPropertySource, TensorProperty + size: List[sympy.Expr] = self._produce_dyn_sizes_from_int_tuple(ex_size, source, symbolic_context) + stride: List[Optional[sympy.Expr]] = [None] * len(size) + for i, val in enumerate(ex_stride): + if val in (0, 1): + stride[i] = sympy.Integer(val) + while any(x is None for x in stride): + candidates = { + ex_size[i] * ex_stride[i]: size[i] * stride[i] + for i in range(len(size)) + if stride[i] is not None and ex_stride[i] >= 0 + } + # iterate over unbound strides in sorted order + val_list = sorted( + [(ex_stride[i], i) for i in range(len(stride)) if stride[i] is None] + ) + for _, i in val_list: + if stride[i] is None and ex_stride[i] in candidates: + stride[i] = candidates[ex_stride[i]] + candidates[ex_size[i] * ex_stride[i]] = size[i] * stride[i] + + if any(x is None for x in stride): + # bind the smallest unbound stride to a new variable + val, i = min( + [ + (ex_stride[i], i) + for i in range(len(stride)) + if stride[i] is None + ] + ) + stride[i] = self.create_symbol( + val, + TensorPropertySource(source, TensorProperty.STRIDE, i), + dynamic_dim=dynamic_strides_offset, + constraint_dim=None, + ) + assert all(x is not None for x in stride) + + sym_sizes = [ + self.create_symintnode( + sym, + hint=hint, + source=TensorPropertySource(source, TensorProperty.SIZE, i), + symbolic_context=symbolic_context + ) + for i, (sym, hint) in enumerate(zip(size, ex_size)) + ] + sym_stride = [] + for i, stride_expr in enumerate(stride): + # NB: Don't duck size the stride; instead use the expression + # we computed + assert stride_expr is not None + sym_stride.append(self.create_symintnode( + stride_expr, hint=ex_stride[i], source=TensorPropertySource(source, TensorProperty.STRIDE, i), + symbolic_context=symbolic_context)) + sym_storage_offset = self.create_symintnode( + self.create_symbol( + ex_storage_offset, + TensorPropertySource(source, TensorProperty.STORAGE_OFFSET), + dynamic_dim=dynamic_strides_offset, + constraint_dim=None, + ), + hint=ex_storage_offset, + source=TensorPropertySource(source, TensorProperty.STORAGE_OFFSET), symbolic_context=symbolic_context) + return tuple(sym_sizes), tuple(sym_stride), sym_storage_offset + + # If you know what the current hint value of the SymInt to be created + # is, pass it into hint. Otherwise, pass None and we will make our best + # guess + @record_shapeenv_event() + def create_symintnode( + self, + sym: "sympy.Expr", + *, + hint: Optional[int], + source: Optional[Source] = None, + symbolic_context: Optional[SymbolicContext] = None, + ): + source_name = source.name() if source else None + + if self._translation_validation_enabled and source is not None: + # Create a new symbol for this source. + symbol = self._create_symbol_for_source(source) + assert symbol is not None + + # Create a new FX placeholder and Z3 variable for 'symbol'. + fx_node = self.create_fx_placeholder_and_z3var(symbol, int) + + # Add an equality assertion for the newly created symbol and 'sym'. 
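+            # Illustrative (hypothetical source name): for a source "L['x'].size()[0]"
+            # whose traced expression is s0, this records the Z3 fact
+            #   Eq(L['x'].size()[0], s0)
+            # tying the placeholder back to the traced expression.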
+            self._add_assertion(sympy.Eq(symbol, sym))
+        else:
+            fx_node = None
+
+        # see note [Tensor Fakification and Symbol Caching]
+        if isinstance(symbolic_context, StatefulSymbolicContext) and source_name:
+            if source_name in symbolic_context.source_to_symint_node_cache:
+                return symbolic_context.source_to_symint_node_cache[source_name]
+
+        if isinstance(sym, sympy.Integer):
+            if hint is not None:
+                assert int(sym) == hint
+            out = int(sym)
+        else:
+            out = SymInt(SymNode(sym, self, int, hint, fx_node=fx_node))
+        if isinstance(symbolic_context, StatefulSymbolicContext) and source_name:
+            symbolic_context.source_to_symint_node_cache[source_name] = out
+        return out
+
+    @record_shapeenv_event()
+    def create_unspecified_symint_and_symbol(self, value, source, dynamic_dim):
+        return self.create_symintnode(
+            self.create_unspecified_symbol(
+                value,
+                source=source,
+                dynamic_dim=dynamic_dim,
+            ),
+            hint=value,
+            source=source,
+        )
+
+    def create_symboolnode(self, sym: "sympy.Expr"):
+        # This function is only being used in serialization, so we do not track it
+        # for validation.
+        return SymBool(SymNode(sym, self, bool, None))
+
+    @record_shapeenv_event()
+    def create_unbacked_symfloat(self):
+        symbol: sympy.Symbol = sympy.Symbol(f"f{next(self.unbacked_symfloat_counter)}")
+        self.counter["create_unbacked_symbol"] += 1
+        self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
+        self.var_to_range[symbol] = ValueRanges.unknown()
+
+        # Create a new FX placeholder and Z3 variable for 'symbol'.
+        fx_node = self.create_fx_placeholder_and_z3var(symbol, float)
+
+        return SymFloat(SymNode(symbol, self, float, None, fx_node=fx_node))
+
+    @record_shapeenv_event()
+    def create_unbacked_symint(self):
+        symbol: sympy.Symbol = sympy.Symbol(f"i{next(self.unbacked_symint_counter)}", integer=True)
+        self.counter["create_unbacked_symbol"] += 1
+        self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
+        vr = self.var_to_range[symbol] = self._default_unspecified_value_range()
+
+        # Create a new FX placeholder and Z3 variable for 'symbol'.
+        fx_node = self.create_fx_placeholder_and_z3var(symbol, int)
+
+        fsummary, user_tb, maybe_user_loc = self._get_stack_summary()
+        log.info("create_unbacked_symbol %s [%s, %s]%s (%s)", symbol, vr.lower, vr.upper, maybe_user_loc, format_frame(fsummary))
+
+        return SymInt(SymNode(symbol, self, int, None, fx_node=fx_node))
+
+    def is_unbacked_symint(self, symbol: sympy.Symbol) -> bool:
+        # NB: keep synced with free_unbacked_symbols
+        return str(symbol).startswith("i")
+
+    @record_shapeenv_event()
+    def create_unbacked_symbool(self):
+        symbol: sympy.Symbol = sympy.Symbol(f"i{next(self.unbacked_symint_counter)}", integer=True)
+        self.counter["create_unbacked_symbol"] += 1
+        self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
+        self.var_to_range[symbol] = ValueRanges(0, 1)
+
+        # Create a new FX placeholder and Z3 variable for 'symbol'.
+        fx_node = self.create_fx_placeholder_and_z3var(symbol, bool)
+
+        return SymBool(SymNode(sympy.Eq(symbol, 1), self, bool, None, fx_node=fx_node))
+
+    @record_shapeenv_event()
+    def create_unspecified_symbol(
+        self,
+        val: Union[int, SymInt],
+        source: Source,
+        dynamic_dim: DimDynamic = DimDynamic.DUCK,
+        constraint_dim: DimConstraint = None,  # NB: includes None
+    ) -> "sympy.Expr":
+        # 'positive' is None for unspecified symbols, since we can't
+        # assume they are positive (or negative).
+
+        # We don't want to apply 0/1 specialization to unspecified symbols,
+        # so that we always get a fresh symbol regardless of val.
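+        # Illustrative sketch: an unspecified input with val == 1 still receives
+        # a fresh symbol (with positive=None and an unrestricted range) instead
+        # of being pinned to sympy.Integer(1).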
+        return self.create_symbol(val, source, dynamic_dim, constraint_dim, positive=None, do_not_specialize_zero_one=True)
+
+    @record_shapeenv_event()
+    def create_symbol(
+        self,
+        val: int,
+        source: Source,
+        dynamic_dim: DimDynamic = DimDynamic.DUCK,
+        constraint_dim: DimConstraint = None,  # NB: includes None
+        positive: Optional[bool] = True,
+        do_not_specialize_zero_one: bool = False,
+    ) -> "sympy.Expr":
+        if do_not_specialize_zero_one:
+            specialize_zero_one = False
+        else:
+            specialize_zero_one = self.specialize_zero_one
+
+        assert isinstance(source, Source), f"{type(source)} {source}"
+        assert not (positive and val < 0), f"positive set for negative value: {val}"
+        # It's always sound to allocate a symbol as DYNAMIC. If the user
+        # constrained the symbol, force the symbolic_context to DYNAMIC, because our
+        # constraint code will do weird stuff if, e.g., it's duck shaped
+        if constraint_dim is not None:
+            dynamic_dim = DimDynamic.DYNAMIC
+
+        if dynamic_dim is DimDynamic.STATIC:
+            # We don't expect to ever reach here even if the user specifies
+            # dynamic=False, because automatic_dynamic is skipped for
+            # nested tensors.
+            return sympy.Integer(val)
+
+        elif dynamic_dim is DimDynamic.DUCK:
+            # duck_shape can be used to globally turn off duck shaping, even
+            # if it was requested
+            duck = self.duck_shape
+        elif dynamic_dim is DimDynamic.DYNAMIC:
+            duck = False
+        else:
+            raise AssertionError(f"unhandled dynamic_dim {dynamic_dim}")
+
+        if val in (0, 1) and specialize_zero_one:
+            r = self.val_to_var[val]
+        elif not duck or val not in self.val_to_var:
+            # If we're not duck shaping, we always create a new symbol
+            # Even if we're duck shaping, if we haven't seen this particular
+            # value before, we also create a new symbol
+            sympy_expr = sympy.Symbol(f"s{len(self.var_to_val)}", positive=positive, integer=True)
+            # We always associate vars to vals
+            if isinstance(val, int):
+                self.var_to_val[sympy_expr] = sympy.Integer(val)
+            else:
+                # Only used for jagged layout nested tensors
+                self.var_to_val[sympy_expr] = SingletonInt(val.node.singleton_int(), coeff=val.node.singleton_coeff())
+
+            # Do the appending later, because we always want to populate this
+            self.var_to_sources[sympy_expr] = []
+            # Create a Z3 variable for the new symbol.
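+            # (This is a no-op unless translation validation is enabled; see
+            # _add_z3var above.)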
+            self._add_z3var(sympy_expr, int)
+
+            if duck:
+                # Make sure to reuse this symbol for subsequent duck shaping
+                self.val_to_var[val] = sympy_expr
+
+            if isinstance(val, int):
+                if positive:
+                    # Add assertions for the newly created symbols
+                    self._add_assertion(sympy_expr > 1)
+
+                    # Apply default range, which assumes not zero-one
+                    self.var_to_range[sympy_expr] = self._default_value_range()
+                else:
+                    self.var_to_range[sympy_expr] = self._default_unspecified_value_range()
+
+                # Small performance optimization: if we have a min-max constraint,
+                # we can proactively narrow to that range
+                if isinstance(constraint_dim, StrictMinMaxConstraint):
+                    assert not duck
+                    self.var_to_range[sympy_expr] &= constraint_dim.vr
+
+                vr = self.var_to_range[sympy_expr]
+
+                if val not in vr:
+                    raise ConstraintViolationError(f"{val} not in range [{vr.lower}, {vr.upper}]")
+
+                # Initialize default runtime range to match compile time range,
+                # for backed SymInts (this is allowed to diverge for unbacked)
+                self.runtime_var_to_range[sympy_expr] = vr
+
+                range_str = f"[{vr.lower}, {vr.upper}]"
+            else:
+                # Skip var_range logic for SingletonInt
+                # Only used for jagged layout nested tensors
+                range_str = ""
+
+            r = sympy_expr
+            self.log.info("create_symbol %s = %s for %s %s", sympy_expr, val, source.name(), range_str)
+            self.counter["create_symbol"] += 1
+        else:
+            # This implements duck-shaping: input sizes that match are assigned
+            # the same symint
+            r = self.val_to_var[val]
+            self.log.debug("create_symbol %s duck sized %s", r, source.name())
+
+        if isinstance(r, sympy.Symbol):
+            self.var_to_sources[r].append(source)
+
+        return r
+
+    def debug_name(self, source):
+        src_name = source.name()
+        return self.source_name_to_debug_name.get(src_name, src_name)
+
+    def render_range_for_constraint_violation(self, source, c):
+        if isinstance(c, StrictMinMaxConstraint):
+            lower, upper = c.vr.lower, c.vr.upper
+            default = self._default_value_range()
+            if lower <= default.lower:
+                lower = None
+            if upper >= default.upper:
+                upper = None
+            c_render = f"{self.debug_name(source)} = {source.name()} in the specified range"
+            if lower is not None and upper is not None:
+                c_render += f" {lower} <= {self.debug_name(source)} <= {upper}"
+            elif lower is None and upper is not None:
+                c_render += f" {self.debug_name(source)} <= {upper}"
+            elif lower is not None and upper is None:
+                c_render += f" {lower} <= {self.debug_name(source)}"
+            return c_render
+        return c.render(source)
+
+    # Generates a list of guard strings which, when evaluated in a context that
+    # defines tensors for all the sources, returns True or False depending
+    # on whether the guards in the list evaluated to True or not. Primarily used by Dynamo,
+    # but this is also helpful for manual testing of guards (see
+    # evaluate_guards_for_args)
+    #
+    # For convenience in testing, a source is allowed to be a str,
+    # in which case we will assume it is a LocalSource
+    #
+    # simplified lets you omit duck sizing, equality and 0/1 guards.
+    # This is useful for testing when you don't care about the boilerplate
+    # guards, and it may be helpful for user output too (be careful though;
+    # some equality guards are nontrivial! It would be nice to get simplified
+    # output to print them too). It's private because it's not
+    # intended for normal use
+    def produce_guards(
+        self,
+        placeholders,
+        sources,
+        source_ref=lambda n: n.name(),
+        *,
+        # An input is either a SymInt (in which case you directly have
+        # DimConstraint) or a Tensor (in which case you have a
+        # DimList[DimConstraint]).
+        # Whenever Optional is accepted, that just means there are no constraints.
+        constraint_inputs: Optional[InputList[Union[DimConstraint, Optional[DimList[DimConstraint]]]]] = None,
+        equalities_inputs: Optional[Set[Tuple[Source, Source]]] = None,
+        _simplified=False,
+        # Indicates if we should produce guards for known static values.
+        ignore_static=True,
+    ) -> List[str]:
+        self.log.info("produce_guards")
+
+        # Check if we get to the same ShapeEnv state by replaying the recorded events.
+        # This will create a new ShapeEnv instance, and call all recorded function
+        # calls on this new instance. Finally, it will check whether this new instance
+        # has equal state.
+        #
+        # It's important that we do it at the beginning of this function, since it modifies
+        # self.dim_constraints through its execution. Changes that happen in this method
+        # aren't interesting, since this is the function call we wish to reproduce at the
+        # end. If we wish to simply reproduce ShapeEnv instances even after this call,
+        # this method should also be recorded.
+        if self.check_recorded_events:
+            shape_env = replay_shape_env_events(self.events)
+            self.check_equal(shape_env)
+
+        assert len(placeholders) == len(sources)
+        Tensorlike = (torch.Tensor, FakeTensorMeta)
+
+        # Expand optional inputs, or verify invariants are upheld
+        if constraint_inputs is None:
+            constraint_inputs = [
+                [None] * t.dim() if isinstance(t, Tensorlike) else None for t in placeholders
+            ]
+        else:
+            assert len(constraint_inputs) == len(placeholders)
+            for i, (t, constraint) in enumerate(zip(placeholders, constraint_inputs)):
+                if isinstance(t, Tensorlike):
+                    if constraint is None:
+                        constraint_inputs[i] = [None] * t.dim()
+                    else:
+                        assert len(constraint) == t.dim()
+                else:
+                    assert isinstance(t, (SymInt, int))
+                    assert not isinstance(constraint, list)
+
+        # It took a lot of sweat to figure out the algorithm here. Let's
+        # explain how it works.
+        #
+        # The ShapeEnv lifecycle looks something like this:
+        #
+        # - For each input, you either generate a fresh Sympy symbol (s0) to
+        #   represent its value (a binding site), or you reuse some
+        #   preexisting symbol or expression, skipping the symbol allocation
+        #   (e.g., duck sizing to a preexisting symbol, or expressing a
+        #   stride as a multiplication of a separate stride and size.)
+        #   Naively, you might expect to bind a fresh Sympy symbol for
+        #   every input, but this is fairly wasteful as most of these
+        #   symbols immediately simplify away, and if you don't eagerly
+        #   specialize, e.g., 0/1 symbols, you end up with very complicated
+        #   expressions that are not optimizable in practice.
+        #
+        # - You perform some compute on these symbols, occasionally
+        #   introducing guards on boolean expressions on these symbols.
+        #   In particular, whenever we guard on equality (_maybe_guard_eq),
+        #   we can simplify shapes; e.g., when s0 == s1 * 2, we can now
+        #   replace all occurrences of s0 with s1 * 2. Sometimes, a
+        #   boolean expression evaluation doesn't introduce a guard, as
+        #   the guard is already entailed by the simplifications we have
+        #   applied.
+        #
+        # - In the end, you have a bunch of replacements (saying how to
+        #   simplify shapes) and a bunch of guards (all the equality guards
+        #   are trivial, because they're covered by the replacements).
+        #
+        # From the ShapeEnv, we must generate a Python expression that, when
+        # evaluated on a set of inputs, tells us whether or not these boolean
+        # expressions would have evaluated in the same way.
+        # However, we cannot easily compute this, as we elide recording boolean
+        # expressions when we think they are vacuously true. Thus, we seek
+        # an approximation: we must generate an expression that, if true, would have
+        # produced an "equivalent" ShapeEnv, which would answer guard
+        # expressions in the same way.
+        #
+        # Our notion of equivalence is a bit subtle. For example, consider
+        # the ShapeEnv created from an input of size (5, 4) versus (4, 4)
+        # (no other guards.) Duck sizing would generate (s0, s1) in the first
+        # case but (s0, s0) in the second. We do NOT assume that size
+        # variables are disjoint; so in fact a graph that assumes the input
+        # could be (s0, s1) subsumes (s0, s0) (setting s0 == s1), but not
+        # vice versa. However, consider an analogous case (1,) versus (2,).
+        # Duck sizing generates (1,) and (s0,); the (s0,) graph does NOT
+        # subsume the (1,) graph because we assume that any size variable
+        # is NOT 0/1 (and make simplifications according to this; e.g., if
+        # we queried s0 == 0, we would immediately return False without
+        # returning a guard.)
+        #
+        # So, it is perhaps easier to flip things on their head: the guard
+        # expressions we generate here say what simplifications are valid,
+        # and what are not. Below, we explain each of the guard expressions
+        # we generate
+
+        # TODO: Make this more efficient by binding all the size/stride/offsets
+        # to locals before performing tests on them.
+
+        from torch._dynamo.source import TensorPropertySource, TensorProperty, NegateSource
+
+        # Actual codegen must be delayed as we don't necessarily know what
+        # the symbol mapping is
+        input_guards = []
+
+        symbol_to_source = collections.defaultdict(list)
+        symbol_to_constraints = collections.defaultdict(set)
+        constraint_violations: List[Tuple[bool, Callable[[], str]]] = []
+
+        def record_constraint_violation(warn_only, debug_name, msg, hint=None):
+            constraint_violations.append(
+                (warn_only, debug_name, lambda: f"{msg}{hint()}" if hint else msg)
+            )
+
+        def is_dim(src):
+            return isinstance(src, TensorPropertySource) and src.prop is TensorProperty.SIZE
+
+        if equalities_inputs:
+            source_index = {}
+            for i, src in enumerate(sources):
+                source_index[src.name()] = i
+
+            def get_symbol(tensor_dim_src):
+                fake = placeholders[source_index[tensor_dim_src.base.name()]]
+                symint = fake.shape[tensor_dim_src.idx]
+                assert isinstance(symint, torch.SymInt)
+                return symint.node.expr
+
+            for src1, src2 in equalities_inputs.source_pairs:
+                s1, s2 = get_symbol(src1), get_symbol(src2)
+                concrete_val = self.evaluate_expr(sympy.Eq(s1, s2))
+                if not concrete_val:
+                    raise ConstraintViolationError(
+                        f"{src1.name()} = {self.var_to_val[s1]}"
+                        " is not equal to "
+                        f"{src2.name()} = {self.var_to_val[s2]}"
+                    )
+
+        # How do we know what the value of s0 is? Fresh variables can only be
+        # bound by inputs, so there MUST be some other input which binds the
+        # variable. If there is no such input, this is an error in our
+        # system. We record where all symbols come from, to help you diagnose
+        # why those symbols didn't occur.
+        #
+        # In fact, generally speaking it is only possible for the "outermost"
+        # user of a ShapeEnv to evaluate the guards, because some inputs may
+        # not be available to inner levels. For example, Dynamo can guard on
+        # tensors that never actually become graph arguments (they are
+        # pruned). In this case, only Dynamo knows about these arguments.
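+        # Illustrative example (hypothetical): for a placeholder of size (s0, 3)
+        # bound to source L['x'], track_symint below records
+        #   symbol_to_source[s0] == [TensorPropertySource(L['x'], SIZE, 0)]
+        # and appends (TensorPropertySource(L['x'], SIZE, 1), 3) to input_guards.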
+        def track_symint(source, val, constraint=None):
+            log.debug("track_symint %s %s %s", LazyString(source.name), val, constraint)
+            assert not isinstance(val, SymInt) or is_symbolic(val)
+
+            if isinstance(val, SymInt) and val.node.maybe_as_int() is not None:
+                val = val.node.maybe_as_int()
+
+            if isinstance(val, SymInt):
+                s = val.node.expr
+                if isinstance(s, sympy.Symbol):
+                    symbol_to_source[s].append(source)
+                    if constraint is not None:
+                        symbol_to_constraints[s].add(constraint)
+                elif isinstance(-s, sympy.Symbol):
+                    symbol_to_source[-s].append(NegateSource(source))
+                else:
+                    constraint_violated = False
+                    if isinstance(constraint, StrictMinMaxConstraint):
+                        # try inferring the ranges of the expr s
+                        sym_vrs = {x: self.var_to_range.get(x, None) for x in s.free_symbols}
+                        if all(vr is not None for vr in sym_vrs.values()):
+                            expr_vr = bound_sympy(s, sym_vrs)
+                            if expr_vr != constraint.vr:
+                                # the expr and constraint ranges don't match
+                                constraint_violated = True
+                        else:
+                            # some of the free symbols in s don't have ranges
+                            constraint_violated = True
+                    elif isinstance(constraint, RelaxedUnspecConstraint):
+                        if s.is_number:
+                            i = int(s)
+                            # Don't complain about 0/1 specialization, we
+                            # expect to have to compile in this case anyway
+                            if i not in (0, 1):
+                                constraint_violated = True
+                        else:
+                            # TODO: Maybe non-strict constraint shouldn't error
+                            # here? Check what happens in practice
+                            constraint_violated = True
+                    if constraint_violated:
+                        def hint(s):
+                            sexpr = ShapeGuardPrinter(symbol_to_source, source_ref, self.var_to_sources).doprint(s)
+                            return f"{sexpr}."
+
+                        var_with_range = self.render_range_for_constraint_violation(source, constraint)
+                        msg = (
+                            f"Not all values of {var_with_range} are valid because "
+                            f"{self.debug_name(source)} was inferred to be equal to "
+                        )
+                        record_constraint_violation(
+                            constraint.warn_only,
+                            self.debug_name(source),
+                            msg,
+                            hint=functools.partial(hint, s),
+                        )
+
+                input_guards.append((source, s))
+            else:
+                s = sympy.Integer(val)
+                input_guards.append((source, s))
+                constraint_violated = False
+                if isinstance(constraint, StrictMinMaxConstraint):
+                    constraint_violated = True
+                elif isinstance(constraint, RelaxedUnspecConstraint):
+                    # Don't complain about 0/1 specialization, we
+                    # expect to have to compile in this case anyway
+                    if val not in (0, 1):
+                        constraint_violated = True
+                if constraint_violated:
+                    var_with_range = self.render_range_for_constraint_violation(source, constraint)
+                    msg = (
+                        f"Not all values of {var_with_range} are valid because "
+                        f"{self.debug_name(source)} was inferred to be a constant ({val})."
+                    )
+                    record_constraint_violation(constraint.warn_only, self.debug_name(source), msg)
+
+        for t, source, constraint in zip(placeholders, sources, constraint_inputs):
+            if isinstance(source, str):
+                from torch._dynamo.source import LocalSource
+                source = LocalSource(source)
+            assert isinstance(source, Source)
+            if t is None:
+                continue
+            if isinstance(t, (SymInt, int)):
+                track_symint(source, t)
+                continue
+            assert isinstance(t, Tensorlike)
+            sources_and_tensors = [(source, t)]
+            if is_traceable_wrapper_subclass(t):
+                # If our placeholder is a tensor subclass, then the "true" symints
+                # come from the subclass's inner tensors.
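+                # (Illustrative: for a hypothetical subclass storing its data in an
+                # attribute `inner`, __tensor_flatten__() would return something like
+                # (["inner"], ctx), and we would track AttrSource(source, "inner")
+                # below rather than the outer source.)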
+ attrs, _ = t.__tensor_flatten__() + from torch._dynamo.source import AttrSource + inner_sources_and_tensors = [(AttrSource(source, attr), getattr(t, attr)) for attr in attrs] + if t.is_nested: + # For NestedTensors we need to track BOTH symints on the outer + # tensor and tensor because we'd like to guard on the ragged + # size but the symint representing ragged size is not in terms + # of the symints on the inner tensors. + sources_and_tensors.extend(inner_sources_and_tensors) + else: + # For other tensor subclasses, only track the symints from + # the inner tensors + sources_and_tensors = inner_sources_and_tensors + + for src, curr_t in sources_and_tensors: + for i, ss in enumerate(curr_t.size()): + property_source = TensorPropertySource(src, TensorProperty.SIZE, i) + track_symint(property_source, ss, constraint[i]) + if not t.is_nested: + for i, ss in enumerate(curr_t.stride()): + track_symint(TensorPropertySource(src, TensorProperty.STRIDE, i), ss) + track_symint(TensorPropertySource(src, TensorProperty.STORAGE_OFFSET), curr_t.storage_offset()) + + # 1. Every input must equal the final simplified symbolic expression + # stored on the placeholder. Given a placeholder (s0*2, s1), + # if we have an input (2, 3), we must show s0*2 == 2 and s1 == 3. + # This does a lot of work: it covers duck sizing and equality guards. + exprs = [] + self.dim_constraints = DimConstraints( + symbol_to_source, + self.var_to_val, + set(symbol_to_constraints.keys()), + self.source_name_to_debug_name, + ) + + if not _simplified: + for source, expr in input_guards: + if self._translation_validation_enabled: + # Ignore sources that were not turned into SymInts. + srcname = source.name() + if srcname in self.source_to_symbol: + self._add_target_expr(sympy.Eq(self.source_to_symbol[srcname], expr)) + + # Small optimization + if ( + isinstance(expr, sympy.Symbol) and + symbol_to_source.get(expr) and + source == symbol_to_source[expr][0] + ): + continue + + # This logic excludes static values found on tensors from guarding, because + # dynamo's check_tensor_fn does that (see guards.cpp). + # However, for non tensor sources, we still need to guard here. + if ignore_static and isinstance(source, TensorPropertySource): + if expr.is_number: + self.log.debug("Skipping guard %s", f"{source_ref(source)} == {expr}") + continue + + if is_dim(source): + self.dim_constraints.add_equality(source, expr) + + sexpr = ShapeGuardPrinter(symbol_to_source, source_ref, self.var_to_sources).doprint(expr) + exprs.append(f"{source_ref(source)} == {sexpr}") + if ( + isinstance(expr, sympy.Symbol) and + expr in symbol_to_constraints and + isinstance(source, TensorPropertySource) + and source.prop is TensorProperty.SIZE + and equalities_inputs and + not equalities_inputs.is_equal(source, symbol_to_source[expr][0]) + ): + msg = ( + f"The values of {self.debug_name(source)} = {source.name()} and " + f"{self.debug_name(symbol_to_source[expr][0])} = {symbol_to_source[expr][0].name()} " + "must always be equal." + ) + record_constraint_violation(equalities_inputs.warn_only, self.debug_name(source), msg) + # NB: Not necessary to report constraint violations here: + # constraints are guaranteed to be on symbols (we've already + # caught constants and non-atomic expressions), so we only + # have relational constraints, but we don't support those + # at the moment + + # 2. 
+        # Every guard must evaluate to True (but remember many guards
+        # like s0 == s1*2 become trivial due to simplification)
+        issued = set()
+
+        def issue_guard(guard: ShapeGuard) -> None:
+            expr = self.simplify(guard.expr)
+
+            # Avoid re-issuing the same guard.
+            if expr in issued:
+                return
+
+            issued.add(expr)
+
+            try:
+                is_trivial = False
+                if any(is_dim(source) for s in expr.free_symbols for source in symbol_to_source[s]):
+                    is_trivial = self.dim_constraints.add(expr)
+                guard_expr = ShapeGuardPrinter(symbol_to_source, source_ref, self.var_to_sources).doprint(expr)
+                exprs.append(guard_expr)
+                self._add_target_expr(expr)
+                # A non-relational constraint on a single sizevar can violate
+                # a constraint
+                if not is_trivial and len(expr.free_symbols) == 1:
+                    symbol = next(iter(expr.free_symbols))
+                    source = symbol_to_source[symbol][0]
+                    constraints = symbol_to_constraints[symbol]
+                    for c in constraints:
+                        if isinstance(c, StrictMinMaxConstraint):
+                            var_with_range = self.render_range_for_constraint_violation(source, c)
+                            msg = (
+                                f"Not all values of {var_with_range} "
+                                f"satisfy the generated guard {guard_expr}."
+                            )
+                            record_constraint_violation(c.warn_only, self.debug_name(source), msg)
+                        elif isinstance(c, RelaxedUnspecConstraint):
+                            # This is fine, we allow guards here as long as it
+                            # didn't constrain it to one value (we don't
+                            # actually know this; this depends on our
+                            # ValueRanges reasoning capability)
+                            pass
+                        else:
+                            raise AssertionError(f"unrecognized constraint {c}")
+            except Exception:
+                self.log.warning("Failing guard allocated at: \n%s", ''.join(guard.stack.format()))
+                raise
+
+        # First, issue all the non-trivial guards.
+        for guard in self.guards:
+            if self._maybe_evaluate_static(guard.expr) is not None:
+                continue
+            issue_guard(guard)
+
+        # Then, issue the guards that refine the value range of tracked symbols.
+        # We need to explicitly issue these guards, since they are the ones that
+        # guarantee the symbol's value range. Plus, due to the updated value
+        # range, they may be skipped in the previous step.
+        for symbol, guards in self.var_to_guards.items():
+            if symbol not in symbol_to_source:
+                continue
+            for guard in guards:
+                if guard is not None:
+                    issue_guard(guard)
+
+        # 3. Every symbol must be within its value range (this handles 0/1
+        # specialization too). NB: because we never update value ranges
+        # except in case of explicit user annotation, these are not included
+        # in simplified. However, when we start updating value ranges
+        # these should probably get reported in tests too
+        if not _simplified:
+            for symbol, sources in symbol_to_source.items():
+                r = self.runtime_var_to_range.get(symbol)
+                if r is None:
+                    if symbol not in self.var_to_range:
+                        continue
+                    r = self.var_to_range[symbol]
+
+                assert sources
+                assert symbol.is_integer
+                g_lower, g_upper = self.var_to_guards.get(symbol, (None, None))
+                bounds = []
+                if r.lower != -sympy.oo and g_lower is None:
+                    if any(is_dim(source) for source in sources):
+                        self.dim_constraints.add(sympy.Ge(symbol, r.lower))
+                    bounds.append(str(r.lower))
+                bounds.append(source_ref(sources[0]))
+                # NB: This looks like an off-by-one error but it's not: the
+                # upper bound may be sys.maxsize - 1 because we intentionally
+                # exclude sys.maxsize from our bounds to deal with direct
+                # == INT_MAX guards, but it's still dumb to actually test it.
+                # Note that you can be off by a pretty large constant and it
+                # won't matter because sizes in practice will be nowhere near
+                # the 64-bit limit.
+ if r.upper != sympy.oo and r.upper < sys.maxsize - 1 and g_upper is None: + if any(is_dim(source) for source in sources): + self.dim_constraints.add(sympy.Le(symbol, r.upper)) + bounds.append(str(r.upper)) + if len(bounds) > 1: + exprs.append(" <= ".join(bounds)) + + if constraint_violations: + warn_msgs = [] + error_msgs = [] + debug_names = set() + for warn_only, debug_name, msg in constraint_violations: + if warn_only: + msg = f" {len(warn_msgs) + 1}. {msg()}" + warn_msgs.append(msg) + else: + msg = f" - {msg()}" + error_msgs.append(msg) + debug_names.add(debug_name) + if len(error_msgs) > 0: + debug_names = ', '.join(debug_names) + err = '\n'.join(error_msgs) + raise ConstraintViolationError( + f"Constraints violated ({debug_names})! " + "For more information, run with TORCH_LOGS=dynamic.\n" + f"{err}" + ) + elif len(warn_msgs) > 0: + log.debug("%s Warning only constraints violated", len(warn_msgs)) + + signpost_event( + "dynamic", + "produce_guards", + { + **self.co_fields, + **self.counter, + "num_guards": len(exprs), + "free_symbols": sum(1 for v in symbol_to_source.values() if v), + }, + ) + + if self._translation_validation_enabled: + from torch.fx.experimental.validator import PopulateValidator + + # Add all deferred runtime assertions; these are not technically + # handled by produce_guards but we need to put them in the target + # set + for ras in self.deferred_runtime_asserts.values(): + for ra in ras: + self._add_target_expr(ra.expr) + + # Add value range bound guards for all symbols with no trivial bounds. + # Reason: '_maybe_evaluate_static' may eliminate guards based on the + # refined value ranges. + # + # NB: do NOT use runtime var ranges, they're unsound! You will + # only get correct TV with the compile-time ranges. + for sym, vr in self.var_to_range.items(): + if vr.lower != -sympy.oo: + self._add_target_expr(sympy.Le(vr.lower, sym)) + if vr.upper != sympy.oo: + self._add_target_expr(sympy.Le(sym, vr.upper)) + + # Before validating, populate the input of the validator with the + # built FX graph. + with fx_traceback.preserve_node_meta(): + PopulateValidator(self.graph, self.validator).run() + + self._check_translation_validate() + return exprs + + def produce_guards_expression(self, placeholders, ignore_static=True): + """ + Expected to be used with evaluate_guards_expression(). Produces the guards + for the given placeholders and returns a string expression to be evaluated + by evaluate_guards_expression given concrete values for the placeholders. + """ + from torch._dynamo.source import LocalSource + arg_names = [f"t{i}" for i in range(len(placeholders))] + guards = self.produce_guards(placeholders, [LocalSource(a) for a in arg_names], ignore_static=ignore_static) + if guards: + return " and ".join(guards) + return None + + def evaluate_guards_expression(self, code, args): + """ + Expected to be used with produce_guards_expression(). Evaluates an expression + generated by produce_guards_expression for the given concrete args. 
+ """ + arg_names = [f"t{i}" for i in range(len(args))] + return eval(code, SYMPY_INTERP, {"L": dict(zip(arg_names, args))}) + + def evaluate_guards_for_args(self, placeholders, args, *, ignore_static=True): + code = self.produce_guards_expression(placeholders, ignore_static=ignore_static) + if code: + return self.evaluate_guards_expression(code, args) + return True + + def bind_symbols(self, placeholders, args): + # Given a paired list of placeholders (fake tensors with + # symbolic sizes) and concrete arguments (regular tensors + # with real sizes), returns a dictionary mapping each + # symbol to its real value. So for example, if you + # have a placeholder with size (s0, s1), binding + # (2, 4) to it will give you {s0: 2, s1: 4}. This is + # not guaranteed to bind ALL symbols in the ShapeEnv; + # we can't bind a symbol if it doesn't occur in any placeholder, + # and symbols that already have replacements won't get bindings. + + # This is a little duplicative with evaluate_guards but + # it's different enough that it seemed cleanest to make + # another copy. This assumes the guards are already checked, + # though if it's cheap we'll check for shenanigans + bindings: Dict[sympy.Symbol, int] = {} + + def bind_symint(arg, val): + if isinstance(val, SymInt): + s = val.node.expr + + if isinstance(s, sympy.Symbol): + if s in bindings: + assert bindings[s] == arg, f"{bindings[s]} != {arg}" + else: + bindings[s] = arg + elif isinstance(-s, sympy.Symbol): + if -s in bindings: + assert bindings[-s] == -arg, f"{bindings[-s]} != {-arg}" + else: + bindings[-s] = -arg + + for t, arg in zip(placeholders, args): + if t is None: + continue + if isinstance(t, SymInt): + bind_symint(arg, t) + continue + assert isinstance(t, torch.Tensor) + for i, s in enumerate(t.size()): + bind_symint(arg.size(i), s) + for i, s in enumerate(t.stride()): + bind_symint(arg.stride(i), s) + bind_symint(arg.storage_offset(), t.storage_offset()) + + return bindings + + def get_nontrivial_guards(self): + return [self.simplify(guard.expr) for guard in self.guards if self._maybe_evaluate_static(guard.expr) is None] + + def format_guards(self, verbose=False): + def format_tb(tb): + if not verbose: + return "" + return f"\n Guarded at:\n{''.join(' ' + l for l in tb.format())}" + + return '\n'.join(f" - {guard.expr}{format_tb(guard.stack)}" for guard in self.guards) + + def get_shape_groups(self): + shape_groups = collections.defaultdict(list) + for k, v in self.replacements.items(): + shape_groups[v].append(k) + return shape_groups + + @_lru_cache + def _maybe_evaluate_static( + self, expr: "sympy.Expr", *, unbacked_only: bool = False, compute_hint: bool = False, + expect_rational=True, + ) -> "Optional[sympy.Expr]": + """ + Tries to evaluate expr without introducing guards + + If unbacked_only == True, then we only do substitutions on + unbacked SymInts (leaving regular hinted integers alone). This could + result in an expression that still contains backed SymInts, which you + could then potentially guard on. + + Use compute_hint == True if you are trying to compute a non-binding + hint for the particular hint values of backed SymInts, e.g., if + s0 happens to be 3 this run, compute_hint will subsitute s0 with 3. 
+ """ + expr = self.simplify(expr) + + if compute_hint: + expr = expr.xreplace(self.var_to_val) + + expr = canonicalize_bool_expr(expr) + + symbols = list(expr.free_symbols) + + # Apply known runtime asserts + for s in symbols: + # Unbacked symints only + if s in self.var_to_val: + continue + subst = {} + for ra in self.deferred_runtime_asserts.get(s, ()): + if compute_hint: + e = canonicalize_bool_expr(ra.expr.xreplace(self.var_to_val)) + else: + e = ra.expr + # e is already canonical + subst[e] = sympy.true + subst[canonicalize_bool_expr(sympy.Not(e))] = sympy.false + if isinstance(e, sympy.Eq): + subst[sympy.Le(e.lhs, e.rhs)] = sympy.true + subst[sympy.Le(-e.lhs, -e.rhs)] = sympy.true + subst[sympy.Lt(e.lhs, e.rhs)] = sympy.false + subst[sympy.Lt(-e.lhs, -e.rhs)] = sympy.false + + # NB: this helps us deal with And/Or connectives + expr = expr.subs(subst) + + # Simplify making use of value range lower bound + new_shape_env = {} + new_range_env = {} + for idx, k in enumerate(symbols): + if isinstance(self.var_to_val.get(k, None), SingletonInt): + # Skip var_to_range logic for SingletonInt which is only used + # for jagged layout NestedTensors today + continue + vr = self.var_to_range[k] + # Don't do anything if we don't have a nontrivial lower bound + # Also don't do anything if we asked only to simplify unbacked + # SymInt + if ( + vr.lower < (-sys.maxsize - 1) // 2 or + (unbacked_only and k in self.var_to_val) + ): + new_range_env[k] = vr + continue + # Positive means >= 1 + # Positive - 1 means >= 0 + # Positive + lower - 1 means >= lower + # The new symbol 's' is "too low", so when we substitute it in + # we have to increase it by offset (and conversely, the new + # variables have to have their value range bounds adjusted as + # well) + s = sympy.Symbol(f"shape_{idx}", positive=True, integer=True) + offset = vr.lower - 1 + new_shape_env[k] = s + offset + new_range_env[s] = SymPyValueRangeAnalysis.add(vr, -offset) + + def replace(expr, repl): + return expr.xreplace(repl) + + try: + new_expr = replace(expr, new_shape_env) + except RecursionError: + log.warning("RecursionError in sympy.xreplace(%s, %s)", expr, new_shape_env) + self.counter["sympy_recursion_error"] += 1 + return None + + floor_div_replace = {} + for atom in new_expr.atoms(FloorDiv): + floor_div_replace[atom] = sympy.floor(atom.args[0] / atom.args[1]) + new_expr = safe_expand(new_expr.xreplace(floor_div_replace)) + # TODO: when unbacked_only, can sometimes early return even when there + # are still free symbols + if new_expr.is_number: + return new_expr + + # Check if the range can solve it statically + out = bound_sympy(new_expr, new_range_env) + if expect_rational: + _assert_bound_is_rational(new_expr, out) + if out.is_singleton(): + return out.lower + + return new_expr if unbacked_only else None + + @_lru_cache + def replace(self, expr: "sympy.Expr") -> "sympy.Expr": + replacements = {s: self._find(cast(sympy.Symbol, s)) for s in expr.free_symbols} + return safe_expand(expr.xreplace(replacements)) + + @_lru_cache + def _update_divisible(self): + new_divisible = set() + for k in self.divisible: + res = self.replace(k) + if not res.is_number: + new_divisible.add(k) + + self.divisible = new_divisible + self._update_version_counter() + + @_lru_cache + def simplify(self, expr: "sympy.Expr") -> "sympy.Expr": + expr = self.replace(expr) + # TODO it would seem that this pass is not necessary given the + # below replacement of // with /, but for nested FloorDivs + # the non-recursive replacement doesn't work, and + # recursive 
makes it hard to look up divisibility, + # because existing divisibility info has FloorDiv in it, not / + # for now just do a separate pass to catch common nested case + if expr.has(FloorDiv): + self._update_divisible() + div_replacements = {} + for atom in expr.atoms(FloorDiv): + base, divisor = atom.args + if isinstance(divisor, FloorDiv): + base1, divisor1 = divisor.args + if self.replace(Mod(base, divisor)) in self.divisible and \ + base == base1 and self.replace(Mod(base1, divisor1)) in self.divisible: + div_replacements[atom] = divisor1 + expr = expr.xreplace(div_replacements) + expr = safe_expand(expr) + if expr.has(FloorDiv): + div_replacements = {} + pows = expr.atoms(sympy.Pow) + rationals = expr.atoms(sympy.Rational).difference(expr.atoms(sympy.Integer)) + for fd in expr.atoms(FloorDiv): + base, divisor = fd.args + if self.replace(Mod(base, divisor)) in self.divisible: + div_replacements[fd] = base / divisor + new_expr = expr.xreplace(div_replacements) + new_expr = safe_expand(new_expr) + new_pows = new_expr.atoms(sympy.Pow) + new_rationals = new_expr.atoms(sympy.Rational).difference(new_expr.atoms(sympy.Integer)) + # divisions simplified away + if new_pows.issubset(pows) and new_rationals.issubset(rationals): + expr = new_expr + return expr + + @lru_cache(256) + def size_hint(self, expr: "sympy.Expr", *, allow_none=False): + """ + Gets a size hint for a given expression from the underlying shapes we had. + Does not introduce a guard, so only use this when you can guarantee that + your code is still valid for arbitrary shapes (such as optimization decisions) + """ + result_expr = safe_expand(expr).xreplace(self.var_to_val) + if not result_expr.is_number: + r = self._maybe_evaluate_static(result_expr, compute_hint=True) + if r is not None: + return r + if allow_none: + return None + raise self._make_data_dependent_error(result_expr, expr) + return result_expr + + # NB: keep in sync with size_hint + @lru_cache(256) + def has_hint(self, expr: "sympy.Expr"): + result_expr = safe_expand(expr).xreplace(self.var_to_val) + return result_expr.is_number or self._maybe_evaluate_static(result_expr) is not None + + def _make_data_dependent_error(self, expr, unhinted_expr): + # TODO: in a Dynamo context, having user code, and having the + # name of the local, will be much better + for s in expr.free_symbols: + stacktrace = ''.join(self.var_to_stack[s].format()) + self.log.debug("Data dependent variable '%s' allocated at:\n%s", s, stacktrace) + return GuardOnDataDependentSymNode( + "It appears that you're trying to get a value out of symbolic int/float " + "whose value is data-dependent (and thus we do not know the true value.) " + f"The expression we were trying to evaluate is {expr} (unhinted: {unhinted_expr}). " + "Scroll up to see where each of these data-dependent accesses originally occurred." + # TODO: Help text about how to use our runtime tests to fix this + # problem + ) + + def _set_replacement(self, a: "sympy.Symbol", expr: "sympy.Expr") -> None: + """ + Adds or updates a replacement for a symbol. + Use this instead of `self.replacements[a] = expr`. + """ + if config.print_specializations and isinstance(expr, (sympy.Integer, sympy.Float)): + # specializing to a constant, which is likely unexpected + + # NOTE(avik): It is possible that we try logging the same specialization multiple times, e.g., + # when adding a to self.replacements, and again when simplifying an expression containing a. 
+ # Thus to avoid duplication, checking whether a is in self.replacements isn't enough; if it is, + # it must not already map to `expr`. Fortunately this check is cheap because `expr` is a constant. + if a not in self.replacements or expr != self.replacements[a]: + self.log.warning("Specializing %s to %s", self.var_to_sources[a][0].name(), expr) + self.log.debug("SPECIALIZATION", stack_info=True) + log.info("set_replacement %s = %s", a, expr) + self.replacements[a] = expr + self._update_version_counter() + + # When specializing 'a == expr', the equality should be also conveyed to + # Z3, in case an expression uses 'a'. + self._add_target_expr(sympy.Eq(a, expr)) + + def _add_divisible(self, expr: "sympy.Expr"): + self.divisible.add(expr) + self._update_version_counter() + + @_lru_cache + @record_shapeenv_event() + def _find(self, a: "sympy.Symbol") -> "sympy.Expr": + """ + Implements a DSU-like algorithm to find the variable that represents a + Also handles transitive non-identity replacements. + + a: b + c + c: d + """ + if a not in self.replacements: + return a + res = self.replacements[a] + cur_replace = {s: self._find(s) for s in res.free_symbols} + self._set_replacement(a, self.replacements[a].xreplace(cur_replace)) + return self.replacements[a] + + @lru_cache(256) + def _maybe_guard_eq(self, expr: Union["sympy.Eq", "sympy.Ne"], concrete_bool: bool) -> None: + """ + Evaluates the result of an eq call. If true, uses information to + simplify shapes (i.e. a == b or a % 5 == 0) + """ + assert type(concrete_bool) is bool + if isinstance(expr, sympy.Eq): + if not concrete_bool: + return + # NB: Apparently this is load bearing; to see what test fails if + # you comment it out run: + # python test/functorch/test_aotdispatch.py -k + # test_aot_autograd_symbolic_module_exhaustive_nn_LazyConv3d_cpu_float32 + elif isinstance(expr, sympy.Ne): + if concrete_bool: + return + free = list(expr.free_symbols) + + assert len(free) > 0, f"The expression should not be static by this point: {expr}" + # In case of really gnarly expression, we don't blow up + if len(free) > 5: + return + # NB: prioritize unbacked symints for solving by ordering them last + free = sorted(free, key=lambda x: (self.size_hint(x, allow_none=True) or sys.maxsize, x.name), reverse=True) # type: ignore[attr-defined] + lhs = expr.lhs + rhs = expr.rhs + if not expr.has(Mod): + try: + floor_div_atoms = lhs.atoms(FloorDiv).union(rhs.atoms(FloorDiv)) + if len(floor_div_atoms) > 0 and any(a.divisor != 1 for a in floor_div_atoms): + raise NotImplementedError + # short-circuit when no solving is needed + if isinstance(lhs, sympy.Symbol) and free_unbacked_symbols(lhs): + self._set_replacement(lhs, self._find(rhs)) + elif isinstance(rhs, sympy.Symbol) and free_unbacked_symbols(rhs): + self._set_replacement(rhs, self._find(lhs)) + else: + r = try_solve(expr, free[0], floordiv_inequality=False) + if r is not None and all(t.is_integer for t in sympy.preorder_traversal(r[1])): + new_var = self._find(r[1]) + ok = False + if self.is_unbacked_symint(free[0]): + # If you have i0 + i1 + i2 = s0, don't substitute i2 = + # s0 - i0 - i1. 
Arguably this should be OK but the + # runtime assert machinery is very delicate right now + # so this causes things to fail e.g., + # test_split_unbacked_sizes + ok = len(free_unbacked_symbols(new_var)) <= 1 + else: + # Never substitute backed with unbacked + ok = len(free_unbacked_symbols(new_var)) == 0 + if ok: + self._set_replacement(cast(sympy.Symbol, free[0]), new_var) + except NotImplementedError: + pass + if expr.has(Mod): + mod_expr = next(iter(expr.atoms(Mod))) + try: + r = try_solve(expr, mod_expr, floordiv_inequality=False) + if r is not None and r[1] == 0: + self._add_divisible(mod_expr) + # This is a little bit of extra logic to make things like + # torch.empty(i0, q).view(c, -1, q) work out + p, q = mod_expr.args + if isinstance(q, sympy.Number) and isinstance(p, sympy.Mul) and len(p.args) == 2: + c, i0 = p.args + # Given Mod(c * i0, q) == 0 + if ( + isinstance(c, sympy.Number) and + isinstance(i0, sympy.Symbol) and + self.is_unbacked_symint(i0) + ): + # We have Mod(i0, q / c) == 0, which means we can + # rewrite i0 as (q / gcd(q, c)) * i1 + d = q / sympy.gcd(q, c) + i1 = self.create_unbacked_symint().node.expr + # Propagate the value ranges. It doesn't really + # matter if we use truediv or floordiv, because we + # have established divisibility. + self.var_to_range[i1] = SymPyValueRangeAnalysis.truediv( + self.var_to_range[i0], ValueRanges.wrap(d) + ) + self.runtime_var_to_range[i1] = SymPyValueRangeAnalysis.truediv( + self.runtime_var_to_range[i0], ValueRanges.wrap(d) + ) + self._set_replacement(i0, d * i1) + + except NotImplementedError: + pass + return + + # See: Note - On 0/1 specialization + # NB: sys.maxsize is NOT allowed for sizes, because we use MAX_INT + # as a sentinel sometimes. Your sizevar isn't going to be + # anywhere near the max 64-bit integer anyway. 
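+    # Illustrative sketch of the two defaults below (stdlib arithmetic only,
+    # not part of the logic): with specialize_zero_one=True sizes get the
+    # range [2, sys.maxsize - 1], so a guard like s0 == 0 or s0 == 1 can be
+    # decided statically, while unspecified values keep the full signed
+    # 64-bit-style range:
+    #
+    #     >>> import sys
+    #     >>> (2, sys.maxsize - 1)              # _default_value_range, lower = 2
+    #     (2, 9223372036854775806)
+    #     >>> (-sys.maxsize - 1, sys.maxsize)   # _default_unspecified_value_range
+    #     (-9223372036854775808, 9223372036854775807)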
+ def _default_value_range(self) -> ValueRanges: + lower = 2 if self.specialize_zero_one else 0 + return ValueRanges(lower, sys.maxsize - 1) + + def _default_unspecified_value_range(self) -> ValueRanges: + return ValueRanges(-sys.maxsize - 1, sys.maxsize) + + @_lru_cache + def _simplify_floor_div(self, expr): + floor_divs = tuple(expr.atoms(FloorDiv)) + # we expect floor_divs to be exact, + # and thus add the guards for the exact floordivs, + # even if tracing doesn't require them otherwise + for fd in reversed(floor_divs): + base, divisor = fd.args + mod_expr = Mod(base, divisor) + eq_expr = sympy.Eq(mod_expr, 0) + # add necessary mod guards + self.evaluate_expr(eq_expr) + return self.simplify(expr) + + # We're about to add a guard/runtime assert, check if the ShapeEnv is frozen + # and if so issue a warning + def _check_frozen(self, expr, concrete_val): + if self.frozen: + self.counter["ignored_backward_guard"] += 1 + signpost_event( + "dynamic", + "evaluate_expr_frozen", + { + **self.co_fields, + "ignored_guard": f"{expr} == {concrete_val}", + # no version = original state (this signpost is expected) + # version 2 = dynamic backwards is eagerly compiled + "version": 2, + }, + ) + log.warning("Ignored guard %s == %s, this could result in accuracy problems", expr, concrete_val) + + + def _get_stack_summary(self): + fsummary = None + frame = inspect.currentframe() + try: + while frame is not None: + if frame.f_code.co_filename not in uninteresting_files(): + fsummary = traceback.FrameSummary( + frame.f_code.co_filename, + frame.f_lineno, + frame.f_code.co_name, + ) + break + frame = frame.f_back + finally: + del frame + + # NB: this stack is truncated, but it's fine because the main + # stack_info will give you the rest of the info you need + maybe_user_loc = "" + user_tb = TracingContext.extract_stack() + if user_tb: + maybe_user_loc = " at " + format_frame(user_tb[-1]) + + return fsummary, user_tb, maybe_user_loc + + def _log_guard(self, prefix: str, g): + if self.log.isEnabledFor(logging.INFO): + fsummary, user_tb, maybe_user_loc = self._get_stack_summary() + + # TODO: make this an artifact + is_debug = False + maybe_extra_debug = "" + if is_debug and user_tb: + maybe_extra_debug = ( + '\nUser Stack (most recent call last):\n' + + ' (snipped, see stack below for prefix)\n' + + ''.join(traceback.format_list(user_tb)) + ) + self.log.info( + "%s %s [guard added]%s (%s)%s", + prefix, + g, + maybe_user_loc, + format_frame(fsummary), + maybe_extra_debug, + stack_info=is_debug, + ) + + @lru_cache(256) + @record_shapeenv_event(save_tracked_fakes=True) + def evaluate_expr(self, orig_expr: "sympy.Expr", hint=None, fx_node=None, + expect_rational=True): + """ + Given an expression, evaluates it, adding guards if necessary + """ + if hint is None: + concrete_val = self.size_hint(orig_expr) + else: + concrete_val = sympy.sympify(hint) + + # Check if: + # 1. 'translation_validation' is set + # 2. the corresponding 'fx_node' is not 'None' + # 3. the guard should not be suppressed + # + # If all of the above check, we create an FX node representing the + # actual expression to be guarded. 
+ node = None + fresh = False + if ( + self._translation_validation_enabled + and fx_node is not None + and not self._suppress_guards_tls() + ): + if concrete_val is sympy.true: + node, fresh = self.create_fx_call_function(torch._assert, (fx_node,)) + elif concrete_val is sympy.false: + neg, _ = self.create_fx_call_function(operator.not_, (fx_node,)) + node, fresh = self.create_fx_call_function(torch._assert, (neg,)) + else: + eql, _ = self.create_fx_call_function(operator.eq, (fx_node, concrete_val)) + node, fresh = self.create_fx_call_function(torch._assert, (eql,)) + + assert node is not None + # If this is a fresh node, we have to remember the event index that + # corresponds to this assertion node. + # Reason: so that, given an assertion node, we can replay the ShapeEnv + # events until the point where this assertion node was freshly created. + if fresh: + self.add_fx_node_metadata(node) + + # After creating the FX node corresponding to orig_expr, we must make sure that + # no error will be raised until the end of this function. + # + # Reason: the translation validation may become invalid otherwise. + # + # If an error is raised before the end of this function, we remove the FX node + # inserted, and re-raise the error. + guard = None + tb = None + + try: + if orig_expr.is_number: + self.log.debug("eval %s [trivial]", orig_expr) + # NB: don't test float as there may be precision issues + if isinstance(hint, (int, bool)): + assert orig_expr == hint, f"{orig_expr} != {hint}" + return orig_expr + + expr = orig_expr + + static_expr = self._maybe_evaluate_static(expr, + expect_rational=expect_rational) + if static_expr is not None: + self.log.debug("eval %s == %s [statically known]", orig_expr, static_expr) + # NB: don't test float as there may be precision issues + if isinstance(hint, (int, bool)): + assert static_expr == hint, f"{static_expr} != {hint}" + return static_expr + + if not (expr.free_symbols <= self.var_to_val.keys()): + # TODO: dedupe this with _maybe_evaluate_static + # Attempt to eliminate the unbacked SymInt + new_expr = self._maybe_evaluate_static(expr, unbacked_only=True) + if not (new_expr.free_symbols <= self.var_to_val.keys()): + raise self._make_data_dependent_error(expr.xreplace(self.var_to_val), expr) + expr = new_expr + + self._check_frozen(expr, concrete_val) + + if ( + config.inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY + and isinstance(hint, bool) + and isinstance(expr, (sympy.Eq, sympy.Ne)) + ): + expr = sympy.Not(expr) + + if isinstance(expr, (sympy.Eq, sympy.Ne)): + self._maybe_guard_eq(expr, bool(concrete_val)) + # TODO: If we successfully eliminate a symbol via equality, it + # is not actually necessary to save a guard for the equality, + # as we will implicitly generate a guard when we match that + # input against the symbol + elif isinstance(concrete_val, sympy.Integer): + # WARNING: we cannot actually do simplifications on guards + # on floating point values, because Sympy generally does not + # think expressions on integers can ever be equal to floating + # point (e.g., sympy.Eq(s0/6, 0.5) evaluates to False). Without + # very clear algebraic laws that hold for floating point, such + # simplifications are error prone anyway, so be sure not to + # maybe_guard_eq in those cases. 
+ self._maybe_guard_eq(sympy.Eq(expr, concrete_val), True) + + if concrete_val is sympy.true: + g = expr + elif concrete_val is sympy.false: + g = sympy.Not(expr) + else: + g = sympy.Eq(expr, concrete_val) # type: ignore[arg-type] + + if not self._suppress_guards_tls(): + stack = CapturedTraceback.extract(skip=1) + guard = ShapeGuard(g, stack) + self.guards.append(guard) + except Exception: + if fresh: + self.remove_fx_node(node) + raise + else: + if not self._suppress_guards_tls(): + assert guard is not None + + self.refine_ranges(guard) + + self._log_guard("eval", g) + else: + self.log.debug("eval %s [guard suppressed]", g) + + return concrete_val + + def cleanup(self): + # Break reference cycles. + # This destroys the stacks. If you really want to keep them, we + # just need some way to break references on code objects. + for g in self.guards: + g.stack.cleanup() + for s in self.var_to_stack.values(): + s.cleanup() + for ras in self.deferred_runtime_asserts.values(): + for ra in ras: + ra.stack.cleanup() + + @record_shapeenv_event(save_tracked_fakes=True) + def defer_runtime_assert(self, orig_expr: "sympy.Expr", msg, fx_node=None): + expr = orig_expr + + static_expr = self._maybe_evaluate_static(expr) + if static_expr is not None: + self.log.debug("runtime_assert %s == %s [statically known]", orig_expr, static_expr) + return static_expr + + # Attempt to eliminate the unbacked SymInt + new_expr = self._maybe_evaluate_static(expr, unbacked_only=True) + if new_expr.free_symbols <= self.var_to_val.keys(): + # Do a normal guard + return self.evaluate_expr(new_expr, fx_node=fx_node) + # NB: Don't use new_expr as expr; it could contain gunk like shape0 + # which we don't want to guard on + + # OK, we're definitely doing a runtime assert now + if ( + self._translation_validation_enabled + and fx_node is not None + and not self._suppress_guards_tls() + ): + node, fresh = self.create_fx_call_function(torch._assert, (fx_node,)) + assert node is not None + if fresh: + self.add_fx_node_metadata(node) + + self._check_frozen(expr, sympy.true) + + # eliminate symbols on equality tests + if isinstance(expr, sympy.Eq): + self._maybe_guard_eq(expr, True) + + if not self._suppress_guards_tls(): + # canonicalise to remove equations that are trivially equal + expr = canonicalize_bool_expr(expr) + stack = CapturedTraceback.extract(skip=1) + ra = RuntimeAssert(expr, msg, stack) + # TODO: Do this in a way that is less janky than int(s.name[1:]) + cands = sorted([s for s in expr.free_symbols if s.name.startswith("i")], key=lambda s: int(s.name[1:])) + self.deferred_runtime_asserts.setdefault(cands[-1], []).append(ra) + self.num_deferred_runtime_asserts += 1 + self._update_version_counter() + # TODO: refine ranges + # Unfortunately, range refinement is probably going to not + # work most of the time, because we don't support symbols + # in ranges. For example, i0 <= s0 is un-rangeable, because + # we can't put s0 in the range. So this is not very high + # priority at the moment. + self._log_guard("runtime_assert", expr) + else: + self.log.debug("runtime_assert %s [guard suppressed]", expr) + + return True + + # Refines the ranges of the variables present in 'guard'. + # + # This function tries to refine the range of the variables inside + # 'guard' by reasoning about it. Specifically, when 'guard' is a + # 'sympy.Relational' operation. + # + # It does mainly 3 things: + # 1. Tries to isolate a variable in the left-hand side + # 2. Compute the value range of the right-hand side + # 3. 
Update the value range of the variable, if better
+    def refine_ranges(self, guard: ShapeGuard) -> None:
+        expr = self.simplify(guard.expr)
+
+        for symbol in expr.free_symbols:
+            assert isinstance(symbol, sympy.Symbol)
+
+            if isinstance(self.var_to_val.get(symbol, None), SingletonInt):
+                # Skip var_to_range logic for SingletonInt which is only used
+                # for jagged layout NestedTensors today
+                continue
+
+            r = try_solve(expr, symbol)
+
+            if r is None or not (symbol.is_integer and r[1].is_integer):
+                # Range refinement only supports integer symbols for now.
+                # There are lots of SymPy bugs when it comes to comparing
+                # reals and integers, so we skip that for now.
+                continue
+
+            r_expr, rhs = r
+            vr = self.var_to_range[symbol]
+            lower, upper = vr.lower, vr.upper
+
+            rhs_vr = bound_sympy(rhs, self.var_to_range)
+            _assert_bound_is_rational(rhs, rhs_vr)
+            lower_guard, upper_guard = self.var_to_guards.get(symbol, (None, None))
+
+            # Let's suppose that we have a preexisting range for x [0, 100].
+            # Now, we issue a guard x > y, where the range for y is [50, 150].
+            # Then, lower = 0, rhs_vr.lower = 50 and therefore refinement can happen,
+            # refining x to [51, 100], since x must be greater than y, but the lowest
+            # y could be is 50.
+            #
+            # sympy.Eq may update both lower and upper bounds.
+            # sympy.G{t,e} may update the lower bound, only.
+            # sympy.L{t,e} may update the upper bound, only.
+            if lower < rhs_vr.lower and isinstance(r_expr, (sympy.Eq, sympy.Ge, sympy.Gt)):
+                # Strictly greater relations allow us to refine a bit more, since
+                # x > y implies that the lower bound for x is: y + 1.
+                lower = rhs_vr.lower + int(isinstance(r_expr, sympy.Gt))
+                lower_guard = guard
+            if upper > rhs_vr.upper and isinstance(r_expr, (sympy.Eq, sympy.Le, sympy.Lt)):
+                upper = rhs_vr.upper - int(isinstance(r_expr, sympy.Lt))
+                upper_guard = guard
+
+            # Do nothing if the new value range is no better than what we already have.
+            if vr == ValueRanges(lower, upper):
+                continue
+
+            # Updates the range and the guards corresponding to each bound of the symbol.
+            self.var_to_range[symbol] = ValueRanges(lower, upper)
+            self.var_to_guards[symbol] = (lower_guard, upper_guard)
+            # Clears the cache, since this update can change the result.
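+            # _maybe_evaluate_static bounds expressions using var_to_range and
+            # is memoized, so without the cache_clear() below a stale cached
+            # answer could ignore the tighter range just stored.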
+ self._maybe_evaluate_static.cache_clear() + +def _is_int(expr): + return isinstance(expr, SymInt) and expr.node.expr.is_number + +# WARNING: This is legacy, DO NOT USE +def _is_dim_dynamic(t, d): + return hasattr(t, "_dynamo_dynamic_indices") and d in t._dynamo_dynamic_indices diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..31446d0e61253d7f722a3235e6e4c5788b4b01ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py @@ -0,0 +1,4 @@ +# mypy: disable-error-code=attr-defined +from .core import unify, reify # noqa: F403 +from .more import unifiable # noqa: F403 +from .variable import var, isvar, vars, variables, Var # noqa: F403 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f70242e87d1512201b6d969e13aafc4e6ad7b72f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..920284d19970fbf1931630605f2863adf241ee39 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b49893aa4e72dc761d5e5a1990bc847bb47b86bf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/match.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/match.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16f09685d9a14ebc04a9e0318e77334ca1f9795f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/match.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/more.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/more.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7cf1bb0b5571b3043364439cb0825c9be16b646 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/more.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aeb04799da670a88056b8e0e801ab0b65941b25 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa3fc758a9e01165222784e3aa26bf74d7420dbe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/variable.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35bde28d8ab68a37362e08d2af9a8ea8361f40e4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/variable.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/match.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/match.py new file mode 100644 index 0000000000000000000000000000000000000000..dd459726917fe3423ea448b3c61a7feaeecdeefe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/match.py @@ -0,0 +1,121 @@ +from .core import unify, reify # type: ignore[attr-defined] +from .variable import isvar +from .utils import _toposort, freeze +from .unification_tools import groupby, first # type: ignore[import] + + +class Dispatcher: + def __init__(self, name): + self.name = name + self.funcs = {} + self.ordering = [] + + def add(self, signature, func): + self.funcs[freeze(signature)] = func + self.ordering = ordering(self.funcs) + + def __call__(self, *args, **kwargs): + func, s = self.resolve(args) + return func(*args, **kwargs) + + def resolve(self, args): + n = len(args) + for signature in self.ordering: + if len(signature) != n: + continue + s = unify(freeze(args), signature) + if s is not False: + result = self.funcs[signature] + return result, s + raise NotImplementedError("No match found. \nKnown matches: " + + str(self.ordering) + "\nInput: " + str(args)) + + def register(self, *signature): + def _(func): + self.add(signature, func) + return self + return _ + + +class VarDispatcher(Dispatcher): + """ A dispatcher that calls functions with variable names + >>> # xdoctest: +SKIP + >>> d = VarDispatcher('d') + >>> x = var('x') + >>> @d.register('inc', x) + ... def f(x): + ... return x + 1 + >>> @d.register('double', x) + ... def f(x): + ... 
return x * 2 + >>> d('inc', 10) + 11 + >>> d('double', 10) + 20 + """ + def __call__(self, *args, **kwargs): + func, s = self.resolve(args) + d = {k.token: v for k, v in s.items()} + return func(**d) + + +global_namespace = {} # type: ignore[var-annotated] + + +def match(*signature, **kwargs): + namespace = kwargs.get('namespace', global_namespace) + dispatcher = kwargs.get('Dispatcher', Dispatcher) + + def _(func): + name = func.__name__ + + if name not in namespace: + namespace[name] = dispatcher(name) + d = namespace[name] + + d.add(signature, func) + + return d + return _ + + +def supercedes(a, b): + """ ``a`` is a more specific match than ``b`` """ + if isvar(b) and not isvar(a): + return True + s = unify(a, b) + if s is False: + return False + s = {k: v for k, v in s.items() if not isvar(k) or not isvar(v)} + if reify(a, s) == a: + return True + if reify(b, s) == b: + return False + + +# Taken from multipledispatch +def edge(a, b, tie_breaker=hash): + """ A should be checked before B + Tie broken by tie_breaker, defaults to ``hash`` + """ + if supercedes(a, b): + if supercedes(b, a): + return tie_breaker(a) > tie_breaker(b) + else: + return True + return False + + +# Taken from multipledispatch +def ordering(signatures): + """ A sane ordering of signatures to check, first to last + Topological sort of edges as given by ``edge`` and ``supercedes`` + """ + signatures = list(map(tuple, signatures)) + edges = [(a, b) for a in signatures for b in signatures if edge(a, b)] + edges = groupby(first, edges) + for s in signatures: + if s not in edges: + edges[s] = [] + edges = {k: [b for a, b in v] for k, v in edges.items()} # type: ignore[attr-defined, assignment] + return _toposort(edges) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0295af0ea6b6b92836e034c1d28cfdf69b1d3ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__init__.py @@ -0,0 +1,3 @@ +from .core import dispatch +from .dispatcher import (Dispatcher, halt_ordering, restart_ordering, + MDNotImplementedError) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b163d1533cb6e12c9ca19144bce3502be4952bc7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/conflict.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/conflict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de899ca4c69dc05999a036dd6081432e93a12340 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/conflict.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/core.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb50d74198b1545d1a0a9a1482236bef0177fcd7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/core.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/dispatcher.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/dispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17a84a4427ee395c95ae4384dce9b3627d957b64 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/dispatcher.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79419eaac07e8f20722398a51d9e491526bebffd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/variadic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/variadic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..534e6c596cbcc19612837e80b2f91c3f2da733bc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/__pycache__/variadic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/conflict.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/conflict.py new file mode 100644 index 0000000000000000000000000000000000000000..fc64151945655371e12ad15648054774168d3f52 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/conflict.py @@ -0,0 +1,119 @@ +from .utils import _toposort, groupby +from .variadic import isvariadic + +__all__ = ["AmbiguityWarning", "supercedes", "consistent", "ambiguous", "ambiguities", "super_signature", + "edge", "ordering"] + +class AmbiguityWarning(Warning): + pass + + +def supercedes(a, b): + """ A is consistent and strictly more specific than B """ + if len(a) < len(b): + # only case is if a is empty and b is variadic + return not a and len(b) == 1 and isvariadic(b[-1]) + elif len(a) == len(b): + return all(map(issubclass, a, b)) + else: + # len(a) > len(b) + p1 = 0 + p2 = 0 + while p1 < len(a) and p2 < len(b): + cur_a = a[p1] + cur_b = b[p2] + if not (isvariadic(cur_a) or isvariadic(cur_b)): + if not issubclass(cur_a, cur_b): + return False + p1 += 1 + p2 += 1 + elif isvariadic(cur_a): + assert p1 == len(a) - 1 + return p2 == len(b) - 1 and issubclass(cur_a, cur_b) + elif isvariadic(cur_b): + assert p2 == len(b) - 1 + if not issubclass(cur_a, cur_b): + return False + p1 += 1 + return p2 == len(b) - 1 and p1 == len(a) + + +def consistent(a, 
b): + """ It is possible for an argument list to satisfy both A and B """ + + # Need to check for empty args + if not a: + return not b or isvariadic(b[0]) + if not b: + return not a or isvariadic(a[0]) + + # Non-empty args check for mutual subclasses + if len(a) == len(b): + return all(issubclass(aa, bb) or issubclass(bb, aa) + for aa, bb in zip(a, b)) + else: + p1 = 0 + p2 = 0 + while p1 < len(a) and p2 < len(b): + cur_a = a[p1] + cur_b = b[p2] + if not issubclass(cur_b, cur_a) and not issubclass(cur_a, cur_b): + return False + if not (isvariadic(cur_a) or isvariadic(cur_b)): + p1 += 1 + p2 += 1 + elif isvariadic(cur_a): + p2 += 1 + elif isvariadic(cur_b): + p1 += 1 + # We only need to check for variadic ends + # Variadic types are guaranteed to be the last element + return (isvariadic(cur_a) and p2 == len(b) or + isvariadic(cur_b) and p1 == len(a)) + + +def ambiguous(a, b): + """ A is consistent with B but neither is strictly more specific """ + return consistent(a, b) and not (supercedes(a, b) or supercedes(b, a)) + + +def ambiguities(signatures): + """ All signature pairs such that A is ambiguous with B """ + signatures = list(map(tuple, signatures)) + return {(a, b) for a in signatures for b in signatures + if hash(a) < hash(b) + and ambiguous(a, b) + and not any(supercedes(c, a) and supercedes(c, b) + for c in signatures)} + + +def super_signature(signatures): + """ A signature that would break ambiguities """ + n = len(signatures[0]) + assert all(len(s) == n for s in signatures) + + return [max((type.mro(sig[i]) for sig in signatures), key=len)[0] + for i in range(n)] + + +def edge(a, b, tie_breaker=hash): + """ A should be checked before B + Tie broken by tie_breaker, defaults to ``hash`` + """ + # A either supercedes B and B does not supercede A or if B does then call + # tie_breaker + return supercedes(a, b) and (not supercedes(b, a) or tie_breaker(a) > tie_breaker(b)) + + +def ordering(signatures): + """ A sane ordering of signatures to check, first to last + Topological sort of edges as given by ``edge`` and ``supercedes`` + """ + signatures = list(map(tuple, signatures)) + edges = [(a, b) for a in signatures for b in signatures if edge(a, b)] + edges = groupby(lambda x: x[0], edges) + for s in signatures: + if s not in edges: + edges[s] = [] + edges = {k: [b for a, b in v] for k, v in edges.items()} # type: ignore[assignment, attr-defined] + return _toposort(edges) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/core.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/core.py new file mode 100644 index 0000000000000000000000000000000000000000..6036015f4290ca07cf61e91d96fa0342425254ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/core.py @@ -0,0 +1,83 @@ +import inspect +import sys + +from .dispatcher import Dispatcher, MethodDispatcher + +global_namespace = {} # type: ignore[var-annotated] + +__all__ = ["dispatch", "ismethod"] + +def dispatch(*types, **kwargs): + """ Dispatch function on the types of the inputs + Supports dispatch on all non-keyword arguments. + Collects implementations based on the function name. Ignores namespaces. + If ambiguous type signatures occur a warning is raised when the function is + defined suggesting the additional method to break the ambiguity. + + Example: + >>> # xdoctest: +SKIP + >>> @dispatch(int) + ... def f(x): + ... return x + 1 + >>> @dispatch(float) + ... def f(x): + ... 
return x - 1 + >>> # xdoctest: +SKIP + >>> f(3) + 4 + >>> f(3.0) + 2.0 + >>> # Specify an isolated namespace with the namespace keyword argument + >>> my_namespace = {} + >>> @dispatch(int, namespace=my_namespace) + ... def foo(x): + ... return x + 1 + >>> # Dispatch on instance methods within classes + >>> class MyClass(object): + ... @dispatch(list) + ... def __init__(self, data): + ... self.data = data + ... @dispatch(int) + ... def __init__(self, datum): + ... self.data = [datum] + >>> MyClass([1, 2, 3]).data + [1, 2, 3] + >>> MyClass(3).data + [3] + """ + namespace = kwargs.get('namespace', global_namespace) + + types = tuple(types) + + def _df(func): + name = func.__name__ + + if ismethod(func): + dispatcher = inspect.currentframe().f_back.f_locals.get( # type: ignore[union-attr] + name, # type: ignore[union-attr] + MethodDispatcher(name), + ) + else: + if name not in namespace: + namespace[name] = Dispatcher(name) + dispatcher = namespace[name] + + dispatcher.add(types, func) + return dispatcher + return _df + + +def ismethod(func): + """ Is func a method? + Note that this has to work as the method is defined but before the class is + defined. At this stage methods look like functions. + """ + if hasattr(inspect, "signature"): + signature = inspect.signature(func) + return signature.parameters.get('self', None) is not None + else: + if sys.version_info.major < 3: + spec = inspect.getargspec(func) + else: + spec = inspect.getfullargspec(func) # type: ignore[union-attr, assignment] + return spec and spec.args and spec.args[0] == 'self' diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/dispatcher.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..d2a8e6bfc7ffffad565ea82747a417bd863608d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/dispatcher.py @@ -0,0 +1,430 @@ +from warnings import warn +import inspect +from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning +from .utils import expand_tuples +from .variadic import Variadic, isvariadic +import itertools as itl + +__all__ = ["MDNotImplementedError", "ambiguity_warn", "halt_ordering", "restart_ordering", "variadic_signature_matches_iter", + "variadic_signature_matches", "Dispatcher", "source", "MethodDispatcher", "str_signature", "warning_text"] + +class MDNotImplementedError(NotImplementedError): + """ A NotImplementedError for multiple dispatch """ + + +def ambiguity_warn(dispatcher, ambiguities): + """ Raise warning when ambiguity is detected + Parameters + ---------- + dispatcher : Dispatcher + The dispatcher on which the ambiguity was detected + ambiguities : set + Set of type signature pairs that are ambiguous within this dispatcher + See Also: + Dispatcher.add + warning_text + """ + warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning) + + +def halt_ordering(): + """Deprecated interface to temporarily disable ordering. + """ + warn( + 'halt_ordering is deprecated, you can safely remove this call.', + DeprecationWarning, + ) + + +def restart_ordering(on_ambiguity=ambiguity_warn): + """Deprecated interface to temporarily resume ordering. 
+ """ + warn( + 'restart_ordering is deprecated, if you would like to eagerly order' + 'the dispatchers, you should call the ``reorder()`` method on each' + ' dispatcher.', + DeprecationWarning, + ) + + +def variadic_signature_matches_iter(types, full_signature): + """Check if a set of input types matches a variadic signature. + Notes + ----- + The algorithm is as follows: + Initialize the current signature to the first in the sequence + For each type in `types`: + If the current signature is variadic + If the type matches the signature + yield True + Else + Try to get the next signature + If no signatures are left we can't possibly have a match + so yield False + Else + yield True if the type matches the current signature + Get the next signature + """ + sigiter = iter(full_signature) + sig = next(sigiter) + for typ in types: + matches = issubclass(typ, sig) + yield matches + if not isvariadic(sig): + # we're not matching a variadic argument, so move to the next + # element in the signature + sig = next(sigiter) + else: + try: + sig = next(sigiter) + except StopIteration: + assert isvariadic(sig) + yield True + else: + # We have signature items left over, so all of our arguments + # haven't matched + yield False + + +def variadic_signature_matches(types, full_signature): + # No arguments always matches a variadic signature + assert full_signature + return all(variadic_signature_matches_iter(types, full_signature)) + + +class Dispatcher: + """ Dispatch methods based on type signature + Use ``dispatch`` to add implementations + Examples + -------- + >>> # xdoctest: +SKIP("bad import name") + >>> from multipledispatch import dispatch + >>> @dispatch(int) + ... def f(x): + ... return x + 1 + >>> @dispatch(float) + ... def f(x): + ... return x - 1 + >>> f(3) + 4 + >>> f(3.0) + 2.0 + """ + __slots__ = '__name__', 'name', 'funcs', '_ordering', '_cache', 'doc' + + def __init__(self, name, doc=None): + self.name = self.__name__ = name + self.funcs = {} + self.doc = doc + + self._cache = {} + + def register(self, *types, **kwargs): + """ register dispatcher with new implementation + >>> # xdoctest: +SKIP + >>> f = Dispatcher('f') + >>> @f.register(int) + ... def inc(x): + ... return x + 1 + >>> @f.register(float) + ... def dec(x): + ... return x - 1 + >>> @f.register(list) + ... @f.register(tuple) + ... def reverse(x): + ... return x[::-1] + >>> f(1) + 2 + >>> f(1.0) + 0.0 + >>> f([1, 2, 3]) + [3, 2, 1] + """ + def _df(func): + self.add(types, func, **kwargs) # type: ignore[call-arg] + return func + return _df + + @classmethod + def get_func_params(cls, func): + if hasattr(inspect, "signature"): + sig = inspect.signature(func) + return sig.parameters.values() + + @classmethod + def get_func_annotations(cls, func): + """ get annotations of function positional parameters + """ + params = cls.get_func_params(func) + if params: + Parameter = inspect.Parameter + + params = (param for param in params + if param.kind in + (Parameter.POSITIONAL_ONLY, + Parameter.POSITIONAL_OR_KEYWORD)) + + annotations = tuple( + param.annotation + for param in params) + + if all(ann is not Parameter.empty for ann in annotations): + return annotations + + def add(self, signature, func): + """ Add new types/method pair to dispatcher + >>> # xdoctest: +SKIP + >>> D = Dispatcher('add') + >>> D.add((int, int), lambda x, y: x + y) + >>> D.add((float, float), lambda x, y: x + y) + >>> D(1, 2) + 3 + >>> D(1, 2.0) + Traceback (most recent call last): + ... 
+        NotImplementedError: Could not find signature for add: <int, float>
+        >>> # When ``add`` detects a warning it calls the ``on_ambiguity`` callback
+        >>> # with a dispatcher/itself, and a set of ambiguous type signature pairs
+        >>> # as inputs. See ``ambiguity_warn`` for an example.
+        """
+        # Handle annotations
+        if not signature:
+            annotations = self.get_func_annotations(func)
+            if annotations:
+                signature = annotations
+
+        # Handle union types
+        if any(isinstance(typ, tuple) for typ in signature):
+            for typs in expand_tuples(signature):
+                self.add(typs, func)
+            return
+
+        new_signature = []
+
+        for index, typ in enumerate(signature, start=1):
+            if not isinstance(typ, (type, list)):
+                str_sig = ', '.join(c.__name__ if isinstance(c, type)
+                                    else str(c) for c in signature)
+                raise TypeError(f"Tried to dispatch on non-type: {typ}\n"
+                                f"In signature: <{str_sig}>\n"
+                                f"In function: {self.name}")
+
+            # handle variadic signatures
+            if isinstance(typ, list):
+                if index != len(signature):
+                    raise TypeError(
+                        'Variadic signature must be the last element'
+                    )
+
+                if len(typ) != 1:
+                    raise TypeError(
+                        'Variadic signature must contain exactly one element. '
+                        'To use a variadic union type place the desired types '
+                        'inside of a tuple, e.g., [(int, str)]'
+                    )
+                new_signature.append(Variadic[typ[0]])
+            else:
+                new_signature.append(typ)
+
+        self.funcs[tuple(new_signature)] = func
+        self._cache.clear()
+
+        try:
+            del self._ordering
+        except AttributeError:
+            pass
+
+    @property
+    def ordering(self):
+        try:
+            return self._ordering
+        except AttributeError:
+            return self.reorder()
+
+    def reorder(self, on_ambiguity=ambiguity_warn):
+        self._ordering = od = ordering(self.funcs)
+        amb = ambiguities(self.funcs)
+        if amb:
+            on_ambiguity(self, amb)
+        return od
+
+    def __call__(self, *args, **kwargs):
+        types = tuple([type(arg) for arg in args])
+        try:
+            func = self._cache[types]
+        except KeyError as e:
+            func = self.dispatch(*types)
+            if not func:
+                raise NotImplementedError(
+                    f'Could not find signature for {self.name}: <{str_signature(types)}>') from e
+            self._cache[types] = func
+        try:
+            return func(*args, **kwargs)
+
+        except MDNotImplementedError as e:
+            funcs = self.dispatch_iter(*types)
+            next(funcs)  # burn first
+            for func in funcs:
+                try:
+                    return func(*args, **kwargs)
+                except MDNotImplementedError:
+                    pass
+
+            raise NotImplementedError(
+                "Matching functions for "
+                f"{self.name}: <{str_signature(types)}> found, but none completed successfully",) from e
+
+    def __str__(self):
+        return f"<dispatched {self.name}>"
+    __repr__ = __str__
+
+    def dispatch(self, *types):
+        """Determine appropriate implementation for this type signature
+        This method is internal.  Users should call this object as a function.
+        Implementation resolution occurs within the ``__call__`` method.
+        >>> # xdoctest: +SKIP
+        >>> from multipledispatch import dispatch
+        >>> @dispatch(int)
+        ... def inc(x):
+        ...
return x + 1 + >>> implementation = inc.dispatch(int) + >>> implementation(3) + 4 + >>> print(inc.dispatch(float)) + None + See Also: + ``multipledispatch.conflict`` - module to determine resolution order + """ + + if types in self.funcs: + return self.funcs[types] + + try: + return next(self.dispatch_iter(*types)) + except StopIteration: + return None + + def dispatch_iter(self, *types): + + n = len(types) + for signature in self.ordering: + if len(signature) == n and all(map(issubclass, types, signature)): + result = self.funcs[signature] + yield result + elif len(signature) and isvariadic(signature[-1]): + if variadic_signature_matches(types, signature): + result = self.funcs[signature] + yield result + + def resolve(self, types): + """ Determine appropriate implementation for this type signature + .. deprecated:: 0.4.4 + Use ``dispatch(*types)`` instead + """ + warn("resolve() is deprecated, use dispatch(*types)", + DeprecationWarning) + + return self.dispatch(*types) + + def __getstate__(self): + return {'name': self.name, + 'funcs': self.funcs} + + def __setstate__(self, d): + self.name = d['name'] + self.funcs = d['funcs'] + self._ordering = ordering(self.funcs) + self._cache = {} + + @property + def __doc__(self): + docs = [f"Multiply dispatched method: {self.name}"] + + if self.doc: + docs.append(self.doc) + + other = [] + for sig in self.ordering[::-1]: + func = self.funcs[sig] + if func.__doc__: + s = f'Inputs: <{str_signature(sig)}>\n' + s += '-' * len(s) + '\n' + s += func.__doc__.strip() + docs.append(s) + else: + other.append(str_signature(sig)) + + if other: + docs.append('Other signatures:\n ' + '\n '.join(other)) + + return '\n\n'.join(docs) + + def _help(self, *args): + return self.dispatch(*map(type, args)).__doc__ + + def help(self, *args, **kwargs): + """ Print docstring for the function corresponding to inputs """ + print(self._help(*args)) + + def _source(self, *args): + func = self.dispatch(*map(type, args)) + if not func: + raise TypeError("No function found") + return source(func) + + def source(self, *args, **kwargs): + """ Print source code for the function corresponding to inputs """ + print(self._source(*args)) + + +def source(func): + s = f'File: {inspect.getsourcefile(func)}\n\n' + s = s + inspect.getsource(func) + return s + + +class MethodDispatcher(Dispatcher): + """ Dispatch methods based on type signature + See Also: + Dispatcher + """ + __slots__ = ('obj', 'cls') + + @classmethod + def get_func_params(cls, func): + if hasattr(inspect, "signature"): + sig = inspect.signature(func) + return itl.islice(sig.parameters.values(), 1, None) + + def __get__(self, instance, owner): + self.obj = instance + self.cls = owner + return self + + def __call__(self, *args, **kwargs): + types = tuple([type(arg) for arg in args]) + func = self.dispatch(*types) + if not func: + raise NotImplementedError(f'Could not find signature for {self.name}: <{str_signature(types)}>') + return func(self.obj, *args, **kwargs) + + +def str_signature(sig): + """ String representation of type signature + >>> str_signature((int, float)) + 'int, float' + """ + return ', '.join(cls.__name__ for cls in sig) + + +def warning_text(name, amb): + """ The text for ambiguity warnings """ + text = f"\nAmbiguities exist in dispatched function {name}\n\n" + text += "The following signatures may result in ambiguous behavior:\n" + for pair in amb: + text += "\t" + \ + ', '.join('[' + str_signature(s) + ']' for s in pair) + "\n" + text += "\n\nConsider making the following additions:\n\n" + text += 
'\n\n'.join(['@dispatch(' + str_signature(super_signature(s)) +
+                         f')\ndef {name}(...)' for s in amb])
+    return text
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b5ec2ed63152e240ccb94935c96b25ad8b66093
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/utils.py
@@ -0,0 +1,125 @@
+from collections import OrderedDict
+
+__all__ = ["raises", "expand_tuples", "reverse_dict", "groupby", "typename"]
+
+def raises(err, lamda):
+    try:
+        lamda()
+        return False
+    except err:
+        return True
+
+
+def expand_tuples(L):
+    """
+    >>> expand_tuples([1, (2, 3)])
+    [(1, 2), (1, 3)]
+    >>> expand_tuples([1, 2])
+    [(1, 2)]
+    """
+    if not L:
+        return [()]
+    elif not isinstance(L[0], tuple):
+        rest = expand_tuples(L[1:])
+        return [(L[0],) + t for t in rest]
+    else:
+        rest = expand_tuples(L[1:])
+        return [(item,) + t for t in rest for item in L[0]]
+
+
+# Taken from theano/theano/gof/sched.py
+# Avoids licensing issues because this was written by Matthew Rocklin
+def _toposort(edges):
+    """ Topological sort algorithm by Kahn [1] - O(nodes + vertices)
+    inputs:
+        edges - a dict of the form {a: {b, c}} where b and c depend on a
+    outputs:
+        L - an ordered list of nodes that satisfy the dependencies of edges
+    >>> _toposort({1: (2, 3), 2: (3, )})
+    [1, 2, 3]
+    >>> # Closely follows the wikipedia page [2]
+    >>> # [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
+    >>> # Communications of the ACM
+    >>> # [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
+    """
+    incoming_edges = reverse_dict(edges)
+    incoming_edges = OrderedDict((k, set(val))
+                                 for k, val in incoming_edges.items())
+    S = OrderedDict.fromkeys(v for v in edges if v not in incoming_edges)
+    L = []
+
+    while S:
+        n, _ = S.popitem()
+        L.append(n)
+        for m in edges.get(n, ()):
+            assert n in incoming_edges[m]
+            incoming_edges[m].remove(n)
+            if not incoming_edges[m]:
+                S[m] = None
+    if any(incoming_edges.get(v, None) for v in edges):
+        raise ValueError("Input has cycles")
+    return L
+
+
+def reverse_dict(d):
+    """Reverses direction of dependence dict
+    >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
+    >>> reverse_dict(d)  # doctest: +SKIP
+    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}
+    :note: dict order is not deterministic. As we iterate over the
+    input dict, the output of this function depends on the dict
+    order, so its ordering should be considered nondeterministic.
+ """ + result = OrderedDict() # type: ignore[var-annotated] + for key in d: + for val in d[key]: + result[val] = result.get(val, tuple()) + (key, ) + return result + + +# Taken from toolz +# Avoids licensing issues because this version was authored by Matthew Rocklin +def groupby(func, seq): + """ Group a collection by a key function + >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank'] + >>> groupby(len, names) # doctest: +SKIP + {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']} + >>> iseven = lambda x: x % 2 == 0 + >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP + {False: [1, 3, 5, 7], True: [2, 4, 6, 8]} + See Also: + ``countby`` + """ + + d = OrderedDict() # type: ignore[var-annotated] + for item in seq: + key = func(item) + if key not in d: + d[key] = list() + d[key].append(item) + return d + + +def typename(type): + """Get the name of `type`. + Parameters + ---------- + type : Union[Type, Tuple[Type]] + Returns + ------- + str + The name of `type` or a tuple of the names of the types in `type`. + Examples + -------- + >>> typename(int) + 'int' + >>> typename((int, float)) + '(int, float)' + """ + try: + return type.__name__ + except AttributeError: + if len(type) == 1: + return typename(*type) + return f"({', '.join(map(typename, type))})" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/variadic.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/variadic.py new file mode 100644 index 0000000000000000000000000000000000000000..0f046ba55bd324b39fdfa4be3b943ae4c5c8c1d7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/multipledispatch/variadic.py @@ -0,0 +1,91 @@ +from .utils import typename + +__all__ = ["VariadicSignatureType", "isvariadic", "VariadicSignatureMeta", "Variadic"] + +class VariadicSignatureType(type): + # checking if subclass is a subclass of self + def __subclasscheck__(cls, subclass): + other_type = (subclass.variadic_type if isvariadic(subclass) + else (subclass,)) + return subclass is cls or all( + issubclass(other, cls.variadic_type) for other in other_type # type: ignore[attr-defined] + ) + + def __eq__(cls, other): + """ + Return True if other has the same variadic type + Parameters + ---------- + other : object (type) + The object (type) to check + Returns + ------- + bool + Whether or not `other` is equal to `self` + """ + return (isvariadic(other) and + set(cls.variadic_type) == set(other.variadic_type)) # type: ignore[attr-defined] + + def __hash__(cls): + return hash((type(cls), frozenset(cls.variadic_type))) # type: ignore[attr-defined] + + +def isvariadic(obj): + """Check whether the type `obj` is variadic. + Parameters + ---------- + obj : type + The type to check + Returns + ------- + bool + Whether or not `obj` is variadic + Examples + -------- + >>> # xdoctest: +SKIP + >>> isvariadic(int) + False + >>> isvariadic(Variadic[int]) + True + """ + return isinstance(obj, VariadicSignatureType) + + +class VariadicSignatureMeta(type): + """A metaclass that overrides ``__getitem__`` on the class. This is used to + generate a new type for Variadic signatures. See the Variadic class for + examples of how this behaves. 
+ """ + def __getitem__(cls, variadic_type): + if not (isinstance(variadic_type, (type, tuple)) or type(variadic_type)): + raise ValueError("Variadic types must be type or tuple of types" + " (Variadic[int] or Variadic[(int, float)]") + + if not isinstance(variadic_type, tuple): + variadic_type = variadic_type, + return VariadicSignatureType( + f'Variadic[{typename(variadic_type)}]', + (), + dict(variadic_type=variadic_type, __slots__=()) + ) + + +class Variadic(metaclass=VariadicSignatureMeta): + """A class whose getitem method can be used to generate a new type + representing a specific variadic signature. + Examples + -------- + >>> # xdoctest: +SKIP + >>> Variadic[int] # any number of int arguments + + >>> Variadic[(int, str)] # any number of one of int or str arguments + + >>> issubclass(int, Variadic[int]) + True + >>> issubclass(int, Variadic[(int, str)]) + True + >>> issubclass(str, Variadic[(int, str)]) + True + >>> issubclass(float, Variadic[(int, str)]) + False + """ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d74799a714c5d04035670ca53bca30751ac76a40 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/utils.py @@ -0,0 +1,105 @@ +__all__ = ["hashable", "transitive_get", "raises", "reverse_dict", "xfail", "freeze"] +def hashable(x): + try: + hash(x) + return True + except TypeError: + return False + + +def transitive_get(key, d): + """ Transitive dict.get + >>> d = {1: 2, 2: 3, 3: 4} + >>> d.get(1) + 2 + >>> transitive_get(1, d) + 4 + """ + while hashable(key) and key in d: + key = d[key] + return key + + +def raises(err, lamda): + try: + lamda() + return False + except err: + return True + + +# Taken from theano/theano/gof/sched.py +# Avoids licensing issues because this was written by Matthew Rocklin +def _toposort(edges): + """ Topological sort algorithm by Kahn [1] - O(nodes + vertices) + inputs: + edges - a dict of the form {a: {b, c}} where b and c depend on a + outputs: + L - an ordered list of nodes that satisfy the dependencies of edges + >>> # xdoctest: +SKIP + >>> _toposort({1: (2, 3), 2: (3, )}) + [1, 2, 3] + Closely follows the wikipedia page [2] + [1] Kahn, Arthur B. (1962), "Topological sorting of large networks", + Communications of the ACM + [2] http://en.wikipedia.org/wiki/Toposort#Algorithms + """ + incoming_edges = reverse_dict(edges) + incoming_edges = {k: set(val) for k, val in incoming_edges.items()} + S = ({v for v in edges if v not in incoming_edges}) + L = [] + + while S: + n = S.pop() + L.append(n) + for m in edges.get(n, ()): + assert n in incoming_edges[m] + incoming_edges[m].remove(n) + if not incoming_edges[m]: + S.add(m) + if any(incoming_edges.get(v, None) for v in edges): + raise ValueError("Input has cycles") + return L + + +def reverse_dict(d): + """Reverses direction of dependence dict + >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()} + >>> reverse_dict(d) # doctest: +SKIP + {1: ('a',), 2: ('a', 'b'), 3: ('b',)} + :note: dict order are not deterministic. As we iterate on the + input dict, it make the output of this function depend on the + dict order. So this function output order should be considered + as undeterministic. 
+ """ + result = {} # type: ignore[var-annotated] + for key in d: + for val in d[key]: + result[val] = result.get(val, tuple()) + (key, ) + return result + + +def xfail(func): + try: + func() + raise Exception("XFailed test passed") # pragma:nocover + except Exception: + pass + + +def freeze(d): + """ Freeze container to hashable form + >>> freeze(1) + 1 + >>> freeze([1, 2]) + (1, 2) + >>> freeze({1: 2}) # doctest: +SKIP + frozenset([(1, 2)]) + """ + if isinstance(d, dict): + return frozenset(map(freeze, d.items())) + if isinstance(d, set): + return frozenset(map(freeze, d)) + if isinstance(d, (tuple, list)): + return tuple(map(freeze, d)) + return d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/variable.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/variable.py new file mode 100644 index 0000000000000000000000000000000000000000..8f7efda3328b0fd3dfbbc452347100bf3db8d506 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/variable.py @@ -0,0 +1,85 @@ +from contextlib import contextmanager +from .utils import hashable +from .dispatch import dispatch + +_global_logic_variables = set() # type: ignore[var-annotated] +_glv = _global_logic_variables + + +class Var: + """ Logic Variable """ + + _id = 1 + + def __new__(cls, *token): + if len(token) == 0: + token = f"_{Var._id}" # type: ignore[assignment] + Var._id += 1 + elif len(token) == 1: + token = token[0] + + obj = object.__new__(cls) + obj.token = token # type: ignore[attr-defined] + return obj + + def __str__(self): + return "~" + str(self.token) # type: ignore[attr-defined] + __repr__ = __str__ + + def __eq__(self, other): + return type(self) == type(other) and self.token == other.token # type: ignore[attr-defined] + + def __hash__(self): + return hash((type(self), self.token)) # type: ignore[attr-defined] + + +def var(): + return lambda *args: Var(*args) + + +def vars(): + return lambda n: [var() for i in range(n)] + + +@dispatch(Var) +def isvar(v): + return True + +isvar + + +@dispatch(object) # type: ignore[no-redef] +def isvar(o): + return not not _glv and hashable(o) and o in _glv + + +@contextmanager +def variables(*variables): + """ + Context manager for logic variables + + Example: + >>> # xdoctest: +SKIP("undefined vars") + >>> from __future__ import with_statement + >>> with variables(1): + ... print(isvar(1)) + True + >>> print(isvar(1)) + False + >>> # Normal approach + >>> from unification import unify + >>> x = var('x') + >>> unify(x, 1) + {~x: 1} + >>> # Context Manager approach + >>> with variables('x'): + ... 
+        {'x': 1}
+    """
+    old_global_logic_variables = _global_logic_variables.copy()
+    _global_logic_variables.update(set(variables))
+    try:
+        yield
+    finally:
+        _global_logic_variables.clear()
+        _global_logic_variables.update(old_global_logic_variables)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unify_refinements.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unify_refinements.py
new file mode 100644
index 0000000000000000000000000000000000000000..532d2784fb49ae4cd798b2a0706d82b8151a08de
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unify_refinements.py
@@ -0,0 +1,120 @@
+from torch.fx.experimental.graph_gradual_typechecker import Refine
+from torch.fx.tensor_type import TensorType
+from torch.fx.experimental.unification import Var, unify  # type: ignore[attr-defined]
+
+
+def infer_symbolic_types_single_pass(traced):
+    """
+    Calls our symbolic inferencer once.
+    """
+    r = Refine(traced)
+    r.refine()
+    mgu = unify_eq(r.constraints)
+    substitute_all_types(traced.graph, mgu)
+
+def infer_symbolic_types(traced):
+    """
+    Calls our symbolic inferencer twice.
+    This is useful when one pass is not enough
+    to infer all the information, as is the case
+    for broadcasting.
+    """
+    r = Refine(traced)
+    r.refine()
+    mgu = unify_eq(r.constraints)
+    substitute_all_types(traced.graph, mgu)
+
+    r = Refine(traced)
+    r.refine()
+    mgu = unify_eq(r.constraints)
+    substitute_all_types(traced.graph, mgu)
+
+    r.symbolic_relations()
+
+def convert_eq(list_of_eq):
+    """
+    Convert equality constraints into the right format
+    to be used by the unification library.
+    """
+    lhs = []
+    rhs = []
+    for eq in list_of_eq:
+        lhs.append(eq.lhs)
+        rhs.append(eq.rhs)
+    return tuple(lhs), tuple(rhs)
+
+
+def unify_eq(list_of_eq):
+    """
+    Apply unification to a set of
+    equality constraints
+    """
+    lhs, rhs = convert_eq(list_of_eq)
+    return unify(lhs, rhs)
+
+
+def substitute_solution_one_type(mapping, t):
+    """
+    Apply the most general unifier to a type
+    """
+    if isinstance(t, Var):
+        if t in mapping.keys():
+            return mapping[t]
+        else:
+            return t
+
+    elif isinstance(t, TensorType):
+        new_type = []
+        for typ in t.__args__:
+            if typ in mapping.keys():
+                new_type.append(mapping[typ])
+            else:
+                new_type.append(typ)
+        return TensorType(tuple(new_type))
+
+    elif isinstance(t, list):
+        new_type = []
+        for typ in t:
+            new_type.append(substitute_solution_one_type(mapping, typ))
+        return new_type
+
+    elif isinstance(t, tuple):
+        new_type = []
+        for typ in t:
+            new_type.append(substitute_solution_one_type(mapping, typ))
+        return tuple(new_type)
+
+    else:
+        return t
+
+
+def substitute_all_types(graph, mapping):
+    """
+    Apply the most general unifier to all types in a graph
+    until reaching a fixed point. If the input and output graph
+    are the same, we converge.
+    """
+    flag = True
+    while flag:
+        flag = False
+        for k in mapping:
+            old_mapping_val = mapping[k]
+            if mapping[k] in mapping.keys():
+                new_key = mapping[k]
+                mapping[k] = mapping[new_key]
+            if old_mapping_val != mapping[k]:
+                flag = True
+
+    for n in graph.nodes:
+        n.type = substitute_solution_one_type(mapping, n.type)

+def check_for_type_equality(g1, g2):
+    """
+    An equality check to be used in fixed-point computations.
+    We do not compare graphs for equality; we compare their
+    node types instead.
+ """ + for n, m in zip(g1.nodes, g2.nodes): + if n.type != m.type: + return False + return True diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/validator.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/validator.py new file mode 100644 index 0000000000000000000000000000000000000000..48ad07dd8559c545630fdd32f3c3c833c6686aa7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/validator.py @@ -0,0 +1,745 @@ +import functools +import logging +import math +import operator +import sympy + +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union + +import torch +import torch.fx +import torch.fx.traceback as fx_traceback + +from torch._dynamo.exc import TorchDynamoException +from torch.fx.node import Argument, Target +from torch.utils._sympy.interp import sympy_interp + +log = logging.getLogger(__name__) + +try: + import z3 # type: ignore[import] + + # Translation Validation for Dynamo guards + # ======================================== + # + # Checks whether optimizations applied to the collected guards are + # valid. In other words, whether the guard function we actually run + # does not have false positives (unsound). + # + # In order to do so, we build the guards using 2 different information + # attached to each 'SymNode': + # 1. SymPy expressions + # 2. FX nodes + # + # SymPy expressions have implicit optimizations baked within itself, + # which may have a few bugs. On the other hand, we build the FX graph + # manually, with no optimizations enabled. This gives us access to + # the "ground truth". + # + # We then convert into Z3 expressions both the SymPy expressions + # (see [Note: SympyToZ3]) that reach 'ShapeEnv.produce_guards' function + # and the FX nodes (see [Note: PopulateValidator]) that go through + # 'ShapeEnv.evaluate_expr' function. Finally, we run the validation. + # (see [Note: TranslationValidator]) + + # Better Z3 to string implementation (for a small fraction of Z3). + # + # Here are the things we clean before showing the Z3 expression: + # - Rename a few ops (e.g. "Distinct" ==> "!=") + # + # - Ignore ToInt and ToReal operations: + # usually they don't really matter + # + # - Transform (ToInt (/ ...)) into (idiv ...): + # this is the pattern for floor division + # + # - Collect a chain of the same operations into one + def z3str(e: z3.ExprRef) -> str: + assert z3.is_expr(e), f"unsupported expression type: {e}" + + def get_args_str(e: z3.ExprRef) -> List[str]: + return [z3str(e.arg(i)) for i in range(e.num_args())] + + # First, we simplify the given expression. + # This is done using rewriting rules, so shouldn't take long. + e = z3.simplify(e) + + + # Only support function applications. + # Even Z3 "variables" are, in fact, function applications. + if not z3.is_app(e): + raise ValueError(f"can't print Z3 expression: {e}") + + if z3.is_int_value(e) or z3.is_rational_value(e): + return e.as_string() # type: ignore[attr-defined] + + decl = e.decl() + kind = decl.kind() + op = str(decl) + args = get_args_str(e) + + if kind == z3.Z3_OP_POWER: + op = "pow" + + elif kind in (z3.Z3_OP_ADD, z3.Z3_OP_MUL): + # Collect the arguments of chains of ADD and MUL. + # This is safe, since they are associative. 
+
+            def collect_str_args(e):
+                if not (z3.is_app(e) and e.decl().kind() == kind):
+                    return [z3str(e)]
+                else:
+                    return [
+                        x
+                        for i in range(e.num_args())
+                        for x in collect_str_args(e.arg(i))
+                    ]
+
+            args = collect_str_args(e)
+
+        elif kind == z3.Z3_OP_NOT:
+            # Revert some conversions that z3.simplify applies:
+            # - a != b ==> (Not (== a b)) ==> (!= a b)
+            # - a < b ==> (Not (<= b a)) ==> (> b a)
+            # - a > b ==> (Not (<= a b)) ==> (> a b)
+
+            assert e.num_args() == 1
+            arg = e.arg(0)
+
+            assert z3.is_app(arg)
+            argkind = arg.decl().kind()
+
+            logic_inverse = {
+                z3.Z3_OP_EQ: "!=",
+                z3.Z3_OP_LE: ">",
+                z3.Z3_OP_GE: "<",
+            }
+
+            if argkind in logic_inverse:
+                op = logic_inverse[argkind]
+                args = get_args_str(arg)
+
+        elif kind in (z3.Z3_OP_TO_INT, z3.Z3_OP_TO_REAL):
+            assert e.num_args() == 1
+            argstr = z3str(e.arg(0))
+
+            # Check if it's the floor division pattern.
+            if argstr.startswith("(/"):
+                return "(idiv" + argstr[2:]
+
+            # Otherwise, just ignore it.
+            return argstr
+
+        elif kind == z3.Z3_OP_UNINTERPRETED:
+            assert e.num_args() == 0
+            return str(decl)
+
+        string = op + " " + " ".join(args)
+        return f"({string.rstrip()})"
+
+    # Implementation of Python semantics as Z3 expressions.
+    #
+    # The Z3 Real-Int theory has operators whose semantics differ from
+    # those of Python. Therefore, in order to get it right, we need to
+    # implement the (Python) semantics we are relying on in Z3.
+    @dataclass
+    class _Z3Ops:
+        # Validator used for adding assertions as needed.
+        # e.g. div(a, b) requires b != 0.
+        validator: "TranslationValidator"
+
+        # The 2 functions below are used for conditionally casting between
+        # integers and reals.
+        #
+        # Returns a real expression from 'x'.
+        @staticmethod
+        def to_real(x: z3.ArithRef) -> z3.ArithRef:
+            return x if x.is_real() else z3.ToReal(x)
+
+        # Returns an integer expression from 'x'.
+        @staticmethod
+        def to_int(x: z3.ArithRef) -> z3.ArithRef:
+            return x if x.is_int() else z3.ToInt(x)
+
+        # Implements Python division semantics.
+        def div(self, numerator: z3.ArithRef, denominator: z3.ArithRef) -> z3.ArithRef:
+            self.validator.add_assertion(denominator != 0)  # type: ignore[arg-type]
+            return _Z3Ops.to_real(numerator) / _Z3Ops.to_real(denominator)
+
+        def floor(self, number: z3.ArithRef) -> z3.ArithRef:
+            # The Z3 ToInt function rounds a real number towards negative infinity.
+            return _Z3Ops.to_int(number)
+
+        # Python semantics for 'FloorDiv' state that before applying the floor
+        # function, the operands are converted to their common type.
+        def floordiv(self, numerator: z3.ArithRef, denominator: z3.ArithRef) -> z3.ArithRef:
+            cast_result_to_real = numerator.is_real() or denominator.is_real()
+            result = _Z3Ops.to_int(self.div(numerator, denominator))
+            # Since the 'result' is already an integer, we just have to check
+            # whether we should cast it to real.
+            return _Z3Ops.to_real(result) if cast_result_to_real else result
+
+        def ceil(self, number: z3.ArithRef) -> z3.ArithRef:
+            return z3.If(
+                self.floor(number) < number,
+                self.floor(number + 1),
+                number
+            )  # type: ignore[return-value]
+
+        def max(self, a: z3.ArithRef, b: z3.ArithRef) -> z3.ArithRef:
+            return z3.If(a > b, a, b)  # type: ignore[return-value]
+
+        def min(self, a: z3.ArithRef, b: z3.ArithRef) -> z3.ArithRef:
+            return z3.If(a < b, a, b)  # type: ignore[return-value]
+
+        # Python semantics for 'Mod' are defined as: p % q = p - floordiv(p, q) * q.
+        # This works with both integers and reals.
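+        # As a concrete (hand-checked) example: (-7) % 3 == 2 in Python,
+        # since floordiv(-7, 3) == -3 and -7 - (-3) * 3 == 2.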
+        def mod(self, p: z3.ArithRef, q: z3.ArithRef) -> z3.ArithRef:
+            return p - self.floordiv(p, q) * q
+
+        def pow(self, base: z3.ArithRef, exp: z3.ArithRef) -> z3.ArithRef:
+            # Z3 can't handle complex numbers very well.
+            self.validator.add_assertion(z3.Or(base != 0, exp > 0))  # type: ignore[arg-type]
+            return base ** exp
+
+        def sqrt(self, number: z3.ArithRef) -> z3.ArithRef:
+            # Square-root:
+            # 1. Only works with reals
+            number = _Z3Ops.to_real(number)
+            # 2. The number should be positive or zero.
+            #    Otherwise, Z3 returns 'unknown'.
+            self.validator.add_assertion(number >= 0)
+            return number ** 0.5
+
+        def abs(self, number: z3.ArithRef) -> z3.ArithRef:
+            return z3.Abs(number)
+
+    # Lifts a callable to be used in Z3.
+    #
+    # This function replaces the given 'op' by a function that:
+    #
+    # 1. Lifts the arguments into Z3 (i.e. makes them inhabitants of Z3)
+    #
+    # 2. Calls an operation that corresponds to 'op', but works with Z3
+    #    inhabitants (left as is if it works as is)
+    def z3op(op: Callable, validator: "TranslationValidator") -> Callable:
+        from torch.fx.experimental.sym_node import sym_sqrt
+
+        # Operations that have booleans as their argument.
+        # This is needed because the arguments of some FX nodes were
+        # literal integers instead of booleans. So, whenever this flag
+        # is set, we also convert ints to booleans.
+        boolean_ops = {operator.not_, operator.and_, operator.or_}
+        as_bool = op in boolean_ops
+
+        # Lifts the function into the 'z3.ExprRef' domain.
+        def lift(func):
+            def wrap(a) -> z3.ExprRef:
+                if isinstance(a, (z3.ArithRef, z3.BoolRef)):
+                    return a
+                # Convert it into a Z3 value, if it is one of the supported
+                # types below.
+                if isinstance(a, bool) or (as_bool and isinstance(a, int)):
+                    return z3.BoolVal(bool(a))
+                if isinstance(a, (int, sympy.Integer)):
+                    return z3.IntVal(int(a))
+                if isinstance(a, (float, sympy.Float)):
+                    return z3.RealVal(float(a))
+                raise ValueError(f"can't lift type: {type(a)}")
+
+            @functools.wraps(func)
+            def wrapper(*args):
+                # Lifts the arguments into a list of Z3 inhabitants.
+                wrapped_args = (wrap(a) for a in args)
+                # Run the function on the Z3 expressions.
+                return func(*wrapped_args)
+
+            return wrapper
+
+        ops = _Z3Ops(validator)
+        replacement_map = {
+            # Operator module.
+            operator.not_: lift(z3.Not),
+            operator.and_: lift(z3.And),
+            operator.or_: lift(z3.Or),
+            operator.floordiv: lift(ops.floordiv),
+            operator.truediv: lift(ops.div),
+            operator.mod: lift(ops.mod),
+            operator.abs: lift(ops.abs),
+
+            # Math module.
+            math.ceil: lift(ops.ceil),
+            math.floor: lift(ops.floor),
+
+            # Torch module.
+            torch.sym_float: lift(ops.to_real),
+            torch.sym_max: lift(ops.max),
+            torch.sym_min: lift(ops.min),
+            torch.sym_ite: lift(lambda b, t, f: t if b else f),
+            sym_sqrt: lift(ops.sqrt),
+            # Not lifted because we only use this function as a
+            # marker for adding the expression as validator input.
+            torch._assert: torch._assert,
+        }
+        return replacement_map[op] if op in replacement_map else lift(op)
+
+    # Processes an FX graph, populating the given validator.
+    #
+    # [Note: PopulateValidator]
+    # This class walks through each node in the FX graph, translating
+    # them into the Z3 world.
+    #
+    # Then, whenever it finds a 'torch._assert' call_function operation,
+    # it adds the Z3 expression corresponding to the argument as validator
+    # input.
+    class PopulateValidator(torch.fx.Interpreter):
+        def __init__(self, graph: torch.fx.Graph, validator: "TranslationValidator"):
+            # Reference to the translation validator.
+            self.validator = validator
+
+            # Build the graph module and call the `Interpreter` constructor.
+            module = torch.fx.GraphModule(root={}, graph=graph)
+            super().__init__(module, garbage_collect_values=True)
+
+        def placeholder(self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]) -> Any:
+            symbol = fx_traceback.get_current_meta()["symbol"]
+            return self.validator.z3var(symbol)
+
+        def call_function(self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]) -> Any:
+            if target != torch._assert:
+                # Actually runs the node target function (which is already
+                # lifted) with its arguments.
+                return super().call_function(target, args, kwargs)
+            # Adds the Z3 expression corresponding to the first argument
+            # as a validator input.
+            assert len(args) == 1, f"expected 1 argument on assertion. Got: {len(args)}"
+            self.validator.add_source_expr(args[0])  # type: ignore[arg-type]
+
+    # Translates SymPy expressions into Z3 expressions.
+    #
+    # [Note: SympyToZ3]
+    # At the time of the translation, all free variables present in the
+    # SymPy expression being translated must already be mapped to a Z3
+    # integer variable.
+    class SympyToZ3:
+        OPERATOR_HANDLES = {"add", "mul", "eq", "ne", "lt", "gt", "le", "ge"}
+
+        def __init__(
+            self,
+            validator: "TranslationValidator",
+        ) -> None:
+            self._validator = validator
+            self._ops = _Z3Ops(self._validator)
+
+        def constant(self, value: Any, dtype: torch.dtype) -> z3.ExprRef:
+            if dtype is torch.int64:
+                return z3.IntVal(int(value))
+            if dtype is torch.double:
+                return z3.RealVal(float(value))
+            if dtype is torch.bool:
+                return z3.BoolVal(bool(value))
+            raise ValueError(f"unsupported dtype (SympyToZ3): {dtype}")
+
+        def truediv(self, numerator: z3.ArithRef, denominator: z3.ArithRef) -> z3.ArithRef:
+            return self._ops.div(numerator, denominator)
+
+        def floordiv(self, numerator: z3.ArithRef, denominator: z3.ArithRef) -> z3.ArithRef:
+            return self._ops.floordiv(numerator, denominator)
+
+        def div(self, numerator: z3.ArithRef, denominator: z3.ArithRef) -> z3.ArithRef:
+            return self._ops.floordiv(numerator, denominator)
+
+        def pow(self, base: z3.ArithRef, exp: z3.ArithRef) -> z3.ArithRef:
+            return self._ops.pow(base, exp)
+
+        def mod(self, p: z3.ArithRef, q: z3.ArithRef) -> z3.ArithRef:
+            return self._ops.mod(p, q)
+
+        def __getattr__(self, name: str) -> Any:
+            REPLACEMENT = {
+                "and_": z3.And,
+                "or_": z3.Or,
+                "not_": z3.Not,
+                "floor": self._ops.floor,
+                "ceil": self._ops.ceil,
+                "minimum": self._ops.min,
+                "maximum": self._ops.max,
+            }
+
+            if name in REPLACEMENT:
+                return REPLACEMENT[name]
+            if name in self.OPERATOR_HANDLES:
+                return getattr(operator, name)
+            raise AttributeError(f"unhandled operator: {name}")
+
+        def run(self, expr: sympy.Basic) -> z3.ExprRef:
+            return sympy_interp(self, self._validator.symbols, expr)  # type: ignore[arg-type]
+
+    # Dynamo guards translation validator.
+    #
+    # [Note: TranslationValidator]
+    # Verifies whether the guards issued by 'ShapeEnv.produce_guards' are sound.
+    # That is: whether those (target) guards only yield TRUE whenever the original,
+    # unoptimized, (source) guards yield TRUE.
+    #
+    # More concretely, given 'source' and 'target' guard expressions, we wish to
+    # check whether the following expression is satisfiable:
+    #
+    #     Not(And(source)) AND And(target)
+    #
+    # i.e. whether there is an assignment of the free variables where the opposite
+    # happens: target is TRUE, but source is FALSE.
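+    # For instance, if the source guard were (x > 2) but the target guard
+    # had been simplified to (x > 1), the assignment x == 2 would satisfy
+    # the target while violating the source, making the target unsound.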
+    class TranslationValidator:
+        def __init__(self) -> None:
+            log.debug("new instance")
+
+            # Mapping of SymPy symbols to Z3 variables.
+            self.symbols: Dict[sympy.Symbol, z3.ExprRef] = {}
+
+            # Set of source Z3 expressions.
+            # They represent the generated guards without any kind of
+            # simplification or transformation.
+            self._source_exprs: Set[z3.BoolRef] = set()
+
+            # Set of target Z3 expressions.
+            # They represent the actual checked guards at runtime. They might
+            # be simplified or transformed versions of the source guards.
+            self._target_exprs: Set[z3.BoolRef] = set()
+
+            # Set of Z3 expressions representing assertions over both the
+            # source and target expressions.
+            self._assertions: Set[z3.BoolRef] = set()
+
+        # Retrieves the corresponding Z3 variable.
+        def z3var(self, symbol: sympy.Symbol) -> z3.ExprRef:
+            assert symbol in self.symbols, f"Z3 variable not found for: {symbol}"
+            return self.symbols[symbol]
+
+        # Creates a Z3 variable of 'type' for 'symbol', if it doesn't already exist.
+        def add_var(self, symbol: sympy.Symbol, type: Type) -> z3.ExprRef:
+            if symbol in self.symbols:
+                return self.symbols[symbol]
+
+            log.debug("new variable: %s (%s)", symbol.name, type.__name__)
+
+            if type is int:
+                var = z3.Int(symbol.name)
+
+                # If 'symbol' is positive (SymPy assumption), we have to
+                # convey it to Z3 as well.
+                if symbol.is_positive:  # type: ignore[attr-defined]
+                    self._target_exprs.add(var > 0)
+            elif type is float:
+                var = z3.Real(symbol.name)
+            elif type is bool:
+                var = z3.Bool(symbol.name)
+            else:
+                raise RuntimeError(f"unsupported type for Z3 variable: {type}")
+
+            self.symbols[symbol] = var
+            return var
+
+        # Checks whether all symbols were already added.
+        def _check_freesymbols(self, e: sympy.Basic) -> None:
+            for s in e.free_symbols:
+                assert isinstance(s, sympy.Symbol)
+                # Call 'z3var' just to check whether there's already a
+                # Z3 variable corresponding to 's'.
+                self.z3var(s)
+
+        def to_z3_boolean_expr(self, e: sympy.Basic) -> z3.BoolRef:
+            z3expr = SympyToZ3(self).run(e)
+            assert isinstance(z3expr, z3.BoolRef), f"expected boolean expression. Got: {z3expr}"
+            return z3expr
+
+        def add_source_expr(self, e: z3.BoolRef) -> None:
+            if e not in self._source_exprs:
+                log.debug("add source guard: %s", z3str(e))
+                self._source_exprs.add(e)
+
+        def add_target_expr(self, e: sympy.Expr) -> None:
+            self._check_freesymbols(e)
+            z3expr = self.to_z3_boolean_expr(e)
+            if e not in self._target_exprs:
+                log.debug("add target guard: %s", z3str(z3expr))
+                self._target_exprs.add(z3expr)
+
+        def add_assertion(self, e: Union[z3.BoolRef, sympy.Basic]) -> None:
+            if isinstance(e, sympy.Basic):
+                self._check_freesymbols(e)
+                ref = self.to_z3_boolean_expr(e)
+            else:
+                ref = e
+            assert isinstance(ref, z3.BoolRef)
+            if ref not in self._assertions:
+                log.debug("add assertion: %s", z3str(ref))
+                self._assertions.add(ref)
+
+        def validate(self) -> None:
+            from torch._dynamo.utils import dynamo_timed
+
+            if len(self._source_exprs) == 0 or len(self._target_exprs) == 0:
+                # If there are no source/target expressions, there's nothing we really
+                # wish to prove. So, we just return.
+                return None
+
+            # Here, we use the "QF_NRA" logic for the solver:
+            # "Quantifier-free Non-linear Real Arithmetic".
+            #
+            # Most of the guard expressions have:
+            # 1. arithmetic between integers and reals
+            # 2. no quantifiers
+            # 3. potentially non-linear terms
+            #
+            # Although there's also "QF_NIRA" (mixed integer-real arithmetic),
+            # "QF_NRA" seems to work better on 'dynamo/test_dynamic_shapes.py'.
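+            # (For instance, a guard that multiplies two symbolic sizes,
+            # say s0 * s1 < 2 ** 31, is non-linear: the product of two
+            # free variables is not a linear term.)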
+            solver = z3.SolverFor("QF_NRA")
+            # Set a timeout for finding a solution.
+            solver.set(timeout=translation_validation_timeout())
+
+            # Add all the assertions to the solver.
+            for assertion in self._assertions:
+                solver.add(assertion)
+
+            # "Is there any case where it's TRUE for the target expressions,
+            # but FALSE for the source expressions?"
+            solver.add(z3.Not(z3.And(*self._source_exprs)))
+            solver.add(*self._target_exprs)
+
+            log.debug("translation validation: start")
+            r = dynamo_timed()(solver.check)()
+            if r == z3.sat:
+                # The target expressions are unsound.
+                # Log the found model and the source expressions that failed.
+                model = solver.model()
+                raise ValidationException(
+                    model, self._assertions, self._target_exprs,
+                    failed_source_exprs=[
+                        inp for inp in self._source_exprs if not model.evaluate(inp)
+                    ]
+                )
+            else:
+                if r == z3.unknown:
+                    # Could not find a solution. It didn't fail, but it also
+                    # didn't succeed. Canceling the validation execution (keyboard
+                    # interrupt) also gets to this branch.
+                    log.warning("translation validation: could not validate: got z3.unknown")
+                else:
+                    # The target expressions are sound.
+                    assert r == z3.unsat
+                    log.debug("translation validation: success")
+
+except ImportError:
+    _HAS_Z3 = False
+
+    __all__ = [
+        "translation_validation_enabled", "translation_validation_timeout",
+        "ValidationException", "BisectValidationException",
+    ]
+
+else:
+    _HAS_Z3 = True
+
+    __all__ = [
+        "z3str", "z3op", "PopulateValidator", "SympyToZ3", "TranslationValidator",
+        "translation_validation_enabled", "translation_validation_timeout",
+        "ValidationException", "BisectValidationException",
+    ]
+
+from torch.fx.experimental import _config as config
+
+def translation_validation_enabled() -> bool:
+    # Checked every time this function is called, in case the Dynamo
+    # option is set but Z3 is not installed.
+    _assert_z3_installed_if_tv_set()
+    return _HAS_Z3 and config.translation_validation
+
+
+def translation_validation_timeout() -> int:
+    return config.translation_validation_timeout
+
+
+def _assert_z3_installed_if_tv_set():
+    assert _HAS_Z3 or not config.translation_validation, (
+        "translation validation requires the Z3 package. Please either install "
+        "z3-solver or disable translation validation."
+    )
+
+
+class ValidationException(TorchDynamoException):
+    def __init__(self, model, assertions, target_exprs, failed_source_exprs):
+        assert _HAS_Z3
+
+        def symbolstr(sym) -> str:
+            return f"{sym}: {model[sym]}"
+
+        def joinlines(xs) -> str:
+            return "\n".join(f"  ==> {x}" for x in xs)
+
+        model_str = joinlines(sorted(map(symbolstr, model)))
+        assertions_str = joinlines(sorted(map(z3str, assertions)))
+        target_exprs_str = joinlines(sorted(map(z3str, target_exprs)))
+        failed_source_exprs_str = joinlines(sorted(map(z3str, failed_source_exprs)))
+
+        self.msg = "translation validation failed."
+        self.details = f"""\
+Model:
+{model_str}
+
+Assertions:
+{assertions_str}
+
+Target Expressions:
+{target_exprs_str}
+
+Failed Source Expressions:
+{failed_source_exprs_str}"""
+
+    def __str__(self):
+        return f"{self.msg}\n\n{self.details}"
+
+
+class BisectValidationException(TorchDynamoException):
+    def __init__(self, validation_exc, expr, failed_action, traced_node):
+        self.msg = f"translation validation failed when {failed_action}: {expr}"
+        self.details = f"""\
+Failure occurred while running node:
+    {traced_node.format_node()}
+
+{validation_exc.details}"""
+
+    def __str__(self):
+        return f"{self.msg}\n\n{self.details}"
+
+# Check performed when this module is loaded.
+_assert_z3_installed_if_tv_set()
+
+# Translation validation bisection.
+#
+# Bisect into the torch._assert nodes recorded in the shape_env FX graph, and raise
+# the earliest ValidationException.
+#
+# As guards are added by ShapeEnv.evaluate_expr calls, some simplification errors
+# might be silently happening. This function tries to nail down exactly at which
+# point things went wrong from a validation perspective.
+def bisect(shape_env):
+    from torch.fx.experimental.symbolic_shapes import ShapeEnv, SHAPEENV_EVENT_KEY, CURRENT_NODE_KEY
+    from torch.fx.experimental.recording import FakeTensorMeta, ShapeEnvEvent, replay_shape_env_events
+
+    events = shape_env.events
+
+    # Retrieves the ShapeEnvEvent associated with node.
+    def get_node_event(node: torch.fx.Node) -> ShapeEnvEvent:
+        assert SHAPEENV_EVENT_KEY in node.meta
+        return events[node.meta[SHAPEENV_EVENT_KEY]]
+
+    # Creates a new instance of 'fake', updating every symbolic value's ShapeEnv
+    # reference to the one given as argument.
+    #
+    # This is needed so as not to simplify a symbolic expression using a ShapeEnv
+    # "from the future", where it may have a different set of replacements.
+    def new_with_shape_env(shape_env: ShapeEnv, fake) -> Any:
+        if isinstance(fake, int):
+            return fake
+        if isinstance(fake, torch.SymInt):
+            return torch.SymInt(fake.node.with_shape_env(shape_env))
+        assert isinstance(fake, FakeTensorMeta)
+        return FakeTensorMeta(
+            tuple(new_with_shape_env(shape_env, s) for s in fake.size()),
+            tuple(new_with_shape_env(shape_env, s) for s in fake.stride()),
+            new_with_shape_env(shape_env, fake.storage_offset()),
+            fake.is_nested,
+        )
+
+    # Checks whether the given shape_env fails when produce_guards is called.
+    def check_shapeenv_fails(shape_env: ShapeEnv, tracked_fakes: Optional[List[Any]]) -> Optional[ValidationException]:
+        assert tracked_fakes is not None
+        try:
+            # This produce_guards call is a best-effort replication, since we
+            # don't populate the EqualityConstraint list. Reason: we would also
+            # have to save OutputGraph.tracked_fakes_id_to_source.
+            shape_env.produce_guards(
+                [new_with_shape_env(shape_env, a.fake) for a in tracked_fakes],
+                [a.source for a in tracked_fakes],
+                constraint_inputs=[a.constraint_dims for a in tracked_fakes],
+            )
+            return None
+        except ValidationException as e:
+            return e
+
+    # Checks whether the ShapeEnv reconstructed by replaying the events until
+    # node is created fails when produce_guards is called.
+    def check_node_fails(node: torch.fx.Node) -> Optional[ValidationException]:
+        number = node.meta[SHAPEENV_EVENT_KEY]
+        # Reconstruct the shape_env up to (and including) the event at index 'number'.
+        shape_env = replay_shape_env_events(events[:number + 1])
+        shape_env.graph.lint()
+        return check_shapeenv_fails(shape_env, events[number].tracked_fakes)
+
+    last_exception = check_shapeenv_fails(shape_env, shape_env.snapshot_tracked_fakes())
+
+    if not last_exception:
+        # We don't actually fail due to a produce_guards call.
+        # Stop and don't bisect.
+        log.info("translation validation succeeded: no errors found.")
+        return
+
+    if not shape_env.should_record_events or config.translation_validation_no_bisect:
+        # Bisection is off.
+        # Return the last ValidationException we got.
+        raise last_exception
+
+    # Cache the raised exception (if any) at each bisection point.
+    exception = {}
+
+    # Bisection happens on the assertion nodes of the recorded FX graph for
+    # dynamic shapes.
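+    # The binary search below maintains the invariant that the earliest
+    # failing assertion lies in assert_nodes[left:right + 1], assuming
+    # (as bisection does) that once a replayed ShapeEnv fails, all later
+    # ones fail too: if the ShapeEnv replayed up to 'mid' already fails,
+    # the first failure is at or before 'mid'; otherwise it is strictly
+    # after it.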
+    assert_nodes = [node for node in shape_env.graph.nodes if node.target == torch._assert]
+
+    # Preparing the indices for binary search.
+    left, mid, right = 0, 0, len(assert_nodes) - 1
+
+    while left < right:
+        mid = (left + right) // 2
+
+        node = assert_nodes[mid]
+        log.debug("bisecting at %s: %s", mid, get_node_event(node))
+
+        # Check whether the new shape_env raises a ValidationException or not.
+        exception[mid] = check_node_fails(node)
+
+        if exception[mid]:
+            right = mid
+        else:
+            left = mid + 1
+
+    assert left in exception and isinstance(exception[left], ValidationException)
+
+    node = assert_nodes[left]
+    event = get_node_event(node)
+
+    if event.is_evaluate_expr():
+        failed_action = "evaluating"
+    else:
+        assert event.is_defer_runtime_assert(), f"unexpected event type: {event}"
+        failed_action = "adding runtime assert"
+
+    args = event.args
+    assert args is not None
+    assert len(args) >= 2, (
+        f"bisecting expects {event.name} to have at least 2 positional arguments. "
+        f"Got: {len(args)}"
+    )
+    assert isinstance(args[1], sympy.Basic), (
+        f"bisecting expects {event.name} to have a SymPy expression as its second argument. "
+        f"Got: {type(args[1])}"
+    )
+
+    raise BisectValidationException(
+        exception[left],
+        expr=args[1],
+        failed_action=failed_action,
+        traced_node=node.meta[CURRENT_NODE_KEY],
+    )
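+
+# A minimal usage sketch of the validation path above, assuming Z3 is
+# installed; 'syms' and the elided guard expressions are placeholders:
+#
+#     validator = TranslationValidator()
+#     for s in syms:                # sympy.Symbol instances
+#         validator.add_var(s, int)
+#     ...                           # add_source_expr / add_target_expr calls
+#     validator.validate()          # raises ValidationException when unsound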