diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/chi2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/chi2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..276e2d049fbbfb2a602e69199742958f456032ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/chi2.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/fishersnedecor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/fishersnedecor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1d47925172f812d389c14e008aedf24d153accd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/fishersnedecor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/independent.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/independent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a50e4d72c24500abf05686de96d791e9bc7560e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/independent.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22b41c6ef5102364b01281b8690974a0975146d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/multinomial.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/multinomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e1f9927e6e7fa02cd151d5b3a4a126ba86a2759 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/multinomial.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/normal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/normal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcb420cebf5e2fd1dfa423e0136a2511b85e864e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/normal.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/poisson.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/poisson.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6edec8f325292b9a1d994cb75358f5ebcface2c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/poisson.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/futures/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/futures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a398bebb599cb4b005ae76ea318e752dcd99a52 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/futures/__init__.py @@ -0,0 +1,318 @@ +from __future__ import annotations + +from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union + +import torch + +__all__ = ['Future', 'collect_all', 'wait_all'] + +T = TypeVar("T") +S = TypeVar("S") + + +class _PyFutureMeta(type(torch._C.Future), type(Generic)): # type: ignore[misc, no-redef] + pass + + +class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta): + r""" + Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous + execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It + also exposes a set of APIs to add callback functions and set results. + + .. warning:: GPU support is a beta feature, subject to changes. + """ + + def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None): + r""" + Create an empty unset ``Future``. If the future is intended to hold + values containing CUDA tensors, (a superset of) their CUDA devices must + be specified at construction. (This is only supported if + ``torch.cuda.is_available()`` returns ``True``). This is needed to + ensure proper CUDA stream synchronization. The child futures, returned + by the ``then`` method, will inherit these devices. + + Args: + devices(``List[Union[int, str, torch.device]]``, optional): the set + of devices on which tensors contained in this future's value are + allowed to reside and on which callbacks are allowed to operate. + """ + if devices is None: + devices = [] + super().__init__([torch.device(d) for d in devices]) + + def done(self) -> bool: + r""" + Return ``True`` if this ``Future`` is done. A ``Future`` is done if it + has a result or an exception. + + If the value contains tensors that reside on GPUs, ``Future.done()`` + will return ``True`` even if the asynchronous kernels that are + populating those tensors haven't yet completed running on the device, + because at such stage the result is already usable, provided one + performs the appropriate synchronizations (see :meth:`wait`). + """ + return super().done() + + def wait(self) -> T: + r""" + Block until the value of this ``Future`` is ready. + + If the value contains tensors that reside on GPUs, then an additional + synchronization is performed with the kernels (executing on the device) + which may be asynchronously populating those tensors. Such sync is + non-blocking, which means that ``wait()`` will insert the necessary + instructions in the current streams to ensure that further operations + enqueued on those streams will be properly scheduled after the async + kernels but, once that is done, ``wait()`` will return, even if those + kernels are still running. No further synchronization is required when + accessing and using the values, as long as one doesn't change streams. + + Returns: + The value held by this ``Future``. If the function (callback or RPC) + creating the value has thrown an error, this ``wait`` method will + also throw an error. + """ + return super().wait() + + def value(self) -> T: + r""" + Obtain the value of an already-completed future. + + This method should only be called after a call to :meth:`wait` has + completed, or inside a callback function passed to :meth:`then`. In + other cases this ``Future`` may not yet hold a value and calling + ``value()`` could fail. + + If the value contains tensors that reside on GPUs, then this method will + *not* perform any additional synchronization. 
This should be done + beforehand, separately, through a call to :meth:`wait` (except within + callbacks, for which it's already being taken care of by :meth:`then`). + + Returns: + The value held by this ``Future``. If the function (callback or RPC) + creating the value has thrown an error, this ``value()`` method will + also throw an error. + """ + return super().value() + + def then(self, callback: Callable[[Future[T]], S]) -> Future[S]: + r""" + Append the given callback function to this ``Future``, which will be run + when the ``Future`` is completed. Multiple callbacks can be added to + the same ``Future``, but the order in which they will be executed cannot + be guaranteed (to enforce a certain order consider chaining: + ``fut.then(cb1).then(cb2)``). The callback must take one argument, which + is the reference to this ``Future``. The callback function can use the + :meth:`value` method to get the value. Note that if this ``Future`` is + already completed, the given callback will be run immediately inline. + + If the ``Future``'s value contains tensors that reside on GPUs, the + callback might be invoked while the async kernels that are populating + those tensors haven't yet finished executing on the device. However, the + callback will be invoked with some dedicated streams set as current + (fetched from a global pool) which will be synchronized with those + kernels. Hence any operation performed by the callback on these tensors + will be scheduled on the device after the kernels complete. In other + words, as long as the callback doesn't switch streams, it can safely + manipulate the result without any additional synchronization. This is + similar to the non-blocking behavior of :meth:`wait`. + + Similarly, if the callback returns a value that contains tensors that + reside on a GPU, it can do so even if the kernels that are producing + these tensors are still running on the device, as long as the callback + didn't change streams during its execution. If one wants to change + streams, one must be careful to re-synchronize them with the original + streams, that is, those that were current when the callback was invoked. + + Args: + callback(``Callable``): a ``Callable`` that takes this ``Future`` as + the only argument. + + Returns: + A new ``Future`` object that holds the return value of the + ``callback`` and will be marked as completed when the given + ``callback`` finishes. + + .. note:: Note that if the callback function throws, either + through the original future being completed with an exception and + calling ``fut.wait()``, or through other code in the callback, the + future returned by ``then`` will be marked appropriately with the + encountered error. However, if this callback later completes + additional futures, those futures are not marked as completed with + an error and the user is responsible for handling completion/waiting + on those futures independently. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> def callback(fut): + ... print(f"RPC return value is {fut.wait()}.") + >>> fut = torch.futures.Future() + >>> # The inserted callback will print the return value when + >>> # receiving the response from "worker1" + >>> cb_fut = fut.then(callback) + >>> chain_cb_fut = cb_fut.then( + ... lambda x : print(f"Chained cb done. {x.wait()}") + ... ) + >>> fut.set_result(5) + RPC return value is 5. + Chained cb done. 
None + """ + return cast(Future[S], super().then(callback)) + + def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None: + r""" + Append the given callback function to this ``Future``, which will be run + when the ``Future`` is completed. Multiple callbacks can be added to + the same ``Future``, but the order in which they will be executed cannot + be guaranteed. The callback must take one argument, which is the + reference to this ``Future``. The callback function can use the + :meth:`value` method to get the value. Note that if this ``Future`` is + already completed, the given callback will be run inline. + + We recommend that you use the :meth:`then` method as it provides a way + to synchronize after your callback has completed. ``add_done_callback`` + can be cheaper if your callback does not return anything. But both + :meth:`then` and ``add_done_callback`` use the same callback + registration API under the hood. + + With respect to GPU tensors, this method behaves in the same way as + :meth:`then`. + + Args: + callback(``Future``): a ``Callable`` that takes in one argument, + which is the reference to this ``Future``. + + .. note:: Note that if the callback function throws, either + through the original future being completed with an exception and + calling ``fut.wait()``, or through other code in the callback, + error handling must be carefully taken care of. For example, if + this callback later completes additional futures, those futures are + not marked as completed with an error and the user is responsible + for handling completion/waiting on those futures independently. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> def callback(fut): + ... print("This will run after the future has finished.") + ... print(fut.wait()) + >>> fut = torch.futures.Future() + >>> fut.add_done_callback(callback) + >>> fut.set_result(5) + This will run after the future has finished. + 5 + """ + super().add_done_callback(callback) + + def set_result(self, result: T) -> None: + r""" + Set the result for this ``Future``, which will mark this ``Future`` as + completed and trigger all attached callbacks. Note that a ``Future`` + cannot be marked completed twice. + + If the result contains tensors that reside on GPUs, this method can be + called even if the asynchronous kernels that are populating those + tensors haven't yet completed running on the device, provided that the + streams on which those kernels were enqueued are set as the current ones + when this method is called. Put simply, it's safe to call this method + immediately after launching those kernels, without any additional + synchronization, as long as one doesn't change streams in between. This + method will record events on all the relevant current streams and will + use them to ensure proper scheduling for all the consumers of this + ``Future``. + + Args: + result (object): the result object of this ``Future``. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> import threading + >>> import time + >>> def slow_set_future(fut, value): + ... time.sleep(0.5) + ... fut.set_result(value) + >>> fut = torch.futures.Future() + >>> t = threading.Thread( + ... target=slow_set_future, + ... args=(fut, torch.ones(2) * 3) + ... 
)
+            >>> t.start()
+            >>> print(fut.wait())
+            tensor([3., 3.])
+            >>> t.join()
+        """
+        super().set_result(result)
+
+    def set_exception(self, result: T) -> None:
+        r"""
+        Set an exception for this ``Future``, which will mark this ``Future`` as
+        completed with an error and trigger all attached callbacks. Note that
+        when calling wait()/value() on this ``Future``, the exception set here
+        will be raised inline.
+
+        Args:
+            result (BaseException): the exception for this ``Future``.
+
+        Example::
+            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
+            >>> fut = torch.futures.Future()
+            >>> fut.set_exception(ValueError("foo"))
+            >>> fut.wait()
+            Traceback (most recent call last):
+            ...
+            ValueError: foo
+        """
+        assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."
+
+        def raise_error(fut_result):
+            raise fut_result
+
+        super()._set_unwrap_func(raise_error)
+        self.set_result(result)  # type: ignore[arg-type]
+
+
+def collect_all(futures: List[Future]) -> Future[List[Future]]:
+    r"""
+    Collects the provided :class:`~torch.futures.Future` objects into a single
+    combined :class:`~torch.futures.Future` that is completed when all of the
+    sub-futures are completed.
+
+    Args:
+        futures (list): a list of :class:`~torch.futures.Future` objects.
+
+    Returns:
+        Returns a :class:`~torch.futures.Future` object to a list of the passed
+        in Futures.
+
+    Example::
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
+        >>> fut0 = torch.futures.Future()
+        >>> fut1 = torch.futures.Future()
+        >>> fut = torch.futures.collect_all([fut0, fut1])
+        >>> fut0.set_result(0)
+        >>> fut1.set_result(1)
+        >>> fut_list = fut.wait()
+        >>> print(f"fut0 result = {fut_list[0].wait()}")
+        fut0 result = 0
+        >>> print(f"fut1 result = {fut_list[1].wait()}")
+        fut1 result = 1
+    """
+    return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures)))
+
+
+def wait_all(futures: List[Future]) -> List:
+    r"""
+    Waits for all provided futures to be complete, and returns
+    the list of completed values. If any of the futures encounters an error,
+    the method will exit early and report the error, without waiting for the
+    other futures to complete.
+
+    Args:
+        futures (list): a list of :class:`~torch.futures.Future` objects.
+
+    Returns:
+        A list of the completed :class:`~torch.futures.Future` results. This
+        method will throw an error if ``wait`` on any
+        :class:`~torch.futures.Future` throws.
+    """
+    return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()]
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a69a08ec2b3a38f7a8b8ebf6b7a302869cc2f17c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9896390f12434108cd43bd2e897b9aab7cb2832
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.py
@@ -0,0 +1,89 @@
+r'''
+FX is a toolkit for developers to use to transform ``nn.Module``
+instances. FX consists of three main components: a **symbolic tracer,**
+an **intermediate representation**, and **Python code generation**.
+A demonstration of these components in action:
+
+::
+
+    import torch
+    # Simple module for demonstration
+    class MyModule(torch.nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.param = torch.nn.Parameter(torch.rand(3, 4))
+            self.linear = torch.nn.Linear(4, 5)
+
+        def forward(self, x):
+            return self.linear(x + self.param).clamp(min=0.0, max=1.0)
+
+    module = MyModule()
+
+    from torch.fx import symbolic_trace
+    # Symbolic tracing frontend - captures the semantics of the module
+    symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)
+
+    # High-level intermediate representation (IR) - Graph representation
+    print(symbolic_traced.graph)
+    """
+    graph():
+        %x : [num_users=1] = placeholder[target=x]
+        %param : [num_users=1] = get_attr[target=param]
+        %add : [num_users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
+        %linear : [num_users=1] = call_module[target=linear](args = (%add,), kwargs = {})
+        %clamp : [num_users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
+        return clamp
+    """
+
+    # Code generation - valid Python code
+    print(symbolic_traced.code)
+    """
+    def forward(self, x):
+        param = self.param
+        add = x + param;  x = param = None
+        linear = self.linear(add);  add = None
+        clamp = linear.clamp(min = 0.0, max = 1.0);  linear = None
+        return clamp
+    """
+
+The **symbolic tracer** performs "symbolic execution" of the Python
+code. It feeds fake values, called Proxies, through the code. Operations
+on these Proxies are recorded. More information about symbolic tracing
+can be found in the :func:`symbolic_trace` and :class:`Tracer`
+documentation.
+
+The **intermediate representation** is the container for the operations
+that were recorded during symbolic tracing. It consists of a list of
+Nodes that represent function inputs, callsites (to functions, methods,
+or :class:`torch.nn.Module` instances), and return values. More information
+about the IR can be found in the documentation for :class:`Graph`. The
+IR is the format on which transformations are applied.
+
+**Python code generation** is what makes FX a Python-to-Python (or
+Module-to-Module) transformation toolkit. For each Graph IR, we can
+create valid Python code matching the Graph's semantics. This
+functionality is wrapped up in :class:`GraphModule`, which is a
+:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
+``forward`` method generated from the Graph.
+
+Taken together, this pipeline of components (symbolic tracing ->
+intermediate representation -> transforms -> Python code generation)
+constitutes the Python-to-Python transformation pipeline of FX. In
+addition, these components can be used separately. For example,
+symbolic tracing can be used in isolation to capture a form of
+the code for analysis (and not transformation) purposes. Code
+generation can be used for programmatically generating models, for
+example from a config file. There are many uses for FX!
+
+Several example transformations can be found at the
+`examples <https://github.com/pytorch/examples/tree/master/fx>`__
+repository.
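+
+As a minimal illustration of the "transforms" step above (a sketch, not
+part of this module; the function name is hypothetical), a pass that
+rewrites every ``operator.add`` call into ``operator.mul`` only needs to
+walk the Graph's nodes and recompile::
+
+    import operator
+
+    def replace_add_with_mul(m: torch.nn.Module) -> torch.fx.GraphModule:
+        traced = torch.fx.symbolic_trace(m)
+        for node in traced.graph.nodes:
+            # call_function nodes store the Python callable in `target`
+            if node.op == 'call_function' and node.target == operator.add:
+                node.target = operator.mul
+        traced.graph.lint()  # check the mutated Graph is still well-formed
+        traced.recompile()   # regenerate forward() from the mutated Graph
+        return traced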
+''' + +from .graph_module import GraphModule +from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta +from .graph import Graph, CodeGen +from .node import Node, map_arg, has_side_effect +from .proxy import Proxy +from .interpreter import Interpreter as Interpreter, Transformer as Transformer +from .subgraph_rewriter import replace_pattern diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..750cda338856eb808e136a09f339f224c9627d45 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.pyi @@ -0,0 +1,11 @@ +from ._symbolic_trace import ( + symbolic_trace as symbolic_trace, + Tracer as Tracer, + wrap as wrap, +) +from .graph import Graph as Graph +from .graph_module import GraphModule as GraphModule +from .interpreter import Interpreter as Interpreter, Transformer as Transformer +from .node import has_side_effect as has_side_effect, map_arg as map_arg, Node as Node +from .proxy import Proxy as Proxy +from .subgraph_rewriter import replace_pattern as replace_pattern diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/_compatibility.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..14588fad9a09e1c307c475bda7c551d801dbd731 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/_compatibility.py @@ -0,0 +1,34 @@ +from typing import Any, Dict +import textwrap + +_BACK_COMPAT_OBJECTS : Dict[Any, None] = {} +_MARKED_WITH_COMPATIBILITY : Dict[Any, None] = {} + +def compatibility(is_backward_compatible : bool): + if is_backward_compatible: + + def mark_back_compat(fn): + docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '') + docstring += """ +.. note:: + Backwards-compatibility for this API is guaranteed. +""" + fn.__doc__ = docstring + _BACK_COMPAT_OBJECTS.setdefault(fn) + _MARKED_WITH_COMPATIBILITY.setdefault(fn) + return fn + + return mark_back_compat + else: + + def mark_not_back_compat(fn): + docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '') + docstring += """ +.. warning:: + This API is experimental and is *NOT* backward-compatible. 
+""" + fn.__doc__ = docstring + _MARKED_WITH_COMPATIBILITY.setdefault(fn) + return fn + + return mark_not_back_compat diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/_pytree.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/_pytree.py new file mode 100644 index 0000000000000000000000000000000000000000..268fdd8150b70765ce99849e1976ddd2bec54c09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/_pytree.py @@ -0,0 +1,69 @@ +from collections import namedtuple +from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Type, Optional + +from torch.utils._pytree import LeafSpec, PyTree, TreeSpec + +FlattenFuncSpec = Callable[[PyTree, TreeSpec], List] + +FlattenFuncExactMatchSpec = Callable[[PyTree, TreeSpec], bool] + +SUPPORTED_NODES: Dict[Type[Any], FlattenFuncSpec] = {} + +SUPPORTED_NODES_EXACT_MATCH: Dict[Type[Any], Optional[FlattenFuncExactMatchSpec]] = {} + +def register_pytree_flatten_spec( + cls: Type[Any], + flatten_fn_spec: FlattenFuncSpec, + flatten_fn_exact_match_spec: Optional[FlattenFuncExactMatchSpec] = None +) -> None: + SUPPORTED_NODES[cls] = flatten_fn_spec + SUPPORTED_NODES_EXACT_MATCH[cls] = flatten_fn_exact_match_spec + +def tree_flatten_spec(pytree: PyTree, spec: TreeSpec, exact_structural_match=False) -> List[Any]: + if isinstance(spec, LeafSpec): + return [pytree] + if spec.type not in SUPPORTED_NODES: + raise RuntimeError( + f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with " + "torch.fx._pytree.register_pytree_flatten_spec. If you have serialized your model, make " + "sure that any custom pytrees have been registered before loading it.") + flatten_fn_spec = SUPPORTED_NODES[spec.type] + child_pytrees = flatten_fn_spec(pytree, spec) + if exact_structural_match: + flatten_fn_exact_match_spec = SUPPORTED_NODES_EXACT_MATCH[spec.type] + if flatten_fn_exact_match_spec and not flatten_fn_exact_match_spec(pytree, spec): + raise RuntimeError(f"Cannot flatten pytree {pytree}, given spec: {spec}") + result = [] + for child, child_spec in zip(child_pytrees, spec.children_specs): + flat = tree_flatten_spec(child, child_spec, exact_structural_match) + result += flat + return result + +def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]: + return [d[k] for k in spec.context] + +def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]: + return [d[i] for i in range(len(spec.children_specs))] + +def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]: + return [d[i] for i in range(len(spec.children_specs))] + +def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> List[Any]: + return [d[i] for i in range(len(spec.children_specs))] + +def _dict_flatten_spec_exact_match(d: Dict[Any, Any], spec: TreeSpec) -> bool: + return len(d) == len(spec.context) + +def _list_flatten_spec_exact_match(d: List[Any], spec: TreeSpec) -> bool: + return len(d) == len(spec.children_specs) + +def _tuple_flatten_spec_exact_match(d: Tuple[Any], spec: TreeSpec) -> bool: + return len(d) == len(spec.children_specs) + +def _namedtuple_flatten_spec_exact_match(d: NamedTuple, spec: TreeSpec) -> bool: + return len(d) == len(spec.children_specs) + +register_pytree_flatten_spec(dict, _dict_flatten_spec, _dict_flatten_spec_exact_match) +register_pytree_flatten_spec(list, _list_flatten_spec, _list_flatten_spec_exact_match) +register_pytree_flatten_spec(tuple, _tuple_flatten_spec, _tuple_flatten_spec_exact_match) +register_pytree_flatten_spec(namedtuple, _namedtuple_flatten_spec, 
_tuple_flatten_spec_exact_match) # type: ignore[arg-type] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..5db2cf7db224e4c21c16689dc4392fdb1b75a453 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py @@ -0,0 +1,1163 @@ +import builtins +import copy +import functools +import inspect +import math +import os +import warnings +import collections +from itertools import chain +from types import CodeType, FunctionType, ModuleType +from typing import ( + Any, + Callable, + Dict, + List, + NamedTuple, + Optional, + Set, + Tuple, + Type, + Union, +) + +import torch +import torch.utils._pytree as pytree +from torch._C import ScriptObject # type: ignore[attr-defined] + +from ._compatibility import compatibility +from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph +from .graph_module import GraphModule +from .node import Argument, base_types, map_aggregate +from .proxy import ParameterProxy, Proxy, TracerBase, Scope, ScopeContextManager + +HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS + +# These need to run in global scope to handle nested calls correctly +_orig_module_call: Callable = torch.nn.Module.__call__ +_orig_module_getattr: Callable = torch.nn.Module.__getattr__ + +_proxyable_classes: Dict[Type, None] = {} + +_is_fx_tracing_flag = False + + +def is_fx_tracing(): + return _is_fx_tracing_flag + +@compatibility(is_backward_compatible=True) +class ProxyableClassMeta(type): + """ + ProxyableClassMeta allows you to make construction of a given Python class + symbolically traceable. For example:: + + import torch + import torch.fx + + class TensorPair(metaclass=torch.fx.ProxyableClassMeta): + def __init__(self, left, right): + self.left, self.right = left, right + + def add(self, other): + l = self.left + other.left + r = self.right + other.right + return TensorPair(l, r) + + def mul(self, other): + l = self.left * other.left + r = self.right * other.right + return TensorPair(l, r) + + def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor): + s = x.add(TensorPair(y, y)) + return s.mul(x) + + x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) + y = torch.randn(5, 3) + ref_out = use_tensor_pair_ctor(x, y) + + traced = torch.fx.symbolic_trace(use_tensor_pair_ctor) + print(traced.code) + ''' + def forward(self, x : __main___TensorPair, y : torch.Tensor): + tensor_pair = __main___TensorPair(y, y); y = None + add = x.add(tensor_pair); tensor_pair = None + mul = add.mul(x); add = x = None + return mul + ''' + + From this example, we can see that construction of a class (``TensorPair``) + defined with ``ProxyableClassMeta`` as metaclass can be recorded in symbolic + tracing. 
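+
+    Note that when ``__call__`` below finds no ``Proxy`` among the
+    constructor arguments, it falls through to the regular ``__init__``
+    path, so classes defined this way behave identically when no tracing
+    is in progress.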
+ """ + + def __init__(cls, name, bases, attrs): + _proxyable_classes.setdefault(cls) + super().__init__(name, bases, attrs) + + def __call__(cls, *args, **kwargs): + instance = cls.__new__(cls) # type: ignore[call-overload] + + if not is_fx_tracing(): + cls.__init__(instance, *args, **kwargs) # type: ignore[misc] + return instance + + found_proxies = [] + + def check_proxy(a): + if isinstance(a, Proxy): + found_proxies.append(a) + + map_aggregate(args, check_proxy) + map_aggregate(kwargs, check_proxy) + + if len(found_proxies) != 0: + tracer = found_proxies[0].tracer + return tracer.create_proxy("call_function", cls, args, kwargs) + else: + cls.__init__(instance, *args, **kwargs) # type: ignore[misc] + return instance + + +def _patch_function(fn: FunctionType, nargs: int) -> FunctionType: + co = fn.__code__ + co_flags = co.co_flags & ~HAS_VARSTUFF + co_args: tuple + if hasattr(co, "co_qualname"): + # Python-3.11+ code signature + co_args = ( + nargs, + 0, + 0, + co.co_nlocals, + co.co_stacksize, + co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_qualname, # type: ignore[attr-defined] + co.co_firstlineno, + co.co_lnotab, + co.co_exceptiontable, # type: ignore[attr-defined] + co.co_freevars, + co.co_cellvars, + ) + elif hasattr(co, "co_posonlyargcount"): + co_args = ( + nargs, + 0, + 0, + co.co_nlocals, + co.co_stacksize, + co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + co.co_freevars, + co.co_cellvars, + ) + else: + co_args = ( + nargs, + 0, + co.co_nlocals, + co.co_stacksize, + co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + co.co_freevars, + co.co_cellvars, + ) + new_code = CodeType(*co_args) # type: ignore[arg-type] + return FunctionType( + new_code, fn.__globals__, fn.__name__, fn.__defaults__, fn.__closure__ + ) + + # we need to insert placeholder nodes for *args and **kwargs + # we can't call this function normally, otherwise it would try to unpack them + # instead, let's make python think that args and kwargs are normal variables + + +@compatibility(is_backward_compatible=False) +class PHBase: + """ + Object representing an input placeholder to `concrete_args` + """ + + def __repr__(self): + return "PH" + + +PH = PHBase() + + +@compatibility(is_backward_compatible=False) +class PHWithMeta(PHBase): + """ + Object representing an input placeholder to `concrete_args` + """ + def __init__(self, ph_key: Optional[str] = None): + super().__init__() + + # Provide a hey for user to identify placeholder node during analysis + self.ph_key = ph_key + + +@compatibility(is_backward_compatible=True) +class Tracer(TracerBase): + # Reference: https://github.com/pytorch/pytorch/issues/54354 + # The first line of this docstring overrides the one Sphinx generates for the + # documentation. We need it so that Sphinx doesn't leak `math`s path from the + # build environment (e.g. ` None: + # This method's signature is overridden by the first line of this class' + # docstring. If this method's signature is modified, the signature that + # overrides it also should be modified accordingly. + + """ + Construct a Tracer object. + + Args: + + autowrap_modules (Tuple[ModuleType]): defaults to `(math, )`, + Python modules whose functions should be wrapped automatically + without needing to use fx.wrap(). 
Backward-compatibility for + this parameter is guaranteed. + + autowrap_functions (Tuple[Callable, ...]): defaults to `()`, + Python functions that should be wrapped automatically without + needing to use fx.wrap(). Backward compatibility for this + parameter is guaranteed. + + param_shapes_constant (bool): When this flag is set, calls to shape, + size and a few other shape like attributes of a module's parameter + will be evaluated directly, rather than returning a new Proxy value + for an attribute access. Backward compatibility for this parameter + is guaranteed. + """ + + super().__init__() + + # Functions we will eagerly wrap when we see them while tracing + # this captures both `math.sqrt()` and `from math import sqrt` automatically + self._autowrap_function_ids: Set[int] = { + id(value) + for name, value in chain(*[m.__dict__.items() for m in autowrap_modules]) + if not name.startswith("_") and callable(value) + } + self._autowrap_function_ids.update({id(f) for f in autowrap_functions}) + + # Python modules to apply autowrap to at the start, in addition to + # modules we see while tracing + self._autowrap_search: List[ModuleType] = list(autowrap_modules) + self.param_shapes_constant = param_shapes_constant + + self.submodule_paths: Optional[Dict[torch.nn.Module, str]] = None + self.root_module_name: str = "" + # Maps the containing module's name to the operator name + self.scope = Scope("", None) + # Records the module call stack + self.module_stack = collections.OrderedDict() + # Mapping of node name to module scope + self.node_name_to_scope: Dict[str, Tuple[str, type]] = {} + + @compatibility(is_backward_compatible=True) + def create_arg(self, a: Any) -> "Argument": + """ + A method to specify the behavior of tracing when preparing values to + be used as arguments to nodes in the ``Graph``. + + By default, the behavior includes: + + #. Iterate through collection types (e.g. tuple, list, dict) and recursively + call ``create_args`` on the elements. + #. Given a Proxy object, return a reference to the underlying IR ``Node`` + #. Given a non-Proxy Tensor object, emit IR for various cases: + + * For a Parameter, emit a ``get_attr`` node referring to that Parameter + * For a non-Parameter Tensor, store the Tensor away in a special + attribute referring to that attribute. + + This method can be overridden to support more types. + + Args: + + a (Any): The value to be emitted as an ``Argument`` in the ``Graph``. + + + Returns: + + The value ``a`` converted into the appropriate ``Argument`` + """ + # The base tracer is used to construct Graphs when there is no associated + # module hierarchy, so it can never create parameter references. + # The default tracer adds the ability to refer to parameters when + # tracing modules. + if isinstance(a, torch.nn.Parameter): + for n, p in self.root.named_parameters(): + if a is p: + return self.create_node("get_attr", n, (), {}) + raise NameError("parameter is not a member of this module") + elif isinstance(a, torch.Tensor): + for n_, p_ in self.root.named_buffers(): + if a is p_: + return self.create_node("get_attr", n_, (), {}) + elif isinstance(a, torch.nn.Module): + for n_, p_ in self.root.named_modules(): + if a is p_: + return self.create_node("get_attr", n_, (), {}) + # For NamedTuple instances that appear literally as args, we emit + # a node to construct the NamedTuple and use that Node as the argument. 
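+        # E.g. an argument `Point(x=proxy_a, y=2)` (where `Point` is a
+        # hypothetical NamedTuple) becomes a `call_function[target=Point]`
+        # node whose fields are converted recursively by `create_arg`.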
+ if isinstance(a, tuple) and hasattr(a, "_fields"): + args = tuple(self.create_arg(elem) for elem in a) + return self.create_node("call_function", a.__class__, args, {}) + + # Tensors do not have a reliable string repr() from which they can be + # constructed (and we probably don't want to rely on that, either), so + # for any constant Tensor values we encounter, first search for if they + # are an attribute of some module in the module hierarchy. If so, emit + # a get_attr to retrieve that tensor. Otherwise, we'll store away the + # tensor value into a special attribute on the Module s.t. we can + # retrieve it with a get_attr. + if isinstance(a, (torch.Tensor, ScriptObject)): + qualname: Optional[str] = self.tensor_attrs.get(a) + + # Tensor was not found in the Module hierarchy, stow it away in a + # special attribute and set the qualname to refer to that + if not qualname: + i = 0 + while True: + qualname = f"_tensor_constant{i}" + if not hasattr(self.root, qualname): + break + i += 1 + self.tensor_attrs[a] = qualname + setattr(self.root, qualname, a) + + return self.create_node("get_attr", qualname, (), {}) + + if type(a) in _proxyable_classes: + # This is an instance of a proxyable class for which we did not + # witness its construction. Intern this as a constant attribute + + # TODO: binary search + i = 0 + while True: + qualname = f"_{a.__class__.__name__}_constant_{i}" + if not hasattr(self.root, qualname): + break + i += 1 + setattr(self.root, qualname, a) + + return self.create_node("get_attr", qualname, (), {}) + + return super().create_arg(a) + + @compatibility(is_backward_compatible=True) + def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool: + """ + A method to specify whether a given ``nn.Module`` is a "leaf" module. + + Leaf modules are the atomic units that appear in + the IR, referenced by ``call_module`` calls. By default, + Modules in the PyTorch standard library namespace (torch.nn) + are leaf modules. All other modules are traced through and + their constituent ops are recorded, unless specified otherwise + via this parameter. + + Args: + + m (Module): The module being queried about + module_qualified_name (str): The path to root of this module. For example, + if you have a module hierarchy where submodule ``foo`` contains + submodule ``bar``, which contains submodule ``baz``, that module will + appear with the qualified name ``foo.bar.baz`` here. + """ + return ( + (m.__module__.startswith("torch.nn") or m.__module__.startswith("torch.ao.nn")) + and not isinstance(m, torch.nn.Sequential) + ) + + @compatibility(is_backward_compatible=True) + def path_of_module(self, mod: torch.nn.Module) -> str: + """ + Helper method to find the qualified name of ``mod`` in the Module hierarchy + of ``root``. For example, if ``root`` has a submodule named ``foo``, which has + a submodule named ``bar``, passing ``bar`` into this function will return + the string "foo.bar". + + Args: + + mod (str): The ``Module`` to retrieve the qualified name for. + """ + # Prefer the O(1) algorithm + if self.submodule_paths: + path = self.submodule_paths.get(mod) + if path is None: + raise NameError("module is not installed as a submodule") + assert isinstance(path, str) + return path + # O(N^2) fallback in the case that we didn't store the submodule + # paths. 
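+        # (each fallback lookup re-walks the whole module tree via
+        # `named_modules()`)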
+ else: + for n, p in self.root.named_modules(): + if mod is p: + return n + raise NameError("module is not installed as a submodule") + + @compatibility(is_backward_compatible=True) + def call_module( + self, + m: torch.nn.Module, + forward: Callable[..., Any], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> Any: + """ + Method that specifies the behavior of this ``Tracer`` when it encounters + a call to an ``nn.Module`` instance. + + By default, the behavior is to check if the called module is a leaf module + via ``is_leaf_module``. If it is, emit a ``call_module`` node referring to + ``m`` in the ``Graph``. Otherwise, call the ``Module`` normally, tracing through + the operations in its ``forward`` function. + + This method can be overridden to--for example--create nested traced + GraphModules, or any other behavior you would want while tracing across + ``Module`` boundaries. + + Args: + + m (Module): The module for which a call is being emitted + forward (Callable): The forward() method of the ``Module`` to be invoked + args (Tuple): args of the module callsite + kwargs (Dict): kwargs of the module callsite + + Return: + + The return value from the Module call. In the case that a ``call_module`` + node was emitted, this is a ``Proxy`` value. Otherwise, it is whatever + value was returned from the ``Module`` invocation. + """ + module_qualified_name = self.path_of_module(m) + with ScopeContextManager(self.scope, Scope(module_qualified_name, type(m))) as _scope: + # module_stack is an ordered dict so writing then deleting the + # entry is equivalent to push/pop on a list + self.module_stack[_scope.module_path] = (module_qualified_name, _scope.module_type) + if not self.is_leaf_module(m, module_qualified_name): + ret_val = forward(*args, **kwargs) + else: + ret_val = self.create_proxy("call_module", module_qualified_name, args, kwargs) + key, _ = self.module_stack.popitem(last=True) + assert key == _scope.module_path, f" Unexpected key {key}" + + return ret_val + + @compatibility(is_backward_compatible=False) + def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]): + """ + Method that specifies the behavior of this ``Tracer`` when we call getattr + on a call to an ``nn.Module`` instance. + + By default, the behavior is to return a proxy value for the attribute. It + also stores the proxy value in the ``parameter_proxy_cache``, so that future + calls will reuse the proxy rather than creating a new one. + + This method can be overridden to --for example-- not return proxies when + querying parameters. + + Args: + + attr (str): The name of the attribute being queried + attr_val (Any): The value of the attribute + parameter_proxy_cache (Dict[str, Any]): A cache of attr names to proxies + + Return: + + The return value from the getattr call. 
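+
+        Note that when ``param_shapes_constant`` is set on this tracer, the
+        proxy created for a parameter is a ``ParameterProxy``, so shape and
+        size queries on it evaluate to concrete values instead of emitting
+        new graph nodes.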
+ """ + def maybe_get_proxy_for_attr( + attr_val, collection_to_search, parameter_proxy_cache + ): + for n, p in collection_to_search: + if attr_val is p: + if n not in parameter_proxy_cache: + kwargs = {} + if ( + "proxy_factory_fn" + in inspect.signature(self.create_proxy).parameters + ): + kwargs["proxy_factory_fn"] = ( + None + if not self.param_shapes_constant + else lambda node: ParameterProxy( + self, node, n, attr_val + ) + ) + val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type] + parameter_proxy_cache[n] = val_proxy + return parameter_proxy_cache[n] + return None + + if isinstance(attr_val, torch.nn.Parameter): + maybe_parameter_proxy = maybe_get_proxy_for_attr( + attr_val, self.root.named_parameters(), parameter_proxy_cache + ) + if maybe_parameter_proxy is not None: + return maybe_parameter_proxy + + if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor): + maybe_buffer_proxy = maybe_get_proxy_for_attr( + attr_val, self.root.named_buffers(), parameter_proxy_cache + ) + if maybe_buffer_proxy is not None: + return maybe_buffer_proxy + + return attr_val + + # This method will be refactored + @compatibility(is_backward_compatible=False) + def create_args_for_root(self, root_fn, is_module, concrete_args=None): + """ + Create ``placeholder`` nodes corresponding to the signature of the ``root`` + Module. This method introspects root's signature and emits those + nodes accordingly, also supporting ``*args`` and ``**kwargs``. + """ + # In some cases, a function or method has been decorated with a wrapper + # defined via ``functools.wraps``. In this case, the outer code object + # will likely not contain the actual parameters we care about, so unwrap + # the function to get to the innermost callable. + fn_for_analysis = inspect.unwrap(root_fn) + co = fn_for_analysis.__code__ + total_args = co.co_argcount + co.co_kwonlyargcount + orig_args = list(co.co_varnames) + names_iter = iter(co.co_varnames) + args: List[Any] = [] + skip_arg_idx = 0 + if is_module: + if total_args == 0: + raise RuntimeError( + "``self`` argument cannot be part of *args expansion!" + ) + skip_arg_idx = 1 + next(names_iter) # skip self + args.append(self.root) + + sig = inspect.signature(fn_for_analysis) + + def proxy_placeholder(name: str): + if concrete_args is not None and name in concrete_args: + cnt = 0 + + def replace_ph(x): + nonlocal cnt + cnt += 1 + param = sig.parameters[name] + default = ( + () + if param.default is inspect.Parameter.empty + else (param.default,) + ) + out = self.create_proxy( + "placeholder", f"{name}_{str(cnt)}", default, {} + ) + if isinstance(x, PHBase): + def transfer_attrs(fr, to): + for attr_name in dir(fr): + attr_val = getattr(fr, attr_name) + if ( + not callable(attr_val) + and not attr_name.startswith("__") + and not hasattr(to, attr_name) + ): + setattr(to, attr_name, attr_val) + + if x != PH: + # Transfer attrs in the case where you're using a placeholder other + # than the singleton PH (PH has no attributes to transfer). + # Proxies were created out of the placeholders. + # Transfer any metadata (put on the placeholders in the form of + # attributes set by the user) from the placeholder to the + # underlying nodes (the proxy is unwrapped by the user, but + # the metadata should hold). 
+ transfer_attrs(fr=x, to=out.node) + + return out + # Union[int, bool] == bool in Python <= 3.6 + if ( + type(x) == bool + or type(x) in base_types + and type(x) != torch.Tensor + ): + torch._assert( + out == x, + f"{name} has been specialized to have value {x} but got another value", + ) + elif type(x) == type(None): + args = ( + out, + f"{name} has been specialized to have value None but got another value", + ) + self.create_proxy("call_function", _assert_is_none, args, {}) + else: + warnings.warn( + f"Was not able to add assertion to guarantee correct input {name} to " + f"specialized function. It is up to the user to make sure that your inputs match the " + f"inputs you specialized the function with." + ) + + return x + + return pytree.tree_map(replace_ph, concrete_args[name]) + if name[0] == "*": + default = () + else: + param = sig.parameters[name] + default = () if param.default is inspect.Parameter.empty else (param.default,) # type: ignore[assignment] + return self.create_proxy( + "placeholder", + name, + default, + {}, + type_expr=fn_for_analysis.__annotations__.get(name, None) + ) + + arg_names = [next(names_iter) for idx in range(skip_arg_idx, total_args)] + if isinstance(concrete_args, tuple): + if len(arg_names) != len(concrete_args): + raise RuntimeError( + f"Tracing expected {len(arg_names)} arguments but got {len(concrete_args)} concrete arguments" + ) + concrete_args = dict(zip(arg_names, concrete_args)) + args.extend(proxy_placeholder(names) for names in arg_names) + + if co.co_kwonlyargcount > 0 or co.co_flags & HAS_VARSTUFF: + # TODO: type annotations for *args and **kwargs + if co.co_flags & inspect.CO_VARARGS: + args.append(proxy_placeholder("*" + next(names_iter))) + if co.co_flags & inspect.CO_VARKEYWORDS: + args.append(proxy_placeholder("**" + next(names_iter))) + root_fn = _patch_function(root_fn, len(args)) + + flat_args, in_spec = pytree.tree_flatten(tuple(args)) + if any(not isinstance(i, pytree.LeafSpec) for i in in_spec.children_specs): + # In the case that we have pytree-flattened inputs in + # `concrete_args`, generate a flattening wrapper around the + # original root function and return that. + self.graph._codegen = _PyTreeCodeGen( + _PyTreeInfo(orig_args[:total_args], in_spec, None) + ) + + def flatten_fn(*args): + tree_args = pytree.tree_unflatten(list(args), in_spec) + tree_out = root_fn(*tree_args) + out_args, out_spec = pytree.tree_flatten(tree_out) + assert isinstance(self.graph._codegen, _PyTreeCodeGen) + self.graph._codegen.pytree_info = ( + self.graph._codegen.pytree_info._replace(out_spec=out_spec) + ) + return out_args + + return flatten_fn, flat_args + return root_fn, args + + @compatibility(is_backward_compatible=True) + def trace( + self, + root: Union[torch.nn.Module, Callable[..., Any]], + concrete_args: Optional[Dict[str, Any]] = None, + ) -> Graph: + """ + Trace ``root`` and return the corresponding FX ``Graph`` representation. ``root`` + can either be an ``nn.Module`` instance or a Python callable. + + Note that after this call, ``self.root`` may be different from the ``root`` passed + in here. For example, when a free function is passed to ``trace()``, we will + create an ``nn.Module`` instance to use as the root and add embedded constants + to. + + + Args: + + root (Union[Module, Callable]): Either a ``Module`` or a function to be + traced through. Backwards-compatibility for this parameter is + guaranteed. + concrete_args (Optional[Dict[str, any]]): Concrete arguments that should + not be treated as Proxies. 
This parameter is experimental and + its backwards-compatibility is *NOT* guaranteed. + + Returns: + + A ``Graph`` representing the semantics of the passed-in ``root``. + """ + global _is_fx_tracing_flag + old_is_fx_tracing_flag = _is_fx_tracing_flag + _is_fx_tracing_flag = True + try: + if isinstance(root, torch.nn.Module): + self.root = root + + assert hasattr( + type(root), self.traced_func_name + ), f"traced_func_name={self.traced_func_name} doesn't exist in {type(root).__name__}" + + fn = getattr(type(root), self.traced_func_name) + self.root_module_name = root._get_name() + self.submodule_paths = {mod: name for name, mod in root.named_modules()} + else: + self.root = torch.nn.Module() + fn = root + + tracer_cls: Optional[Type[Tracer]] = getattr(self, "__class__", None) + self.graph = Graph(tracer_cls=tracer_cls) + if hasattr(fn, '__code__'): + code = fn.__code__ + self.graph._co_fields = { + 'co_name': code.co_name, + 'co_filename': code.co_filename, + 'co_firstlineno': code.co_firstlineno, + } + + # When we encounter a Tensor value that's not a parameter, we look if it + # is some other attribute on the model. Construct a dict mapping Tensor + # values to the qualified name here for efficiency. This is used downstream + # in create_arg + self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {} + + def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]): + for k, v in m.__dict__.items(): + if isinstance(v, (torch.Tensor, ScriptObject)): + self.tensor_attrs[v] = ".".join(prefix_atoms + [k]) + for k, v in m.named_children(): + collect_tensor_attrs(v, prefix_atoms + [k]) + + collect_tensor_attrs(self.root, []) + + assert isinstance(fn, FunctionType) + + fn_globals = fn.__globals__ # run before it gets patched + fn, args = self.create_args_for_root( + fn, isinstance(root, torch.nn.Module), concrete_args + ) + + parameter_proxy_cache: Dict[ + str, Proxy + ] = {} # Reduce number of get_attr calls + + # Method dispatch on parameters is not recorded unless it's directly used. + # Thus, we need to insert a proxy when __getattr__ requests a parameter. + @functools.wraps(_orig_module_getattr) + def module_getattr_wrapper(mod, attr): + attr_val = _orig_module_getattr(mod, attr) + return self.getattr(attr, attr_val, parameter_proxy_cache) + + @functools.wraps(_orig_module_call) + def module_call_wrapper(mod, *args, **kwargs): + def forward(*args, **kwargs): + return _orig_module_call(mod, *args, **kwargs) + + _autowrap_check( + patcher, + getattr(getattr(mod, "forward", mod), "__globals__", {}), + self._autowrap_function_ids, + ) + return self.call_module(mod, forward, args, kwargs) + + with _Patcher() as patcher: + # allow duplicate patches to support the case of nested calls + patcher.patch_method( + torch.nn.Module, + "__getattr__", + module_getattr_wrapper, + deduplicate=False, + ) + patcher.patch_method( + torch.nn.Module, "__call__", module_call_wrapper, deduplicate=False + ) + _patch_wrapped_functions(patcher) + _autowrap_check(patcher, fn_globals, self._autowrap_function_ids) + for module in self._autowrap_search: + _autowrap_check( + patcher, module.__dict__, self._autowrap_function_ids + ) + self.create_node( + "output", + "output", + (self.create_arg(fn(*args)),), + {}, + type_expr=fn.__annotations__.get("return", None), + ) + + self.submodule_paths = None + finally: + _is_fx_tracing_flag = old_is_fx_tracing_flag + return self.graph + + def __deepcopy__(self, memo): + # _autowrap_search contains modules, which cannot be deepcopied. 
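+        # so that field is shallow-copied below, while every other field is
+        # deep-copied with the shared memo dict.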
+ new_tracer = Tracer.__new__(Tracer) + + for k, v in self.__dict__.items(): + if k in {'_autowrap_search'}: + new_obj = copy.copy(v) + else: + new_obj = copy.deepcopy(v, memo) + + new_tracer.__dict__[k] = new_obj + + return new_tracer + + +# Dictionary of (id(globals dict), function name) => globals_dict to patch for +# the purposes of the wrap() API. +# We key by the globals dict id and function name to ensure we're wrapping a given +# function only once. +_wrapped_fns_to_patch: Dict[Tuple[int, str], dict] = {} + +# List of methods on classes to wrap (class type, function name) +# this currently only works for Tensor.* methods that aren't traced properly +_wrapped_methods_to_patch: List[Tuple[type, str]] = [] + +if os.environ.get("FX_PATCH_GETITEM") == "1": + # This change is needed to trace models like PositionalEmbedding from BERT: + # https://github.com/pytorch/benchmark/blob/master/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/position.py + # but causes issues in quantization documented here: + # https://github.com/pytorch/pytorch/issues/50710 + # once that is fixed we can make this the default behavior. + _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__")) + + +def _find_proxy(*objects_to_search): + """ + Recursively search a data structure for a Proxy() and return it, + return None if not found. + """ + proxy = None + + def find_proxy(x): + nonlocal proxy + if isinstance(x, Proxy): + proxy = x + + map_aggregate(objects_to_search, find_proxy) + return proxy + + +def _create_wrapped_func(orig_fn): + @functools.wraps(orig_fn) + def wrapped(*args, **kwargs): + """ + Given an closed-over ``orig_function`` to invoke, search the args and kwargs for + a Proxy object. If there is one, emit a ``call_function`` node to preserve the + call to this leaf function directly. Otherwise, just return the results of + this function call, as this function is not being traced. + """ + proxy = _find_proxy(args, kwargs) + if proxy is not None: + return_proxy = proxy.tracer.create_proxy( + "call_function", orig_fn, args, kwargs + ) + return_proxy.node.meta["is_wrapped"] = True + return return_proxy + return orig_fn(*args, **kwargs) + + return wrapped + + +def _create_wrapped_method(cls, name): + orig_fn = getattr(cls, name) + + @functools.wraps(orig_fn) + def wrapped(*args, **kwargs): + """ + Search the args and kwargs for a Proxy object. If there is one, + emit a ``call_method`` node to preserve the call to this method + directly. Otherwise, just return the results of this function + call, as this function is not being traced. 
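+
+        For example, when `(torch.Tensor, "__getitem__")` is registered in
+        `_wrapped_methods_to_patch` above, a traced `x[0]` is recorded as a
+        `call_method` node targeting `__getitem__`.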
+ """ + proxy = _find_proxy(args, kwargs) + if proxy is not None: + return proxy.tracer.create_proxy("call_method", name, args, kwargs) + return orig_fn(*args, **kwargs) + + return wrapped + + +class _PatchedFn(NamedTuple): + frame_dict: Any + fn_name: str + orig_fn: Any + + def revert(self): + raise NotImplementedError() + + +class _PatchedFnSetItem(_PatchedFn): + def revert(self): + self.frame_dict[self.fn_name] = self.orig_fn + + +class _PatchedFnDel(_PatchedFn): + def revert(self): + del self.frame_dict[self.fn_name] + + +class _PatchedFnSetAttr(_PatchedFn): + def revert(self): + setattr(self.frame_dict, self.fn_name, self.orig_fn) + + +class _Patcher: + def __init__(self): + super().__init__() + self.patches_made: List[_PatchedFn] = [] + self.visited: Set[int] = set() + + def patch( + self, + frame_dict: Dict[str, Any], + name: str, + new_fn: Callable, + deduplicate: bool = True, + ): + """ + Replace frame_dict[name] with new_fn until we exit the context manager. + """ + new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined] + if name not in frame_dict and hasattr(builtins, name): + self.patches_made.append(_PatchedFnDel(frame_dict, name, None)) + elif getattr(frame_dict[name], "__fx_already_patched", False): + return # already patched, no need to do it again + else: + self.patches_made.append( + _PatchedFnSetItem(frame_dict, name, frame_dict[name]) + ) + frame_dict[name] = new_fn + + def patch_method( + self, cls: type, name: str, new_fn: Callable, deduplicate: bool = True + ): + """ + Replace object_or_dict.name with new_fn until we exit the context manager. + """ + new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined] + orig_fn = getattr(cls, name) + if getattr(orig_fn, "__fx_already_patched", False): + return # already patched, no need to do it again + self.patches_made.append(_PatchedFnSetAttr(cls, name, orig_fn)) + setattr(cls, name, new_fn) + + def visit_once(self, thing: Any): + """Return True on the first call to with thing, otherwise false""" + idx = id(thing) + if idx in self.visited: + return False + self.visited.add(idx) + return True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """ + Undo all the changes made via self.patch() and self.patch_method() + """ + while self.patches_made: + # unpatch in reverse order to handle duplicates correctly + self.patches_made.pop().revert() + self.visited.clear() + + +def _patch_wrapped_functions(patcher: _Patcher): + """ + Go through ``_wrapped_fn_patch_table`` and, for each frame object, wrap + the listed global functions in the `_create_wrapped_func` wrapper. + """ + for (_, name), frame_dict in _wrapped_fns_to_patch.copy().items(): + if name not in frame_dict and hasattr(builtins, name): + orig_fn = getattr(builtins, name) + else: + orig_fn = frame_dict[name] + patcher.patch(frame_dict, name, _create_wrapped_func(orig_fn)) + + for cls, name in _wrapped_methods_to_patch: + patcher.patch_method(cls, name, _create_wrapped_method(cls, name)) + + +def _autowrap_check( + patcher: _Patcher, frame_dict: Dict[str, Any], function_ids: Set[int] +): + """ + Some methods, like `math.sqrt` are common enough we want to automatically wrap them as we see them. + This method searches a scope for them and patches them if found. 
+ """ + if patcher.visit_once(frame_dict): + for name, value in frame_dict.items(): + if ( + not name.startswith("_") + and callable(value) + and id(value) in function_ids + ): + patcher.patch(frame_dict, name, _create_wrapped_func(value)) + + +@compatibility(is_backward_compatible=True) +def wrap(fn_or_name: Union[str, Callable]): + """ + This function can be called at module-level scope to register fn_or_name as a "leaf function". + A "leaf function" will be preserved as a CallFunction node in the FX trace instead of being + traced through:: + + # foo/bar/baz.py + def my_custom_function(x, y): + return x * x + y * y + + torch.fx.wrap('my_custom_function') + + def fn_to_be_traced(x, y): + # When symbolic tracing, the below call to my_custom_function will be inserted into + # the graph rather than tracing it. + return my_custom_function(x, y) + + This function can also equivalently be used as a decorator:: + + # foo/bar/baz.py + @torch.fx.wrap + def my_custom_function(x, y): + return x * x + y * y + + A wrapped function can be thought of a "leaf function", analogous to the concept of + "leaf modules", that is, they are functions that are left as calls in the FX trace + rather than traced through. + + Args: + + fn_or_name (Union[str, Callable]): The function or name of the global function to insert into the + graph when it's called + """ + if not callable(fn_or_name) and not isinstance(fn_or_name, str): + raise RuntimeError( + "Unsupported type for global function! Must be either a callable or " + "string name" + ) + + if callable(fn_or_name): + assert not isinstance(fn_or_name, str) # to make mypy happy + fn_name = fn_or_name.__name__ + else: + assert isinstance( + fn_or_name, str + ), "fn_or_name must be a global function or string name" + fn_name = fn_or_name + + currentframe = inspect.currentframe() + assert currentframe is not None + f = currentframe.f_back + assert f is not None + if f.f_code.co_name != "": + raise NotImplementedError("wrap must be called at the top level of a module") + + # consider implementing Callable version of this via _autowrap_function_ids / _autowrap_search + # semantics would be slightly different, but would add support `from x import wrapped_function` + _wrapped_fns_to_patch[(id(f.f_globals), fn_name)] = f.f_globals + return fn_or_name + + +@compatibility(is_backward_compatible=True) +def symbolic_trace( + root: Union[torch.nn.Module, Callable[..., Any]], + concrete_args: Optional[Dict[str, Any]] = None, +) -> GraphModule: + """ + Symbolic tracing API + + Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule`` + constructed by recording operations seen while tracing through ``root``. + + ``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures. + + For example:: + + def f(a, b): + if b == True: + return a + else: + return a*2 + + FX can typically not trace through this due to the presence of control + flow. However, we can use `concrete_args` to specialize on the value of + `b` to trace through this:: + + f = fx.symbolic_trace(f, concrete_args={'b': False}) + assert f(3, False) == 6 + + Note that although you can still pass in different values of `b`, they will be ignored. + + We can also use `concrete_args` to eliminate data-structure handling from + our function. This will use pytrees to flatten your input. To avoid + overspecializing, pass in `fx.PH` for values that shouldn't be + specialized. 
For example:: + + def f(x): + out = 0 + for v in x.values(): + out += v + return out + f = fx.symbolic_trace(f, concrete_args={'x': {'a': fx.PH, 'b': fx.PH, 'c': fx.PH}}) + assert f({'a': 1, 'b': 2, 'c': 4}) == 7 + + + Args: + root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted + into a Graph representation. + concrete_args (Optional[Dict[str, Any]]): Inputs to be partially specialized + + Returns: + GraphModule: a Module created from the recorded operations from ``root``. + """ + tracer = Tracer() + graph = tracer.trace(root, concrete_args) + name = ( + root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__ + ) + return GraphModule(tracer.root, graph, name) + + +@wrap +def _assert_is_none(value, msg): + assert value is None, msg diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/annotate.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/annotate.py new file mode 100644 index 0000000000000000000000000000000000000000..032ce14b6ec701dabc2459c501dfb957be5a1487 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/annotate.py @@ -0,0 +1,21 @@ +from torch.fx.proxy import Proxy +from ._compatibility import compatibility + +@compatibility(is_backward_compatible=False) +def annotate(val, type): + # val could be either a regular value (not tracing) + # or fx.Proxy (tracing) + if isinstance(val, Proxy): + if val.node.type: + raise RuntimeError(f"Tried to annotate a value that already had a type on it!" + f" Existing type is {val.node.type} " + f"and new type is {type}. " + f"This could happen if you tried to annotate a function parameter " + f"value (in which case you should use the type slot " + f"on the function signature) or you called " + f"annotate on the same value twice") + else: + val.node.type = type + return val + else: + return val diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/config.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/config.py new file mode 100644 index 0000000000000000000000000000000000000000..da5120d6edf180f7fbbe88ac342b4d0e4b383e50 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/config.py @@ -0,0 +1,6 @@ +# Whether to disable showing progress on compilation passes +# This needs its own config module; otherwise we will get a circular import if the dynamo config is imported here +disable_progress = True + +# If True, this also shows the node names in each pass; great for small models, but quite noisy for larger ones +verbose_progress = False diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/graph.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5909fa22892586333a135e429bc2ea87d207f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/graph.py @@ -0,0 +1,1630 @@ +import collections +from collections import defaultdict +from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name +import torch.utils._pytree as pytree +from . 
import _pytree as fx_pytree +from ._compatibility import compatibility + +import contextlib +from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type +from dataclasses import dataclass +from contextlib import contextmanager +import copy +import enum +import torch +import keyword +import re +import builtins +import math +import warnings +import inspect + +__all__ = ["PythonCode", "CodeGen", "Graph"] + +if TYPE_CHECKING: + from .graph_module import GraphModule # noqa: F401 + from ._symbolic_trace import Tracer # noqa: F401 + + +# Mapping of builtins to their `typing` equivalent. +_origin_type_map = { + list: List, + dict: Dict, + set: Set, + frozenset: FrozenSet, + tuple: Tuple, +} + + +# Signature for functions that transform the body (`List[str]`) of the +# generated code +TransformCodeFunc = Callable[[List[str]], List[str]] + + +class _CustomBuiltin(NamedTuple): + """Additional objs that we add to every graph's globals. + + The repr() for some standard library objects is not valid Python code without + an import. For common objects of this sort, we bundle them in the globals of + every FX graph. + """ + # How to import this object from the standard library. + import_str: str + # The actual object, produced from that import string. + obj: Any + +_custom_builtins: Dict[str, _CustomBuiltin] = {} + + +def _register_custom_builtin(name: str, import_str: str, obj: Any): + _custom_builtins[name] = _CustomBuiltin(import_str, obj) + + +_register_custom_builtin('inf', 'from math import inf', math.inf) +_register_custom_builtin('nan', 'from math import nan', math.nan) +_register_custom_builtin('NoneType', 'NoneType = type(None)', type(None)) +_register_custom_builtin('torch', 'import torch', torch) +_register_custom_builtin('device', 'from torch import device', torch.device) +_register_custom_builtin('fx_pytree', 'import torch.fx._pytree as fx_pytree', fx_pytree) +_register_custom_builtin('pytree', 'import torch.utils._pytree as pytree', pytree) + + +def _is_magic(x: str) -> bool: + return x.startswith('__') and x.endswith('__') + + +def _snake_case(s: str) -> str: + """ + Transforms the given string ``s`` to a Python-style variable name + + Examples: + ``mod.snake_case`` -> ``mod.snake_case`` + ``mod.pascalCase`` -> ``mod.pascal_case`` + ``mod.ALL_CAPS`` -> ``mod.all_caps`` + """ + chars = [] + prev_lower = False + for c in s: + if prev_lower and c.isupper(): + chars.append('_') + chars.append(c.lower()) + prev_lower = c.islower() + return ''.join(chars) + + +def _is_from_torch(obj: Any) -> bool: + module_name = getattr(obj, '__module__', None) + if module_name is not None: + base_module = module_name.partition('.')[0] + return ( + base_module == 'torch' and + not module_name.startswith("torch._dynamo.") and + not module_name.startswith("torch._inductor.") + ) + + name = getattr(obj, '__name__', None) + # exclude the name 'torch' itself, since chains like torch.torch.torch also resolve to it + if name is not None and name != 'torch': + for guess in [torch, torch.nn.functional]: + if getattr(guess, name, None) is obj: + return True + + return False + + +class _Namespace: + """A context for associating names uniquely with objects. + + The following invariants are enforced: + - Each object gets a single name. + - Each name is unique within a given namespace. + - Names generated do not shadow builtins, unless the object is indeed that builtin. 
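+ + For example (an illustrative sketch, not an exact transcript; ``obj_a``, ``obj_b`` and ``obj_c`` are hypothetical objects):: + + ns = _Namespace() + ns.create_name('x', obj_a) # -> 'x' + ns.create_name('x', obj_b) # -> 'x_1' (unique per namespace) + ns.create_name('x', obj_a) # -> 'x' (an object keeps its first name) + ns.create_name('list', obj_c) # -> 'list_1' (does not shadow the builtin)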
+ """ + def __init__(self): + self._obj_to_name: Dict[Any, str] = {} + self._unassociated_names = set() + self._used_names: Set[str] = set() + self._base_count: Dict[str, int] = defaultdict(int) + + self._illegal_char_regex = re.compile('[^0-9a-zA-Z_]+') + self._name_suffix_regex = re.compile(r"(.*)_(\d+)$") + + def create_name(self, candidate: str, obj: Optional[Any]) -> str: + """Create a unique name. + + Arguments: + candidate: used as the basis for the unique name, relevant to the user. + obj: If not None, an object that will be associated with the unique name. + """ + if obj is not None and obj in self._obj_to_name: + return self._obj_to_name[obj] + + # delete all characters that are illegal in a Python identifier + candidate = self._illegal_char_regex.sub('_', candidate) + + if not candidate: + candidate = '_unnamed' + + if candidate[0].isdigit(): + candidate = f'_{candidate}' + + match = self._name_suffix_regex.match(candidate) + if match is None: + base = candidate + num = None + else: + base, num_str = match.group(1, 2) + num = int(num_str) + + candidate = base if num is None else f'{base}_{num}' + if not num: + num = self._base_count[base] + + while candidate in self._used_names or self._is_illegal_name(candidate, obj): + num += 1 + candidate = f'{base}_{num}' + + self._used_names.add(candidate) + self._base_count[base] = num + if obj is None: + self._unassociated_names.add(candidate) + else: + self._obj_to_name[obj] = candidate + return candidate + + def associate_name_with_obj(self, name: str, obj: Any): + """Associate a unique name with an object. + + Neither `name` nor `obj` should be associated already. + """ + assert obj not in self._obj_to_name + assert name in self._unassociated_names + self._obj_to_name[obj] = name + self._unassociated_names.remove(name) + + def _is_illegal_name(self, name: str, obj: Any) -> bool: + # 1. keywords are never allowed as names. + if name in keyword.kwlist: + return True + + # 2. Can't shadow a builtin name, unless you *are* that builtin. + if name in builtins.__dict__: + return obj is not builtins.__dict__[name] + + # 3. Can't shadow our custom builtins either + if name in _custom_builtins: + return obj is not _custom_builtins[name].obj + + return False + + def _rename_object(self, obj: Any, name: str): + assert obj in self._obj_to_name + self._obj_to_name[obj] = name + self._used_names.add(name) + +dtype_abbrs = { + torch.bfloat16: 'bf16', + torch.float64: 'f64', + torch.float32: 'f32', + torch.float16: 'f16', + torch.float8_e4m3fn: 'f8e4m3fn', + torch.float8_e5m2: 'f8e5m2', + torch.complex32: 'c32', + torch.complex64: 'c64', + torch.complex128: 'c128', + torch.int8: 'i8', + torch.int16: 'i16', + torch.int32: 'i32', + torch.int64: 'i64', + torch.bool: 'b8', + torch.uint8: 'u8', +} + +@compatibility(is_backward_compatible=True) +@dataclass +class PythonCode: + """ + Represents all the information necessary to exec or save a graph as Python code. + """ + # Python source code for the forward function definition. + src: str + # Values in global scope during execution of `src_def`. + globals: Dict[str, Any] + # Optional mapping from the forward function's line number to + # node index. 
+ _lineno_map: Optional[Dict[int, Optional[int]]] + + +def _format_target(base: str, target: str) -> str: + elems = target.split('.') + r = base + for e in elems: + if not e.isidentifier(): + r = f'getattr({r}, "{e}")' + else: + r = f'{r}.{e}' + return r + +class _InsertPoint: + def __init__(self, graph, new_insert): + self.graph = graph + self.orig_insert, graph._insert = graph._insert, new_insert + + def __enter__(self): + pass + + def __exit__(self, type, value, tb): + self.graph._insert = self.orig_insert + +class _node_list: + def __init__(self, graph: 'Graph', direction: str = '_next'): + assert direction in ['_next', '_prev'] + self.graph = graph + self.direction = direction + + def __len__(self): + return self.graph._len + + def __iter__(self): + root, direction = self.graph._root, self.direction + cur = getattr(root, direction) + while cur is not root: + if not cur._erased: + yield cur + cur = getattr(cur, direction) + + def __reversed__(self): + return _node_list(self.graph, '_next' if self.direction == '_prev' else '_prev') + +class _PyTreeInfo(NamedTuple): + """ + Contains extra info stored when we're using Pytrees + """ + orig_args: List[str] + in_spec: pytree.TreeSpec + out_spec: Optional[pytree.TreeSpec] + +# get File:lineno code from stack_trace +def _parse_stack_trace(stack_trace: str): + if stack_trace is None: + return None + ParsedStackTrace = collections.namedtuple("ParsedStackTrace", ["file", "lineno", "code"]) + pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$") + lines = stack_trace.strip().split('\n') + # stacktrace should have innermost frame last, so we + # iterate backwards to find the first line that starts + # with 'File ' + summary_str = "" + for idx in range(len(lines) - 2, -1, -1): + line = lines[idx].strip() + matches = pattern.match(line) + if matches: + file = matches.group(1) + lineno = matches.group(2) + # next line should be the code + code = lines[idx + 1].strip() + return ParsedStackTrace(file, lineno, code) + return None + + +@compatibility(is_backward_compatible=False) +class CodeGen: + def __init__(self): + self._body_transformer: Optional[TransformCodeFunc] = None + self._func_name: str = "forward" + + def gen_fn_def(self, free_vars: List[str], maybe_return_annotation: str) -> str: + """ + Given the free variables and a return annotation, generates the beginning of the FX function. + By default, `gen_fn_def(['a', 'b'], '') == 'def {self._func_name}(a, b):'` + """ + # If the original function didn't have self as its first argument, we + # would have added it. + if len(free_vars) == 0 or free_vars[0] != 'self': + free_vars.insert(0, 'self') + return f"def {self._func_name}({', '.join(free_vars)}){maybe_return_annotation}:" + + def generate_output(self, output_args: Argument) -> str: + """ + Given the output arguments, generates the return statement of the FX function. + Note: The returned statement should not be indented. + """ + return f'return {repr(output_args)}' + + def process_inputs(self, *args: Any) -> Any: + """ + Transforms the inputs so that the graph can take them as arguments, as + non-default codegen may result in the inputs to the function being + different from the inputs to the graph. + + If the graph was directly runnable, this invariant should hold true + `f.graph.process_outputs(f.graph(*f.graph.process_inputs(*inputs))) == f(*inputs)` + """ + return args + + def process_outputs(self, outputs: Any) -> Any: + """ + Transforms the outputs of the graph to be identical to the codegen. 
+ + See ``process_inputs`` for more details. + """ + return outputs + + def additional_globals(self) -> List[Tuple[str, Any]]: + """ + If your codegen uses extra global values, add tuples of (identifier, reference to the value) here. + For example, return [('List', typing.List)] if you need ``List`` in the global context. + """ + return [] + + def _gen_python_code( + self, nodes, root_module: str, namespace: _Namespace, *, verbose: bool = False, + ) -> PythonCode: + free_vars: List[str] = [] + body: List[str] = [] + globals_: Dict[str, Any] = {} + wrapped_fns: Dict[str, None] = {} + + # Wrap string in list to pass by reference + maybe_return_annotation : List[str] = [''] + + def add_global(name_hint: str, obj: Any): + """Add an obj to be tracked as a global. + + We call this for names that reference objects external to the + Graph, like functions or types. + + Returns: the global name that should be used to reference 'obj' in generated source. + """ + if _is_from_torch(obj) and obj != torch.device: # to support registering torch.device + # HACK: workaround for how torch custom ops are registered. We + # can't import them like normal modules so they must retain their + # fully qualified name. + return _get_qualified_name(obj) + + # normalize the name hint to get a proper identifier + global_name = namespace.create_name(name_hint, obj) + + if global_name in globals_: + assert globals_[global_name] is obj + return global_name + globals_[global_name] = obj + return global_name + + # Pre-fill the globals table with registered builtins. + for name, (_, obj) in _custom_builtins.items(): + add_global(name, obj) + + def type_repr(o : Any): + if o == (): + # Empty tuple is used for empty tuple type annotation Tuple[()] + return '()' + + typename = _type_repr(o) + + if hasattr(o, '__origin__'): + # This is a generic type, e.g. typing.List[torch.Tensor] + origin_type = _origin_type_map.get(o.__origin__, o.__origin__) + origin_typename = add_global(_type_repr(origin_type), origin_type) + + if hasattr(o, '__args__'): + # Assign global names for each of the inner type variables. + args = [type_repr(arg) for arg in o.__args__] + + if len(args) == 0: + # Bare type, such as `typing.Tuple` with no subscript + # This code path is used in Python < 3.9 + return origin_typename + + return f'{origin_typename}[{",".join(args)}]' + else: + # Bare type, such as `typing.Tuple` with no subscript + # This code path is used in Python 3.9+ + return origin_typename + + # Common case: this is a regular module name like 'foo.bar.baz' + return add_global(typename, o) + + def _get_repr(arg: Any) -> str: + # Handle NamedTuples (if it has `_fields`) via add_global. 
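+ # (The NamedTuple class itself is added as a global so that the generated + # `ClassName(...)` expression resolves when the code is exec'd.)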
+ if isinstance(arg, tuple) and hasattr(arg, '_fields'): + qualified_name = _get_qualified_name(type(arg)) + global_name = add_global(qualified_name, type(arg)) + return f"{global_name}{repr(tuple(arg))}" + elif isinstance(arg, torch._ops.OpOverload): + qualified_name = _get_qualified_name(arg) + global_name = add_global(qualified_name, arg) + return f"{global_name}" + elif isinstance(arg, enum.Enum): + cls = arg.__class__ + clsname = add_global(cls.__name__, cls) + return f"{clsname}.{arg.name}" + return repr(arg) + + def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str: + args_s = ', '.join(_get_repr(a) for a in args) + kwargs_s = ', '.join(f'{k} = {_get_repr(v)}' for k, v in kwargs.items()) + if args_s and kwargs_s: + return f'{args_s}, {kwargs_s}' + return args_s or kwargs_s + + # Run through reverse nodes and record the first instance of a use + # of a given node. This represents the *last* use of the node in the + # execution order of the program, which we will use to free unused + # values + node_to_last_use : Dict[Node, Node] = {} + user_to_last_uses : Dict[Node, List[Node]] = {} + + def register_last_uses(n : Node, user : Node): + if n not in node_to_last_use: + node_to_last_use[n] = user + user_to_last_uses.setdefault(user, []).append(n) + + for node in reversed(nodes): + map_arg(node.args, lambda n: register_last_uses(n, node)) + map_arg(node.kwargs, lambda n: register_last_uses(n, node)) + + def delete_unused_values(user : Node): + """ + Delete values after their last use. This ensures that values that are + not used in the remainder of the code are freed and the memory usage + of the code is optimal. + """ + if user.op == 'placeholder': + return + if user.op == 'output': + body.append('\n') + return + nodes_to_delete = user_to_last_uses.get(user, []) + if len(nodes_to_delete): + to_delete_str = ' = '.join([repr(n) for n in nodes_to_delete] + ['None']) + body.append(f'; {to_delete_str}\n') + else: + body.append('\n') + + prev_stacktrace = None + + def append_stacktrace_summary(node : Node): + """ + Append a summary of the stacktrace to the generated code. This is + useful for debugging. 
+ """ + nonlocal prev_stacktrace + + if node.op not in {'placeholder', 'output'}: + if node.stack_trace: + if node.stack_trace != prev_stacktrace: + prev_stacktrace = node.stack_trace + summary_str = "" + + parsed_stack_trace = _parse_stack_trace(node.stack_trace) + + if parsed_stack_trace is not None: + lineno = parsed_stack_trace.lineno + code = parsed_stack_trace.code + summary_str = f'File: {parsed_stack_trace.file}:{lineno}, code: {code}' + + body.append(f'\n# {summary_str}\n') + elif prev_stacktrace != "": + prev_stacktrace = "" + body.append('\n# No stacktrace found for following nodes\n') + + def stringify_shape(shape : torch.Size) -> str: + return f"[{', '.join(str(x) for x in shape)}]" + + def emit_node(node : Node): + maybe_type_annotation = '' if node.type is None else f' : {type_repr(node.type)}' + + if verbose: + # override annotation with more detailed information + from torch._subclasses.fake_tensor import FakeTensor + from torch.fx.experimental.proxy_tensor import py_sym_types + from torch.fx.passes.shape_prop import TensorMetadata + + meta_val = node.meta.get('val', node.meta.get('tensor_meta', None)) + + # use string as annotation, to make it valid python code + if isinstance(meta_val, FakeTensor): + maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"' + elif isinstance(meta_val, py_sym_types): + maybe_type_annotation = f': "Sym({meta_val})"' + elif isinstance(meta_val, TensorMetadata): + maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"' + + if node.op == 'placeholder': + assert isinstance(node.target, str) + maybe_default_arg = '' if not node.args else f' = {_get_repr(node.args[0])}' + free_vars.append(f'{node.target}{maybe_type_annotation}{maybe_default_arg}') + raw_name = node.target.replace('*', '') + if raw_name != repr(node): + body.append(f'{repr(node)} = {raw_name}\n') + return + elif node.op == 'call_method': + assert isinstance(node.target, str) + body.append( + f'{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), node.target)}' + f'({_format_args(node.args[1:], node.kwargs)})') + return + elif node.op == 'call_function': + assert callable(node.target) + # pretty print operators + if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in magic_methods: + assert isinstance(node.args, tuple) + body.append(f'{repr(node)}{maybe_type_annotation} = ' + f'{magic_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}') + return + + # pretty print inplace operators; required for jit.script to work properly + # not currently supported in normal FX graphs, but generated by torchdynamo + if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in inplace_methods: + body.append(f'{inplace_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}; ' + f'{repr(node)}{maybe_type_annotation} = {_get_repr(node.args[0])}') + return + + qualified_name = _get_qualified_name(node.target) + global_name = add_global(qualified_name, node.target) + # special case for getattr: node.args could be 2-argument or 3-argument + # 2-argument: attribute access; 3-argument: fall through to attrib function call with default value + if global_name == 'getattr' and \ + isinstance(node.args, tuple) and \ + isinstance(node.args[1], str) and \ + node.args[1].isidentifier() and \ + len(node.args) == 2: + body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), 
node.args[1])}') + return + body.append(f'{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})') + if node.meta.get('is_wrapped', False): + wrapped_fns.setdefault(global_name) + return + elif node.op == 'call_module': + assert isinstance(node.target, str) + body.append(f'{repr(node)}{maybe_type_annotation} = ' + f'{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})') + return + elif node.op == 'get_attr': + assert isinstance(node.target, str) + body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}') + return + elif node.op == 'output': + if node.type is not None: + maybe_return_annotation[0] = f" -> {type_repr(node.type)}" + body.append(self.generate_output(node.args[0])) + return + raise NotImplementedError(f'node: {node.op} {node.target}') + + for i, node in enumerate(nodes): + # NOTE: emit_node does not emit a string with newline. It depends + # on delete_unused_values to append one + if verbose: + append_stacktrace_summary(node) + # emit a counter comment to keep track of + # node index, which will be deleted later + # after going through _body_transformer + body.append(f"# COUNTER: {i}\n") + emit_node(node) + delete_unused_values(node) + + if len(body) == 0: + # If the Graph has no non-placeholder nodes, no lines for the body + # have been emitted. To continue to have valid Python code, emit a + # single pass statement + body.append('pass\n') + + + + if len(wrapped_fns) > 0: + wrap_name = add_global('wrap', torch.fx.wrap) + wrap_stmts = '\n'.join([f'{wrap_name}("{name}")' for name in wrapped_fns]) + else: + wrap_stmts = '' + + if self._body_transformer: + body = self._body_transformer(body) + + for name, value in self.additional_globals(): + add_global(name, value) + + prologue = self.gen_fn_def(free_vars, maybe_return_annotation[0]) + + # remove counter and generate lineno to node index mapping + lineno_map: Dict[int, Optional[int]] = {} + prologue_len = prologue.count('\n') + 1 + new_lines: List[str] = [] + cur_idx = None + for line in ''.join(body).split('\n'): + counter = re.search(r"# COUNTER: (\d+)", line) + if counter and counter.group(1) is not None: + cur_idx = int(counter.group(1)) + else: + lineno_map[len(new_lines) + prologue_len] = cur_idx + new_lines.append(line) + + code = "\n".join(new_lines).lstrip('\n') + code = '\n'.join(' ' + line for line in code.split('\n')) + + fn_code = f""" +{wrap_stmts} + +{prologue} +{code}""" + return PythonCode(fn_code, globals_, _lineno_map=lineno_map) + + +# Ideally, we'd like to refactor all of the pytree logic into this codegen +# class. Unfortunately, there are 3 areas we currently need extra logic in FX. +# 1. In the initial symbolic trace, the pytree logic is tied up with `concrete_args`. +# 2. In the FX graph, we need to access 2 attributes - in_spec and out_spec. +# Since we can't access .graph within the FX forward, we need to copy the attribute to the module. +# 3. We currently can't register the pytree imports with `add_global` - not sure why. 
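+# For reference, a rough sketch of the pytree round-trip that the codegen +# below relies on (the literal values here are illustrative only): +# +# flat, spec = pytree.tree_flatten({'a': 1, 'b': (2, 3)}) +# # flat == [1, 2, 3]; `spec` records the dict/tuple structure +# assert pytree.tree_unflatten(flat, spec) == {'a': 1, 'b': (2, 3)}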
+class _PyTreeCodeGen(CodeGen): + def __init__(self, pytree_info: _PyTreeInfo): + super().__init__() + self.pytree_info: _PyTreeInfo = pytree_info + + def process_inputs(self, *inputs: Any) -> Any: + flat_args = pytree.arg_tree_leaves(*inputs) + return flat_args + + def process_outputs(self, out: Any) -> Any: + if self.pytree_info is None or self.pytree_info.out_spec is None: + return out + if not isinstance(out, (list, tuple)): + out = [out] + assert(self.pytree_info.out_spec is not None) + return pytree.tree_unflatten(out, self.pytree_info.out_spec) + + def gen_fn_def(self, free_vars, maybe_return_annotation): + # Given a user function/model: + # myargs = [myargs0, myargs1] + # mykwargs = {'mykwargs0': ..., 'mykwargs1': ...} + # def forward(self, mypos, *myargs, mykey=None, **mykwargs): + # + # The generated code flattens all keywords into positional arguments for `forward()`, + # e.g. forward(self, mypos, myargs0, myargs1, mykey, mykwargs0, mykwargs1): + # + # Within `forward`, `tree_flatten_spec` still parses args and kwargs separately, + # e.g. tree_flatten_spec(([mypos, myargs0, myargs1], + # {'mykey':mykey, 'mykwargs0':mykwargs0, 'mykwargs1':mykwargs1}), + # self._in_spec) + # + # If the user function/model does not have keywords, the dict is suppressed from tree_flatten_spec, + # e.g. tree_flatten_spec([mypos, myargs0, myargs1], self._in_spec) + if self.pytree_info is None: + return super().gen_fn_def(free_vars, maybe_return_annotation) + + fn_args = self.pytree_info.orig_args + has_orig_self = (fn_args[0] == 'self') if len(fn_args) > 0 else False + if has_orig_self: + free_vars.insert(0, 'self') + fn_definition = super().gen_fn_def(fn_args[:], maybe_return_annotation) + + if len(free_vars) > 0: # pytree has placeholders in it + # when kwargs is present, in_spec is tuple(args, kwargs) + has_args_kwargs_tuple = self.pytree_info.in_spec.type == tuple and \ + len(self.pytree_info.in_spec.children_specs) == 2 and \ + self.pytree_info.in_spec.children_specs[0].type == tuple and \ + self.pytree_info.in_spec.children_specs[1].type == dict + fn_kwargs = '{}' + fn_signature = f"[{', '.join(fn_args)}], self._in_spec" + if has_args_kwargs_tuple: + count_args = len(self.pytree_info.in_spec.children_specs[0].children_specs) + fn_args = self.pytree_info.orig_args[:count_args] + fn_kwargs = '{' + ', '.join(f"'{k}':{v}" for k, v in zip( + self.pytree_info.in_spec.children_specs[1].context, + self.pytree_info.orig_args[count_args:])) + '}' + fn_signature = f"([{', '.join(fn_args)}], {fn_kwargs}), self._in_spec" + + # in Python, `var1: annotation1, var2: annotation2 = function_call()` is invalid. + # we need to split it to two lines: + # one for annotation: `var1: annotation1; var2: annotation2;` (note the semicolon) + # one for code: `var1, var2, = function_call()` + without_annotation = [x.split(":")[0] for x in free_vars] + has_annotation = [x + "; " for x in free_vars if ":" in x] + if len(has_annotation) > 0: + fn_definition += "\n " + "".join(has_annotation) + "\n" + fn_definition += f""" + {', '.join(without_annotation)}, = fx_pytree.tree_flatten_spec({fn_signature})""" + return fn_definition + + def generate_output(self, output_args): + if self.pytree_info and self.pytree_info.out_spec: + return f'return pytree.tree_unflatten({repr(output_args)}, self._out_spec)' + else: + return super().generate_output(output_args) + +@compatibility(is_backward_compatible=True) +class Graph: + """ + ``Graph`` is the main data structure used in the FX Intermediate Representation. 
+ It consists of a series of ``Node`` s, each representing callsites (or other + syntactic constructs). The list of ``Node`` s, taken together, constitute a + valid Python function. + + For example, the following code + + .. code-block:: python + + import torch + import torch.fx + + class MyModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.param = torch.nn.Parameter(torch.rand(3, 4)) + self.linear = torch.nn.Linear(4, 5) + + def forward(self, x): + return torch.topk(torch.sum(self.linear(x + self.linear.weight).relu(), dim=-1), 3) + + m = MyModule() + gm = torch.fx.symbolic_trace(m) + + Will produce the following Graph:: + + print(gm.graph) + + .. code-block:: text + + graph(x): + %linear_weight : [num_users=1] = self.linear.weight + %add_1 : [num_users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {}) + %linear_1 : [num_users=1] = call_module[target=linear](args = (%add_1,), kwargs = {}) + %relu_1 : [num_users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {}) + %sum_1 : [num_users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1}) + %topk_1 : [num_users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {}) + return topk_1 + + For the semantics of operations represented in the ``Graph``, please see :class:`Node`. + """ + + @compatibility(is_backward_compatible=True) + def __init__(self, owning_module: Optional["GraphModule"] = None, tracer_cls: Optional[Type["Tracer"]] = None, + tracer_extras: Optional[Dict[str, Any]] = None): + """ + Construct an empty Graph. + """ + self._root : Node = Node(self, '', 'root', '', (), {}) + self._used_names : Dict[str, int] = {} # base name -> number + self._insert = self._root.prepend + self._len = 0 + self._graph_namespace = _Namespace() + self._owning_module = owning_module + self._tracer_cls = tracer_cls + self._tracer_extras = tracer_extras + self._codegen = CodeGen() + self._co_fields : Dict[str, Any] = {} + + @property + def owning_module(self): + return self._owning_module + + @owning_module.setter + def owning_module(self, mod: Optional["GraphModule"]): + self._owning_module = mod + + @property + def nodes(self) -> _node_list: + """ + Get the list of Nodes that constitute this Graph. + + Note that this ``Node`` list representation is a doubly-linked list. Mutations + during iteration (e.g. delete a Node, add a Node) are safe. + + Returns: + + A doubly-linked list of Nodes. Note that ``reversed`` can be called on + this list to switch iteration order. + """ + return _node_list(self) + + @compatibility(is_backward_compatible=True) + def graph_copy(self, g : 'Graph', val_map : Dict[Node, Node], return_output_node=False) -> 'Optional[Argument]': + """ + Copy all nodes from a given graph into ``self``. + + Args: + + g (Graph): The source graph from which to copy Nodes. + + val_map (Dict[Node, Node]): a dictionary that will be populated with a mapping + from nodes in ``g`` to nodes in ``self``. Note that ``val_map`` can be passed + in with values in it already to override copying of certain values. + + Returns: + + The value in ``self`` that is now equivalent to the output value in ``g``, + if ``g`` had an ``output`` node. ``None`` otherwise. 
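+ + Example (an illustrative sketch; ``old_graph`` is a hypothetical source Graph):: + + new_graph = torch.fx.Graph() + val_map : Dict[Node, Node] = {} + out_val = new_graph.graph_copy(old_graph, val_map) + new_graph.output(out_val)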
+ """ + for node in g.nodes: + if node in val_map: + continue + if node.op == 'output': + rv = map_arg(node.args[0], lambda n: val_map[n]) + return rv if not return_output_node else (rv, node) + val_map[node] = self.node_copy(node, lambda n : val_map[n]) + return None + + def __deepcopy__(self, memo=None) -> 'Graph': + """ + Explicitly implement __deepcopy__ to prevent excessive recursion depth + from the default implementation. This uses graph_copy to copy the nodes + in an iterative way, rather than recursive. It also populates the + memoization table to prevent unnecessary copies (e.g. references to + nodes or other parts of the Graph from a custom GraphModule implementation. + """ + memo = memo if memo else {} + g = Graph(tracer_cls=self._tracer_cls) + output_vals = g.graph_copy(self, val_map=memo, return_output_node=True) + g._codegen = copy.deepcopy(self._codegen) + assert isinstance(output_vals, tuple) + output_val, old_output_node = output_vals + new_output_node = g.output(output_val, type_expr=getattr(old_output_node, 'type', None)) + new_output_node.meta = copy.copy(old_output_node.meta) + return g + + @compatibility(is_backward_compatible=True) + def create_node(self, op: str, target: 'Target', + args: Optional[Tuple['Argument', ...]] = None, + kwargs: Optional[Dict[str, 'Argument']] = None, + name: Optional[str] = None, + type_expr: Optional[Any] = None) -> Node: + """ + Create a ``Node`` and add it to the ``Graph`` at the current insert-point. + Note that the current insert-point can be set via :meth:`Graph.inserting_before` + and :meth:`Graph.inserting_after`. + + Args: + op (str): the opcode for this Node. One of 'call_function', 'call_method', 'get_attr', + 'call_module', 'placeholder', or 'output'. The semantics of these opcodes are + described in the ``Graph`` docstring. + + args (Optional[Tuple[Argument, ...]]): is a tuple of arguments to this node. + + kwargs (Optional[Dict[str, Argument]]): the kwargs of this Node + + name (Optional[str]): an optional string name for the ``Node``. + This will influence the name of the value assigned to in the + Python generated code. + + type_expr (Optional[Any]): an optional type annotation representing the + Python type the output of this node will have. + + Returns: + + The newly-created and inserted node. + """ + assert op in ('call_function', 'call_method', 'get_attr', 'call_module', 'placeholder', 'output') + args = () if args is None else args + kwargs = {} if kwargs is None else kwargs + assert isinstance(args, tuple), "args must be a tuple" + assert isinstance(kwargs, dict), "kwargs must be a dict" + + candidate = name if name is not None else self._target_to_str(target) + name = self._graph_namespace.create_name(candidate, None) + n = Node(self, name, op, target, args, kwargs, type_expr) + + self._graph_namespace.associate_name_with_obj(name, n) + + self._insert(n) + self._len += 1 + return n + + @compatibility(is_backward_compatible=False) + def process_inputs(self, *args): + """ + Processes args so that they can be passed to the FX graph. + """ + return self._codegen.process_inputs(*args) + + @compatibility(is_backward_compatible=False) + def process_outputs(self, out): + return self._codegen.process_outputs(out) + + + @compatibility(is_backward_compatible=True) + def erase_node(self, to_erase : Node) -> None: + """ + Erases a ``Node`` from the ``Graph``. Throws an exception if + there are still users of that node in the ``Graph``. + + Args: + + to_erase (Node): The ``Node`` to erase from the ``Graph``. 
+ """ + if len(to_erase.users) > 0: + raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} ' + f'users in the graph: {to_erase.users}!') + if to_erase._erased: + warnings.warn(f"erase_node({to_erase}) on an already erased node") + return + + to_erase._remove_from_list() + to_erase._erased = True # iterators may retain handles to erased nodes + self._len -= 1 + + # Null out this Node's argument nodes so that the Nodes referred to + # can update their ``users`` accordingly + new_args = map_arg(to_erase.args, lambda n: None) + assert isinstance(new_args, tuple) + to_erase.args = new_args + new_kwargs = map_arg(to_erase.kwargs, lambda n: None) + assert isinstance(new_kwargs, dict) + to_erase.kwargs = new_kwargs + + @compatibility(is_backward_compatible=True) + def inserting_before(self, n: Optional[Node] = None): + """Set the point at which create_node and companion methods will insert into the graph. + When used within a 'with' statement, this will temporary set the insert point and + then restore it when the with statement exits:: + + with g.inserting_before(n): + ... # inserting before node n + ... # insert point restored to what it was previously + g.inserting_before(n) # set the insert point permanently + + Args: + + n (Optional[Node]): The node before which to insert. If None this will insert before + the beginning of the entire graph. + + Returns: + A resource manager that will restore the insert point on ``__exit__``. + """ + if n is None: + return self.inserting_after(self._root) + assert n.graph == self, "Node to insert before is not in graph." + return _InsertPoint(self, n.prepend) + + @compatibility(is_backward_compatible=True) + def inserting_after(self, n: Optional[Node] = None): + """Set the point at which create_node and companion methods will insert into the graph. + When used within a 'with' statement, this will temporary set the insert point and + then restore it when the with statement exits:: + + with g.inserting_after(n): + ... # inserting after node n + ... # insert point restored to what it was previously + g.inserting_after(n) # set the insert point permanently + + Args: + + n (Optional[Node]): The node before which to insert. If None this will insert after + the beginning of the entire graph. + + Returns: + A resource manager that will restore the insert point on ``__exit__``. + """ + if n is None: + return self.inserting_before(self._root) + assert n.graph == self, "Node to insert after is not in graph." + return _InsertPoint(self, n.append) + + @compatibility(is_backward_compatible=True) + def placeholder(self, name: str, type_expr: Optional[Any] = None, + default_value : Any = inspect.Signature.empty) -> Node: + """ + Insert a ``placeholder`` node into the Graph. A ``placeholder`` represents + a function input. + + Args: + + name (str): A name for the input value. This corresponds to the name + of the positional argument to the function this ``Graph`` represents. + + type_expr (Optional[Any]): an optional type annotation representing the + Python type the output of this node will have. This is needed in some + cases for proper code generation (e.g. when the function is used + subsequently in TorchScript compilation). + + default_value (Any): The default value this function argument should take + on. NOTE: to allow for `None` as a default value, `inspect.Signature.empty` + should be passed as this argument to specify that the parameter does _not_ + have a default value. + + .. 
note:: + The same insertion point and type expression rules apply for this method + as ``Graph.create_node``. + """ + args = () if default_value is inspect.Signature.empty else (default_value,) + return self.create_node('placeholder', name, args=args, type_expr=type_expr) + + @compatibility(is_backward_compatible=True) + def get_attr(self, qualified_name: str, type_expr: Optional[Any] = None) -> Node: + """ + Insert a ``get_attr`` node into the Graph. A ``get_attr`` ``Node`` represents the + fetch of an attribute from the ``Module`` hierarchy. + + Args: + + qualified_name (str): the fully-qualified name of the attribute to be retrieved. + For example, if the traced Module has a submodule named ``foo``, which has a + submodule named ``bar``, which has an attribute named ``baz``, the qualified + name ``foo.bar.baz`` should be passed as ``qualified_name``. + + type_expr (Optional[Any]): an optional type annotation representing the + Python type the output of this node will have. + + + Returns: + + The newly-created and inserted ``get_attr`` node. + + .. note:: + The same insertion point and type expression rules apply for this method + as ``Graph.create_node``. + """ + def _get_attr_reference_exists(mod: torch.nn.Module, qualified_name: str) -> bool: + module_path, _, name = qualified_name.rpartition(".") + + try: + submod: torch.nn.Module = mod.get_submodule(module_path) + except AttributeError: + warnings.warn(f"Failed to fetch module {module_path}!") + return False + + if not hasattr(submod, name): + return False + + res = getattr(submod, name) + + if (not isinstance(res, torch.nn.Module) + and not isinstance(res, torch.nn.Parameter) + and name not in submod._buffers): + return False + + return True + + if (self.owning_module and + not _get_attr_reference_exists(self.owning_module, qualified_name)): + warnings.warn("Attempted to insert a get_attr Node with no " + "underlying reference in the owning " + "GraphModule! Call " + "GraphModule.add_submodule to add the " + "necessary submodule, " + "GraphModule.add_parameter to add the " + "necessary Parameter, or " + "nn.Module.register_buffer to add the " + "necessary buffer", stacklevel=2) + return self.create_node('get_attr', qualified_name, type_expr=type_expr) + + @compatibility(is_backward_compatible=True) + def call_module(self, + module_name: str, + args: Optional[Tuple['Argument', ...]] = None, + kwargs: Optional[Dict[str, 'Argument']] = None, + type_expr: Optional[Any] = None) -> Node: + """ + Insert a ``call_module`` ``Node`` into the ``Graph``. A ``call_module`` node + represents a call to the forward() function of a ``Module`` in the ``Module`` + hierarchy. + + Args: + + module_name (str): The qualified name of the ``Module`` in the ``Module`` + hierarchy to be called. For example, if the traced ``Module`` has a + submodule named ``foo``, which has a submodule named ``bar``, the + qualified name ``foo.bar`` should be passed as ``module_name`` to + call that module. + + args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed + to the called method. Note that this should *not* include a ``self`` argument. + + kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed + to the called method + + type_expr (Optional[Any]): an optional type annotation representing the + Python type the output of this node will have. + + Returns: + + The newly-created and inserted ``call_module`` node. + + .. 
note:: + The same insertion point and type expression rules apply for this method + as :meth:`Graph.create_node`. + """ + if (self.owning_module and + self.owning_module.get_submodule(module_name) is None): + warnings.warn("Attempted to insert a call_module Node with " + "no underlying reference in the owning " + "GraphModule! Call " + "GraphModule.add_submodule to add the " + "necessary submodule") + return self.create_node('call_module', module_name, args, kwargs, type_expr=type_expr) + + @compatibility(is_backward_compatible=True) + def call_method(self, + method_name: str, + args: Optional[Tuple['Argument', ...]] = None, + kwargs: Optional[Dict[str, 'Argument']] = None, + type_expr: Optional[Any] = None) -> Node: + """ + Insert a ``call_method`` ``Node`` into the ``Graph``. A ``call_method`` node + represents a call to a given method on the 0th element of ``args``. + + Args: + + method_name (str): The name of the method to apply to the self argument. + For example, if args[0] is a ``Node`` representing a ``Tensor``, + then to call ``relu()`` on that ``Tensor``, pass ``relu`` to ``method_name``. + + args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed + to the called method. Note that this *should* include a ``self`` argument. + + kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed + to the called method + + type_expr (Optional[Any]): an optional type annotation representing the + Python type the output of this node will have. + + Returns: + + The newly created and inserted ``call_method`` node. + + .. note:: + The same insertion point and type expression rules apply for this method + as :meth:`Graph.create_node`. + """ + return self.create_node('call_method', method_name, args, kwargs, type_expr=type_expr) + + @compatibility(is_backward_compatible=True) + def call_function(self, + the_function: Callable[..., Any], + args: Optional[Tuple['Argument', ...]] = None, + kwargs: Optional[Dict[str, 'Argument']] = None, + type_expr: Optional[Any] = None) -> Node: + """ + Insert a ``call_function`` ``Node`` into the ``Graph``. A ``call_function`` node + represents a call to a Python callable, specified by ``the_function``. + + Args: + + the_function (Callable[..., Any]): The function to be called. Can be any PyTorch + operator, Python function, or member of the ``builtins`` or ``operator`` + namespaces. + + args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed + to the called function. + + kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed + to the called function + + type_expr (Optional[Any]): an optional type annotation representing the + Python type the output of this node will have. + + Returns: + + The newly created and inserted ``call_function`` node. + + .. note:: + The same insertion point and type expression rules apply for this method + as :meth:`Graph.create_node`. + """ + return self.create_node('call_function', the_function, args, kwargs, type_expr=type_expr) + + @compatibility(is_backward_compatible=True) + def node_copy(self, node: Node, arg_transform: Callable[[Node], 'Argument'] = lambda x: x) -> Node: + """ + Copy a node from one graph into another. ``arg_transform`` needs to transform arguments from + the graph of node to the graph of self. Example:: + + # Copying all the nodes in `g` into `new_graph` + g : torch.fx.Graph = ... 
+ new_graph = torch.fx.Graph() + value_remap = {} + for node in g.nodes: + value_remap[node] = new_graph.node_copy(node, lambda n : value_remap[n]) + + Args: + + node (Node): The node to copy into ``self``. + + arg_transform (Callable[[Node], Argument]): A function that transforms + ``Node`` arguments in node's ``args`` and ``kwargs`` into the + equivalent argument in ``self``. In the simplest case, this should + retrieve a value out of a table mapping Nodes in the original + graph to ``self``. + """ + args = map_arg(node.args, arg_transform) + kwargs = map_arg(node.kwargs, arg_transform) + assert isinstance(args, tuple) + assert isinstance(kwargs, dict) + result_node = self.create_node(node.op, node.target, args, kwargs, node.name, node.type) + result_node.meta = copy.copy(node.meta) + return result_node + + @compatibility(is_backward_compatible=True) + def output(self, result: 'Argument', type_expr: Optional[Any] = None): + """ + Insert an ``output`` ``Node`` into the ``Graph``. An ``output`` node represents + a ``return`` statement in Python code. ``result`` is the value that should + be returned. + + Args: + + result (Argument): The value to be returned. + + type_expr (Optional[Any]): an optional type annotation representing the + Python type the output of this node will have. + + .. note:: + + The same insertion point and type expression rules apply for this method + as ``Graph.create_node``. + """ + return self.create_node(op='output', target='output', args=(result,), type_expr=type_expr) + + def _target_to_str(self, target : Target) -> str: + if callable(target): + op = target.__name__ + else: + assert isinstance(target, str) + op = target + if _is_magic(op): + op = op[2:-2] + op = _snake_case(op) + return op + + @compatibility(is_backward_compatible=True) + def python_code(self, root_module: str, *, verbose: bool = False) -> PythonCode: + """ + Turn this ``Graph`` into valid Python code. + + Args: + + root_module (str): The name of the root module on which to look-up + qualified name targets. This is usually 'self'. + + Returns: + + A PythonCode object, consisting of two fields: + src: the Python source code representing the object + globals: a dictionary of global names in `src` -> the objects that they reference. + """ + # NOTE: [Graph Namespaces] + # + # There are two types of symbols in generated Python source code: + # locals and globals. + # Locals are locally defined by the output of a node in the Graph. + # Globals are references to external objects, like functions or types. + # + # When generating Python code, we need to make sure to name things + # appropriately. In particular: + # - All names should be unique, to avoid weird shadowing bugs. + # - These names need to be consistent, e.g. an object should always be + # referenced by the same name. + # + # To do this, we create a new namespace just for this source. All names + # that get printed must come from this namespace. + # + # Why can't we re-use node.name? Because it was generated within the + # namespace `self._graph_namespace`. In order to provide uniqueness + # over both locals (node.name) *and* globals, we create a completely + # new namespace to put all identifiers in. + namespace = _Namespace() + + # Override Node's repr to generate a valid name within our namespace. + # Since repr() is designed to produce a valid Python expression, it + # makes sense to re-use it. This way, it's easy to print something like + # Tuple[Node, Node] by simply calling repr() on it. 
Node's __repr__ is + # implemented cooperatively to allow this. + def node_repr(n: Node): + return namespace.create_name(n.name, n) + + @contextmanager + def override_node_repr(graph: Graph): + orig_repr_fns = {} + for node in graph.nodes: + orig_repr_fns[node] = node._repr_fn + node._repr_fn = node_repr + try: + yield None + finally: + # restore the original repr functions + for node in graph.nodes: + node._repr_fn = orig_repr_fns[node] + + with override_node_repr(self): + return self._python_code(root_module, namespace, verbose=verbose) + + def _python_code(self, root_module: str, namespace: _Namespace, *, verbose: bool = False) -> PythonCode: + return self._codegen._gen_python_code(self.nodes, root_module, namespace, verbose=verbose) + + + def __str__(self) -> str: + """ + Return a human-readable (not machine-readable) string representation + of this Graph + """ + placeholder_names : List[str] = [] + # This is a one-element array just so ``format_node`` can modify the closed + # over value + maybe_return_typename : List[str] = [''] + + node_strs = [node.format_node(placeholder_names) for node in self.nodes] + param_str = ', '.join(placeholder_names) + s = f'graph({param_str}){maybe_return_typename[0]}:' + for node_str in node_strs: + if node_str: + s += '\n ' + node_str + return s + + @compatibility(is_backward_compatible=True) + def print_tabular(self): + """ + Prints the intermediate representation of the graph in tabular + format. Note that this API requires the ``tabulate`` module to be + installed. + """ + try: + from tabulate import tabulate + except ImportError: + print("`print_tabular` relies on the library `tabulate`, " + "which could not be found on this machine. Run `pip " + "install tabulate` to install the library.") + raise + + node_specs = [[n.op, n.name, n.target, n.args, n.kwargs] + for n in self.nodes] + print(tabulate(node_specs, + headers=['opcode', 'name', 'target', 'args', 'kwargs'])) + + @compatibility(is_backward_compatible=True) + def lint(self): + """ + Runs various checks on this Graph to make sure it is well-formed. In + particular: + - Checks Nodes have correct ownership (owned by this graph) + - Checks Nodes appear in topological order + - If this Graph has an owning GraphModule, checks that targets + exist in that GraphModule + """ + + # Check topo order + def check_arg(arg : Node, n : Optional[Node] = None) -> None: + context_str = f' of Node \'{n}\' ' if n else ' ' + if arg.graph is not self: + raise RuntimeError(f'Argument \'{arg}\'{context_str}does not belong to this Graph, ' + f'but was used as an argument! If you are copying nodes from another graph, make ' + f'sure to use ``arg_transform`` on node_copy() to remap values\n{self}') + if arg not in seen_values: + raise RuntimeError(f'Argument \'{arg}\'{context_str}was used before it has been ' + f'defined! 
Please check that Nodes in the graph are topologically ordered\n{self}') + + seen_names : Set[str] = set() + seen_values : Set[Node] = set() + for node in self.nodes: + if node.op not in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output']: + raise RuntimeError(f'Node {node} had unknown opcode {node.op}!') + if node.graph is not self: + raise RuntimeError(f'Node \'{node}\' does not belong to this Graph!') + map_arg(node.args, lambda arg: check_arg(arg, node)) + map_arg(node.kwargs, lambda arg: check_arg(arg, node)) + seen_values.add(node) + + if node.name in seen_names: + raise RuntimeError(f'Node redefined name {node.name}!') + seen_names.add(node.name) + + # Check targets are legit + if self.owning_module: + for node in self.nodes: + if node.op == 'call_function': + if not callable(node.target): + raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but ' + 'a Callable is expected') + else: + if not isinstance(node.target, str): + raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but ' + 'a str is expected') + if node.op in ['get_attr', 'call_module']: + target_atoms = node.target.split('.') + m_itr = self.owning_module + for i, atom in enumerate(target_atoms): + new_m_itr = getattr(m_itr, atom, None) + seen_qualname = '.'.join(target_atoms[:i]) + if new_m_itr is None: + raise RuntimeError(f'Node {node} target {node.target} references nonexistent attribute ' + f'{atom} of {seen_qualname}') + if (node.op == "call_module" + and not isinstance(new_m_itr, torch.nn.Module)): + raise RuntimeError(f'Node {node} target {node.target} {atom} of {seen_qualname} does ' + 'not reference an nn.Module') + elif (node.op == "get_attr" + and not isinstance(new_m_itr, torch.nn.Module) + and not isinstance(new_m_itr, torch.nn.Parameter) + and atom not in m_itr._buffers): + warnings.warn(f'Node {node} target {node.target} {atom} of {seen_qualname} does ' + 'not reference an nn.Module, nn.Parameter, or buffer, which is ' + 'what \'get_attr\' Nodes typically target') + else: + m_itr = new_m_itr + + @compatibility(is_backward_compatible=True) + def eliminate_dead_code(self): + """ + Remove all dead code from the graph, based on each node's number of + users, and whether the nodes have any side effects. The graph must be + topologically sorted before calling. + + Returns: + bool: Whether the graph was changed as a result of the pass. + + Example: + + Before dead code is eliminated, `a` from `a = x + 1` below has no users + and thus can be eliminated from the graph without having an effect. + + .. code-block:: python + + def forward(self, x): + a = x + 1 + return x + self.attr_1 + + After dead code is eliminated, `a = x + 1` has been removed, and the rest + of `forward` remains. + + .. code-block:: python + + def forward(self, x): + return x + self.attr_1 + + .. warning:: + + Dead code elimination has some heuristics to avoid removing + side-effectful nodes (see Node.is_impure) but in general coverage + is very bad, so you should assume that this method is not sound + to call unless you know that your FX graph consists entirely + of functional operations. + """ + # Lint the graph first to make sure it's topologically sorted, otherwise + # DCE below will not behave as expected. + self.lint() + + # Reverse iterate so that when we remove a node, any nodes used as an + # input to that node have an updated user count that no longer reflects + # the removed node. 
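+ # For example, in `a = x + 1; b = a + 2; return x`, reverse iteration + # erases `b` first (it has no users), which leaves `a` user-free so it + # can be erased later in the same pass.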
+ changed = False + for node in reversed(self.nodes): + if not node.is_impure() and len(node.users) == 0: + self.erase_node(node) + changed = True + + return changed + + @compatibility(is_backward_compatible=False) + def set_codegen(self, codegen: CodeGen): + self._codegen = codegen + + @compatibility(is_backward_compatible=False) + def on_generate_code( + self, + make_transformer: Callable[[Optional[TransformCodeFunc]], TransformCodeFunc] + ): + """Register a transformer function to be called when Python code is generated + + Args: + make_transformer (Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]): + a function that returns a code transformer to be registered. + This function is called by `on_generate_code` to obtain the + code transformer. + + This function is also given as its input the currently + registered code transformer (or None if nothing is registered), + in case it is not desirable to overwrite it. This is useful to + chain code transformers together. + + Returns: + a context manager that, when used in a `with` statement, automatically + restores the previously registered code transformer. + + Example: + + .. code-block:: python + + + gm: fx.GraphModule = ... + + # This is a code transformer we want to register. This code + # transformer prepends a pdb import and trace statement at the very + # beginning of the generated torch.fx code to allow for manual + # debugging with the PDB library. + def insert_pdb(body): + return ["import pdb; pdb.set_trace()\\n", *body] + + # Registers `insert_pdb`, and overwrites the currently registered + # code transformer (given by `_` to the lambda): + gm.graph.on_generate_code( + lambda _: insert_pdb + ) + + # Or alternatively, registers a code transformer which first + # runs `body` through the existing registered transformer, then + # through `insert_pdb`: + gm.graph.on_generate_code( + lambda current_trans: ( + lambda body: insert_pdb( + current_trans(body) if current_trans + else body + ) + ) + ) + + gm.recompile() + gm(*inputs) # drops into pdb + + + This function can also be used as a context manager, with the benefit of + automatically restoring the previously registered code transformer: + + .. code-block:: python + + # ... continue from previous example + + with gm.graph.on_generate_code(lambda _: insert_pdb): + # do more stuff with `gm`... + gm.recompile() + gm(*inputs) # drops into pdb + + # now the previous code transformer is restored (but `gm`'s code with pdb + # remains - that means you can run `gm` with pdb here too, until you + # run the next `recompile()`).
+ """ + on_gen_code_old = self._codegen._body_transformer + self._codegen._body_transformer = make_transformer(on_gen_code_old) + + @contextlib.contextmanager + def on_generate_code_context_manager(): + try: + yield + finally: + self._codegen._body_transformer = on_gen_code_old + + return on_generate_code_context_manager() + + +reflectable_magic_methods = { + 'add': '{} + {}', + 'sub': '{} - {}', + 'mul': '{} * {}', + 'floordiv': '{} // {}', + 'truediv': '{} / {}', + 'div': '{} / {}', + 'mod': '{} % {}', + 'pow': '{} ** {}', + 'lshift': '{} << {}', + 'rshift': '{} >> {}', + 'and_': '{} & {}', + 'or_': '{} | {}', + 'xor': '{} ^ {}', + 'getitem': '{}[{}]', + 'matmul': '{} @ {}', +} + +magic_methods = dict({ + 'eq': '{} == {}', + 'ne': '{} != {}', + 'lt': '{} < {}', + 'gt': '{} > {}', + 'le': '{} <= {}', + 'ge': '{} >= {}', + 'pos': '+{}', + 'neg': '-{}', + 'invert': '~{}'}, **reflectable_magic_methods) + +inplace_methods = { + 'iadd': '{} += {}', + 'iand': '{} &= {}', + 'ifloordiv': '{} //= {}', + 'ilshift': '{} <<= {}', + 'imod': '{} %= {}', + 'imul': '{} *= {}', + 'imatmul': '{} @= {}', + 'ior': '{} |= {}', + 'ipow': '{} **= {}', + 'irshift': '{} >>= {}', + 'isub': '{} -= {}', + 'itruediv': '{} /= {}', + 'ixor': '{} ^= {}', + 'setitem': '{}[{}] = {}', +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/graph_module.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/graph_module.py new file mode 100644 index 0000000000000000000000000000000000000000..7951faebdbd7bb9a7334d5f9c383bc1edfe5182f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/graph_module.py @@ -0,0 +1,867 @@ +import copy +import itertools +import linecache +import os +import sys +import traceback +import warnings +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Set, Type, Union + +import torch +import torch.nn as nn +import torch.overrides +from torch.nn.modules.module import _addindent +from torch.package import Importer, PackageExporter, PackageImporter, sys_importer + +from ._compatibility import compatibility +from .graph import _custom_builtins, _is_from_torch, _PyTreeCodeGen, Graph, PythonCode + +__all__ = [ + "reduce_graph_module", + "reduce_package_graph_module", + "reduce_deploy_graph_module", + "GraphModule", +] + +_USER_PRESERVED_ATTRIBUTES_KEY = "_user_preserved_attributes" + +# Normal exec loses the source code, however we can work with +# the linecache module to recover it. +# Using _exec_with_source will add it to our local cache +# and then tools like TorchScript will be able to get source info. +class _EvalCacheLoader: + def __init__(self): + self.eval_cache = {} + self.next_id = 0 + + def cache(self, src: str, globals: Dict[str, Any], co_fields=None): + """Store the source in a private cache, and add a lazy entry in linecache + that allows the source to be retrieved by 'filename'. + + Args: + src (str): The module source to cache + globals (dict): The module globals + + Returns: + str: The cache key (and dummy filename) generated for src. 
+ """ + + key = self._get_key() + if co_fields: + key += f" from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}" + self.eval_cache[key] = src + + # Don't mutate globals so that this loader is only used + # to populate linecache, and doesn't interact with other modules + # that might check `__loader__` + globals_copy = globals.copy() + globals_copy["__file__"] = key + globals_copy["__name__"] = key + globals_copy["__loader__"] = self + linecache.lazycache(key, globals_copy) + + return key + + # Part of the loader protocol (PEP 302) + # linecache will use this method when trying to find source code + def get_source(self, module_name) -> Optional[str]: + if module_name in self.eval_cache: + return self.eval_cache[module_name] + return None + + def _get_key(self): + key = f".{self.next_id}" + self.next_id += 1 + return key + + +_loader = _EvalCacheLoader() + + +def _exec_with_source(src: str, globals: Dict[str, Any], co_fields=None): + key = _loader.cache(src, globals, co_fields) + exec(compile(src, key, "exec"), globals) + + +def _forward_from_src(src: str, globals: Dict[str, Any], co_fields=None): + return _method_from_src( + method_name="forward", src=src, globals=globals, co_fields=co_fields + ) + + +def _method_from_src( + method_name: str, src: str, globals: Dict[str, Any], co_fields=None +) -> Callable: + # avoid mutating the passed in dict + globals_copy = globals.copy() + _exec_with_source(src, globals_copy, co_fields) + fn = globals_copy[method_name] + del globals_copy[method_name] + return fn + + +def _format_import_statement(name: str, obj: Any, importer: Importer) -> str: + if name in _custom_builtins: + return _custom_builtins[name].import_str + if _is_from_torch(name): + return "import torch" + module_name, attr_name = importer.get_name(obj) + return f"from {module_name} import {attr_name} as {name}" + + +def _format_import_block(globals: Dict[str, Any], importer: Importer): + import_strs: Set[str] = set() + for name, obj in globals.items(): + import_strs.add(_format_import_statement(name, obj, importer)) + # Sort the imports so we have a stable import block that allows us to + # hash the graph module and get a consistent key for use in a cache. + return "\n".join(sorted(import_strs)) + + +@compatibility(is_backward_compatible=True) +def reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module: + # BC: attribute name was changed from `code` to `_code` to facilitate + # making `code` into a property and adding a docstring to it + fn_src = body.get("_code") or body["code"] + forward = _forward_from_src(import_block + fn_src, {}) + return _deserialize_graph_module(forward, body) + + +@compatibility(is_backward_compatible=True) +def reduce_package_graph_module( + importer: PackageImporter, body: Dict[Any, Any], generated_module_name: str +) -> torch.nn.Module: + forward = importer.import_module(generated_module_name).forward + return _deserialize_graph_module(forward, body) + + +@compatibility(is_backward_compatible=True) +def reduce_deploy_graph_module( + importer: PackageImporter, body: Dict[Any, Any], import_block: str +) -> torch.nn.Module: + ns = {} + ns["__builtins__"] = importer.patched_builtins + fn_src = body.get("_code") + assert fn_src is not None + forward = _forward_from_src(import_block + fn_src, ns) + return _deserialize_graph_module(forward, body) + + +# We create a dummy class here because symbolic_trace pulls the forward() +# function off of the class, rather than the instance. 
This class is used +# in _deserialize_graph_module() below. +class _CodeOnlyModule(torch.nn.Module): + def __init__(self, body): + super().__init__() + self.__dict__ = body + + +def _deserialize_graph_module(forward, body: Dict[Any, Any], graph_module_cls=None) -> torch.nn.Module: + """ + Deserialize a GraphModule given the dictionary of the original module, + using the code to reconstruct the graph. We delete the actual graph before + saving the dictionary so that changes to the in-memory graph format do not + get serialized. + """ + + # Try to retrieve the forward source in a backward-compatible way + _CodeOnlyModule.forward = forward + + tracer_cls = body.get("_tracer_cls") + if tracer_cls is None: + from ._symbolic_trace import Tracer + + tracer_cls = Tracer + + graphmodule_cls_name = body.get("_graphmodule_cls_name", "GraphModule") + + # This is a workaround for a mypy linter issue related to + # passing base class as an argument - https://github.com/python/mypy/issues/5865. + cls_tracer: Any = tracer_cls + + class KeepModules(cls_tracer): + # we shouldn't trace into any of the submodules, + # because they were not traced in the original GraphModule + def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool: + return True + + com = _CodeOnlyModule(body) + + tracer_extras = body.get("_tracer_extras", {}) + graph = KeepModules().trace(com, **tracer_extras) + + # Manually set Tracer class on the reconstructed Graph, to avoid + # referencing the private local subclass KeepModules. + graph._tracer_cls = tracer_cls + if graph_module_cls is None: + graph_module_cls = GraphModule + gm = graph_module_cls(com, graph, class_name=graphmodule_cls_name) + + # The GraphModule constructor only retains attributes referenced by the graph. + # In this case, our goal is to return a GraphModule as close to identical as + # possible to the one put into the package. If any additional attributes were + # present in body, we should keep them. + for k, v in body.items(): + if not hasattr(gm, k): + setattr(gm, k, v) + return gm + + +# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module' +# This installs empty Modules where none exist yet if they are subpaths of target +def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str): + *prefix, field = target.split(".") + for item in prefix: + f = getattr(from_module, item) + t = getattr(to_module, item, None) + if f is t: + # we have already installed one of its parents + # (e.g. target = root.linear.weight, but we have already installed root.linear) + # once we install a parent, we no longer need to copy the children + # since all the needed properties will already be present + return + + if t is None: + t = torch.nn.Module() + setattr(to_module, item, t) + from_module, to_module = f, t + + orig = getattr(from_module, field) + # If it is a tensor and not a parameter attribute of a module, it should be a named buffer. + # So, we register it as a named buffer in the target module.
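+ # e.g. (hypothetical names) _copy_attr(root, gm, "bn1.running_mean") walks the + # `bn1` prefix, installing empty Modules on `gm` as needed, then copies the + # `running_mean` Tensor below, registering it as a buffer since it is a plain + # Tensor rather than a Parameter.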
+ if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter): + to_module.register_buffer(field, orig) + else: + setattr(to_module, field, orig) + + +# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module' +# This installs empty Modules where none exist yet if they are subpaths of target +def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str): + *prefix, field = target.split(".") + for item in prefix: + t = getattr(to_module, item, None) + + if t is None: + t = torch.nn.Module() + setattr(to_module, item, t) + to_module = t + + # If it is a tensor and not a parameter attribute of a module, it should be a named buffer. + # So, we register it as a named buffer in the target module. + if isinstance(from_obj, torch.Tensor) and not isinstance( + from_obj, torch.nn.Parameter + ): + to_module.register_buffer(field, from_obj) + else: + setattr(to_module, field, from_obj) + + +class _WrappedCall: + def __init__(self, cls, cls_call): + self.cls = cls + self.cls_call = cls_call + + # Previously, if an error occurred when valid + # symbolically-traced code was run with an invalid input, the + # user would see the source of the error as coming from + # `File "<eval_with_key_N">`, where N is some number. We use + # this function to generate a more informative error message. We + # return the traceback itself, a message explaining that the + # error occurred in a traced Module's generated forward + # function, and five lines of context surrounding the faulty + # line + @staticmethod + def _generate_error_message(frame_summary: traceback.FrameSummary) -> str: + # auxiliary variables (for readability) + err_lineno = frame_summary.lineno + assert err_lineno is not None + line = frame_summary.line + assert line is not None + err_line_len = len(line) + all_src_lines = linecache.getlines(frame_summary.filename) + + # constituent substrings of the error message + tb_repr = traceback.format_exc() + custom_msg = ( + "Call using an FX-traced Module, " + f"line {err_lineno} of the traced Module's " + "generated forward function:" + ) + before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno]) + marker = "~" * err_line_len + "~~~ <--- HERE" + err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2]) + + # joined message + return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err]) + + def __call__(self, obj, *args, **kwargs): + try: + if self.cls_call is not None: + return self.cls_call(obj, *args, **kwargs) + else: + return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc] + except Exception as e: + assert e.__traceback__ + topmost_framesummary: traceback.FrameSummary = ( + traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1] + ) # type: ignore[arg-type] + if "eval_with_key" in topmost_framesummary.filename: + print( + _WrappedCall._generate_error_message(topmost_framesummary), + file=sys.stderr, + ) + raise e.with_traceback(None) # noqa: TRY200 + else: + raise e + + +@compatibility(is_backward_compatible=True) +class GraphModule(torch.nn.Module): + """ + GraphModule is an nn.Module generated from an fx.Graph. GraphModule has a + ``graph`` attribute, as well as ``code`` and ``forward`` attributes generated + from that ``graph``. + + .. warning:: + + When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically + regenerated.
However, if you edit the contents of the ``graph`` without reassigning + the ``graph`` attribute itself, you must call ``recompile()`` to update the generated + code. + """ + + def __new__(cls: "Type[GraphModule]", *args, **kwargs): + # each instance of a graph module needs its own forward method + # so create a new singleton class for each instance. + # it is a subclass of the user-defined class, the only difference + # is an extra layer to install the forward method + + # address issue described at https://github.com/pytorch/pytorch/issues/63883 + # in other words, traverse class hierarchy to fix the redundant class definition problem + for t in cls.__mro__: + c = t.__qualname__.split(".")[-1] + if c != "GraphModuleImpl": + cls = t + break + + class GraphModuleImpl(cls): # type: ignore[misc, valid-type] + pass + + return super().__new__(GraphModuleImpl) + + @compatibility(is_backward_compatible=True) + def __init__( + self, + root: Union[torch.nn.Module, Dict[str, Any]], + graph: Graph, + class_name: str = "GraphModule", + ): + """ + Construct a GraphModule. + + Args: + + root (Union[torch.nn.Module, Dict[str, Any]]): + ``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type. + In the case that ``root`` is a Module, any references to Module-based objects (via qualified + name) in the Graph's Nodes' ``target`` field will be copied over from the respective place + within ``root``'s Module hierarchy into the GraphModule's module hierarchy. + In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be + looked up directly in the dict's keys. The object mapped to by the Dict will be copied + over into the appropriate place within the GraphModule's module hierarchy. + + graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation + + class_name (str): ``class_name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all + error messages will report as originating from ``GraphModule``. It may be helpful to set this + to ``root``'s original name or a name that makes sense within the context of your transform. + """ + super().__init__() + self.__class__.__name__ = class_name + if isinstance(root, torch.nn.Module): + if hasattr(root, "training"): + self.training = root.training + + # When we pickle/unpickle graph module, we don't want to drop any module or attributes. + if isinstance(root, _CodeOnlyModule): + for k, _ in root.named_children(): + _copy_attr(root, self, k) + + for k, _ in root.named_buffers(): + _copy_attr(root, self, k) + + for k, _ in root.named_parameters(): + _copy_attr(root, self, k) + + for node in graph.nodes: + if node.op in ["get_attr", "call_module"]: + assert isinstance(node.target, str) + _copy_attr(root, self, node.target) + elif isinstance(root, dict): + targets_to_copy = [] + for node in graph.nodes: + if node.op in ["get_attr", "call_module"]: + assert isinstance(node.target, str) + if node.target not in root: + raise RuntimeError( + "Node " + + str(node) + + " referenced target " + + node.target + + " but that target was not provided in ``root``!" + ) + targets_to_copy.append(node.target) + # Sort targets in ascending order of the # of atoms. + # This will ensure that less deeply nested attributes are assigned + # before more deeply nested attributes. For example, foo.bar + # will be assigned before foo.bar.baz.
Otherwise, we might assign + # the user-provided ``foo.bar`` and wipe out the previously-assigned + # ``foo.bar.baz`` + targets_to_copy.sort(key=lambda t: t.count(".")) + for target_to_copy in targets_to_copy: + _assign_attr(root[target_to_copy], self, target_to_copy) + else: + raise RuntimeError("Unsupported type " + str(root) + " passed for root!") + + self.graph = graph + + # Store the Tracer class responsible for creating a Graph separately as part of the + # GraphModule state, except when the Tracer is defined in a local namespace. + # Locally defined Tracers are not pickleable. This is needed because torch.package will + # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer + # to re-create the Graph during deserialization. + self._tracer_cls = None + if ( + self.graph._tracer_cls + and "<locals>" not in self.graph._tracer_cls.__qualname__ + ): + self._tracer_cls = self.graph._tracer_cls + + self._tracer_extras = {} + if self.graph._tracer_extras: + self._tracer_extras = self.graph._tracer_extras + + # Dictionary to store metadata + self.meta: Dict[str, Any] = {} + + # TorchScript breaks trying to compile the graph setter because of the + # continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842 + # + # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway + __jit_unused_properties__ = ["graph"] + + @property + def graph(self) -> Graph: + """ + Return the ``Graph`` underlying this ``GraphModule`` + """ + return self._graph + + @graph.setter + def graph(self, g: Graph) -> None: + """ + Set the underlying ``Graph`` for this ``GraphModule``. This will internally + recompile the ``GraphModule`` so that the generated ``forward()`` function + corresponds to ``g`` + """ + assert isinstance(g, Graph), f"Expected a Graph instance, but got {type(g)}" + self._graph = g + g.owning_module = self + self.recompile() + + @compatibility(is_backward_compatible=False) + def to_folder(self, folder: Union[str, os.PathLike], module_name: str = "FxModule"): + """Dumps out module to ``folder`` with ``module_name`` so that it can be + imported with ``from <folder> import <module_name>`` + + Args: + + folder (Union[str, os.PathLike]): The folder to write the code out to + + module_name (str): Top-level name to use for the ``Module`` while + writing out the code + """ + folder = Path(folder) + Path(folder).mkdir(exist_ok=True) + torch.save(self.state_dict(), folder / "state_dict.pt") + tab = " " * 4 + custom_builtins = "\n".join([v.import_str for v in _custom_builtins.values()]) + model_str = f""" +import torch +{custom_builtins} + +from torch.nn import * +class {module_name}(torch.nn.Module): + def __init__(self): + super().__init__() +""" + + def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]: + safe_reprs = [ + nn.Linear, + nn.Conv1d, + nn.Conv2d, + nn.Conv3d, + nn.BatchNorm1d, + nn.BatchNorm2d, + nn.BatchNorm3d, + ] + if type(module) in safe_reprs: + return f"{module.__repr__()}" + else: + return None + + blobified_modules = [] + for module_name, module in self.named_children(): + module_str = _gen_model_repr(module_name, module) + if module_str is None: + module_file = folder / f"{module_name}.pt" + torch.save(module, module_file) + blobified_modules.append(module_name) + module_repr = module.__repr__().replace("\r", " ").replace("\n", " ") + module_str = f"torch.load(r'{module_file}') # {module_repr}" + model_str += f"{tab*2}self.{module_name} = {module_str}\n" + + for buffer_name, buffer in
self._buffers.items(): + if buffer is None: + continue + model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n" + + for param_name, param in self._parameters.items(): + if param is None: + continue + model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n" + + model_str += ( + f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n" + ) + model_str += f"{_addindent(self.code, 4)}\n" + + module_file = folder / "module.py" + module_file.write_text(model_str) + + init_file = folder / "__init__.py" + init_file.write_text("from .module import *") + + if len(blobified_modules) > 0: + warnings.warn( + "Was not able to save the following children modules as reprs - " + f"saved as pickled files instead: {blobified_modules}" + ) + + @compatibility(is_backward_compatible=True) + def add_submodule(self, target: str, m: torch.nn.Module) -> bool: + """ + Adds the given submodule to ``self``. + + This installs empty Modules where none exist yet if they are + subpaths of ``target``. + + Args: + target: The fully-qualified string name of the new submodule + (See example in ``nn.Module.get_submodule`` for how to + specify a fully-qualified string.) + m: The submodule itself; the actual object we want to + install in the current Module + + Return: + bool: Whether or not the submodule could be inserted. For + this method to return True, each object in the chain + denoted by ``target`` must either a) not exist yet, + or b) reference an ``nn.Module`` (not a parameter or + other attribute) + """ + *prefix, field = target.split(".") + mod: torch.nn.Module = self + + for item in prefix: + + submod = getattr(mod, item, None) + + if submod is None: + submod = torch.nn.Module() + setattr(mod, item, submod) + + if not isinstance(submod, torch.nn.Module): + return False + + mod = submod + + mod.add_module(field, m) + return True + + @compatibility(is_backward_compatible=True) + def delete_submodule(self, target: str) -> bool: + """ + Deletes the given submodule from ``self``. + + The module will not be deleted if ``target`` is not a valid + target. + + Args: + target: The fully-qualified string name of the submodule + to delete (See example in ``nn.Module.get_submodule`` for how + to specify a fully-qualified string.) + + Returns: + bool: Whether or not the target string referenced a + submodule we want to delete. A return value of ``False`` + means that the ``target`` was not a valid reference to + a submodule. + """ + atoms = target.split(".") + path, target_submod = atoms[:-1], atoms[-1] + mod: torch.nn.Module = self + + # Get the parent module + for item in path: + + if not hasattr(mod, item): + return False + + mod = getattr(mod, item) + + if not isinstance(mod, torch.nn.Module): + return False + + if not hasattr(mod, target_submod): + return False + + if not isinstance(getattr(mod, target_submod), torch.nn.Module): + return False + + delattr(mod, target_submod) + return True + + @compatibility(is_backward_compatible=True) + def delete_all_unused_submodules(self) -> None: + """ + Deletes all unused submodules from ``self``. + + A Module is considered "used" if any one of the following is + true: + 1. It has children that are used + 2. Its forward is called directly via a ``call_module`` node + 3.
It has a non-Module attribute that is used from a + ``get_attr`` node + + This method can be called to clean up an ``nn.Module`` without + manually calling ``delete_submodule`` on each unused submodule. + """ + used: List[str] = [] + + for node in self.graph.nodes: + + if node.op == "call_module" or node.op == "get_attr": + + # A list of strings representing the different parts + # of the path. For example, `foo.bar.baz` gives us + # ["foo", "bar", "baz"] + fullpath = node.target.split(".") + + # If we're looking at multiple parts of a path, join + # them with a dot. Otherwise, return that single + # element without doing anything to it. + def join_fn(x: str, y: str) -> str: + return ".".join([x, y] if y else [x]) + + # Progressively collect all the names of intermediate + # modules. For example, if we have the target + # `foo.bar.baz`, we'll add `foo`, `foo.bar`, and + # `foo.bar.baz` to the list. + for path in itertools.accumulate(fullpath, join_fn): + used.append(path) + + # For a `call_module` node, also register all recursive submodules + # as used + if node.op == "call_module": + try: + submod = self.get_submodule(node.target) + + for submod_name, _ in submod.named_modules(): + if submod_name != "": + used.append(".".join([node.target, submod_name])) + except AttributeError: + # Node referenced nonexistent submodule, don't need to + # worry about GCing anything + pass + + to_delete = [name for name, _ in self.named_modules() if name not in used] + + for name in to_delete: + self.delete_submodule(name) + + @property + def code(self) -> str: + """ + Return the Python code generated from the ``Graph`` underlying this + ``GraphModule``. + """ + if not hasattr(self, "_code"): + raise RuntimeError( + "Code has not been generated! Please report a bug to PyTorch" + ) + return self._code + + @compatibility(is_backward_compatible=True) + def recompile(self) -> PythonCode: + """ + Recompile this GraphModule from its ``graph`` attribute. This should be + called after editing the contained ``graph``, otherwise the generated + code of this ``GraphModule`` will be out of date. + """ + if isinstance(self._graph._codegen, _PyTreeCodeGen): + self._in_spec = self._graph._codegen.pytree_info.in_spec + self._out_spec = self._graph._codegen.pytree_info.out_spec + python_code = self._graph.python_code(root_module="self") + self._code = python_code.src + self._lineno_map = python_code._lineno_map + + cls = type(self) + co_fields = self._graph._co_fields if hasattr(self._graph, "_co_fields") else {} + cls.forward = _forward_from_src(self._code, python_code.globals, co_fields) + + # Determine whether this class explicitly defines a __call__ implementation + # to wrap. If it does, save it in order to have wrapped_call invoke it. + # If it does not, wrapped_call can use a dynamic call to super() instead. + # In most cases, super().__call__ should be torch.nn.Module.__call__. + # We do not want to hold a reference to Module.__call__ here; doing so will + # bypass patching of torch.nn.Module.__call__ done while symbolic tracing.
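+ # e.g. for a hypothetical `class MyGM(GraphModule): pass`, "__call__" is not in + # vars(MyGM), so `cls_call` stays None below and _WrappedCall dynamically falls + # back to super().__call__ (normally torch.nn.Module.__call__).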
+ cls_call = cls.__call__ if "__call__" in vars(cls) else None + + if "_wrapped_call" not in vars(cls): + cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined] + + def call_wrapped(self, *args, **kwargs): + return self._wrapped_call(self, *args, **kwargs) + + cls.__call__ = call_wrapped # type: ignore[method-assign] + + return python_code + + # Passing Tracer as argument allows subclasses extending fx.GraphModule + # to define their own Tracer (extending fx.Tracer). + def __reduce_deploy__(self, importer: Importer): + dict_without_graph = self.__dict__.copy() + dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__ + del dict_without_graph["_graph"] + + python_code = self.recompile() + import_block = _format_import_block(python_code.globals, importer) + return (reduce_deploy_graph_module, (dict_without_graph, import_block)) + + def __reduce_package__(self, exporter: PackageExporter): + dict_without_graph = self.__dict__.copy() + dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__ + del dict_without_graph["_graph"] + + generated_module_name = f"fx-generated._{exporter.get_unique_id()}" + python_code = self.recompile() + import_block = _format_import_block(python_code.globals, exporter.importer) + module_code = import_block + self.code + exporter.save_source_string(generated_module_name, module_code) + return ( + reduce_package_graph_module, + (dict_without_graph, generated_module_name), + ) + + def __reduce__(self): + """ + Serialization of GraphModule. We serialize only the generated code, not + the underlying ``Graph``. This is because ``Graph`` does not have on-disk + backward-compatibility guarantees, whereas Python source code does. + On the deserialization side, we symbolically trace through the generated + code to regenerate the underlying ``Graph`` + """ + dict_without_graph = self.__dict__.copy() + python_code = self.recompile() + import_block = _format_import_block(python_code.globals, sys_importer) + del dict_without_graph["_graph"] + return (reduce_graph_module, (dict_without_graph, import_block)) + + def _deepcopy_init(self): + return GraphModule.__init__ + + # because __reduce__ is defined for serialization, + # we need to define deepcopy otherwise it will call __reduce__ + # and cause symbolic tracing to occur every time we try to copy the object + def __deepcopy__(self, memo): + res = type(self).__new__(type(self)) + memo[id(self)] = res + fake_mod = _CodeOnlyModule(copy.deepcopy(self.__dict__, memo)) + self._deepcopy_init()(res, fake_mod, fake_mod.__dict__["_graph"]) + # hooks are lost during `GraphModule.__init__`, so we need to copy them + # over explicitly. Note that right now we are only copying state_dict-related + # hooks; to reduce bc-related issues, we can copy forward/backward-related + # hooks in the future as well if needed + extra_preserved_attrs = [ + "_state_dict_hooks", + "_load_state_dict_pre_hooks", + "_load_state_dict_post_hooks", + ] + for attr in extra_preserved_attrs: + if attr in self.__dict__: + setattr(res, attr, copy.deepcopy(self.__dict__[attr], memo)) + res.meta = copy.deepcopy(getattr(self, "meta", {}), memo) + if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta: + for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items(): + setattr(res, attr_name, attr) + return res + + def __copy__(self): + res = GraphModule(self, self.graph) + res.meta = getattr(self, "meta", {}) + return res + + @compatibility(is_backward_compatible=False) + def print_readable(self, print_output=True): + """ + Return the
Python code generated for the current GraphModule and its children GraphModules + """ + verbose_python_code = self._graph.python_code(root_module="self", verbose=True) + module_code = verbose_python_code.src + module_code = module_code.lstrip("\n") + module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code + module_code = _addindent(module_code, 4) + + submodule_code_list = [""] + for submodule in self.children(): + if isinstance(submodule, GraphModule): + submodule_code_list.append(submodule.print_readable(print_output=False)) + submodule_code = "\n".join(submodule_code_list) + submodule_code = _addindent(submodule_code, 4) + + output = module_code + submodule_code + if print_output: + print(module_code + submodule_code) + return output + + def __str__(self) -> str: + orig_str = super().__str__() + print_readable_reminder = ( + "# To see more debug info, please use `graph_module.print_readable()`" + ) + return "\n".join([orig_str, self._code, print_readable_reminder]) + + def _replicate_for_data_parallel(self): + new_gm = self.__copy__() + new_gm._is_replica = True + return new_gm + + +# workarounds for issues in __torch_function__ + +# WAR for __torch_function__ not handling tensor lists, +# fix is in https://github.com/pytorch/pytorch/pull/34725 +# orig_cat = torch.cat +# def patched_cat(*args, **kwargs): +# tensors = args[0] +# for t in tensors: +# if isinstance(t, Proxy): +# return t.__torch_function__(patched_cat, (), args, kwargs) +# return orig_cat(*args, **kwargs) +# patched_cat.__module__ = 'torch' +# patched_cat.__name__ = 'cat' +# torch.cat = patched_cat diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/immutable_collections.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/immutable_collections.py new file mode 100644 index 0000000000000000000000000000000000000000..a359335f6ecedaff9eca88b8a1f726573fe2e5b1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/immutable_collections.py @@ -0,0 +1,54 @@ +from typing import Any, Dict, Iterable, List, Tuple + +from ._compatibility import compatibility +from torch.utils._pytree import Context, register_pytree_node + +__all__ = ["immutable_list", "immutable_dict"] + +_help_mutation = """\ +If you are attempting to modify the kwargs or args of a torch.fx.Node object, +instead create a new copy of it and assign the copy to the node: + new_args = ... # copy and mutate args + node.args = new_args +""" + +def _no_mutation(self, *args, **kwargs): + raise NotImplementedError(f"'{type(self).__name__}' object does not support mutation.
{_help_mutation}") + +def _create_immutable_container(base, mutable_functions): + container = type('immutable_' + base.__name__, (base,), {}) + for attr in mutable_functions: + setattr(container, attr, _no_mutation) + return container + +immutable_list = _create_immutable_container(list, + ['__delitem__', '__iadd__', '__imul__', '__setitem__', 'append', + 'clear', 'extend', 'insert', 'pop', 'remove']) +immutable_list.__reduce__ = lambda self: (immutable_list, (tuple(iter(self)),)) +immutable_list.__hash__ = lambda self: hash(tuple(self)) + +compatibility(is_backward_compatible=True)(immutable_list) + +immutable_dict = _create_immutable_container(dict, ['__delitem__', '__setitem__', 'clear', 'pop', 'popitem', 'update']) +immutable_dict.__reduce__ = lambda self: (immutable_dict, (iter(self.items()),)) +immutable_dict.__hash__ = lambda self: hash(tuple(self.items())) +compatibility(is_backward_compatible=True)(immutable_dict) + + +# Register immutable collections for PyTree operations + +def _immutable_dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]: + return list(d.values()), list(d.keys()) + +def _immutable_dict_unflatten(values: Iterable[Any], context: Context) -> Dict[Any, Any]: + return immutable_dict(dict(zip(context, values))) + +def _immutable_list_flatten(d: List[Any]) -> Tuple[List[Any], Context]: + return d, None + +def _immutable_list_unflatten(values: Iterable[Any], context: Context) -> List[Any]: + return immutable_list(values) + + +register_pytree_node(immutable_dict, _immutable_dict_flatten, _immutable_dict_unflatten) +register_pytree_node(immutable_list, _immutable_list_flatten, _immutable_list_unflatten) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/interpreter.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/interpreter.py new file mode 100644 index 0000000000000000000000000000000000000000..92aead83fa4bea5e22f6c0110b5657fdf3c02480 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/interpreter.py @@ -0,0 +1,505 @@ +from .graph_module import GraphModule +from .graph import Graph +from .node import Argument, Node, Target, map_arg, map_aggregate +from .proxy import Proxy +from ._symbolic_trace import Tracer +from ._compatibility import compatibility +from . import config +import torch.fx.traceback as fx_traceback +import torch +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union +import inspect +from contextlib import contextmanager +from torch.hub import tqdm + +__all__ = ['Interpreter', 'Transformer'] + +@compatibility(is_backward_compatible=True) +class Interpreter: + """ + An Interpreter executes an FX graph Node-by-Node. This pattern + can be useful for many things, including writing code + transformations as well as analysis passes. + + Methods in the Interpreter class can be overridden to customize + the behavior of execution. The map of overrideable methods + in terms of call hierarchy:: + + run() + +-- run_node + +-- placeholder() + +-- get_attr() + +-- call_function() + +-- call_method() + +-- call_module() + +-- output() + + Example: + + Suppose we want to swap all instances of ``torch.neg`` with + ``torch.sigmoid`` and vice versa (including their ``Tensor`` + method equivalents). 
We could subclass Interpreter like so:: + + class NegSigmSwapInterpreter(Interpreter): + def call_function(self, target : Target, + args : Tuple, kwargs : Dict) -> Any: + if target == torch.sigmoid: + return torch.neg(*args, **kwargs) + return super().call_function(target, args, kwargs) + + def call_method(self, target : Target, + args : Tuple, kwargs : Dict) -> Any: + if target == 'neg': + call_self, *args_tail = args + return call_self.sigmoid(*args_tail, **kwargs) + return super().call_method(target, args, kwargs) + + def fn(x): + return torch.sigmoid(x).neg() + + gm = torch.fx.symbolic_trace(fn) + input = torch.randn(3, 4) + result = NegSigmSwapInterpreter(gm).run(input) + torch.testing.assert_close(result, torch.neg(input).sigmoid()) + + Args: + module (GraphModule): The module to be executed + garbage_collect_values (bool): Whether to delete values after their last + use within the Module's execution. This ensures optimal memory usage during + execution. This can be disabled to, for example, examine all of the intermediate + values in the execution by looking at the ``Interpreter.env`` attribute. + """ + @compatibility(is_backward_compatible=True) + def __init__(self, module : GraphModule, garbage_collect_values : bool = True): + assert isinstance(module, GraphModule) + self.module = module + self.submodules = dict(self.module.named_modules()) + self.env : Dict[Node, Any] = {} + self.name = "Interpreter" + self.garbage_collect_values = garbage_collect_values + self.extra_traceback = True + + if self.garbage_collect_values: + # Run through reverse nodes and record the first instance of a use + # of a given node. This represents the *last* use of the node in the + # execution order of the program, which we will use to free unused + # values + node_to_last_use : Dict[Node, Node] = {} + self.user_to_last_uses : Dict[Node, List[Node]] = {} + + def register_last_uses(n : Node, user : Node): + if n not in node_to_last_use: + node_to_last_use[n] = user + self.user_to_last_uses.setdefault(user, []).append(n) + + for node in reversed(self.module.graph.nodes): + map_arg(node.args, lambda n: register_last_uses(n, node)) + map_arg(node.kwargs, lambda n: register_last_uses(n, node)) + + @compatibility(is_backward_compatible=True) + def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None, enable_io_processing : bool = True) -> Any: + """ + Run `module` via interpretation and return the result. + + Args: + *args: The arguments to the Module to run, in positional order + initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution. + This is a dict mapping `Node` to any value. This can be used, for example, to + pre-populate results for certain `Nodes` so as to do only partial evaluation within + the interpreter. + enable_io_processing (bool): If true, we process the inputs and outputs with graph's process_inputs and + process_outputs functions first before using them. + + Returns: + Any: The value returned from executing the Module + """ + self.env = initial_env if initial_env is not None else {} + + # Positional function args are consumed left-to-right by + # `placeholder` nodes. Use an iterator to keep track of + # position and extract those values.
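+ # Usage sketch (hypothetical `gm`): `Interpreter(gm).run(torch.randn(3, 4))` + # feeds each positional argument to the matching `placeholder` node in + # graph order via the iterator set up below.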
+ if enable_io_processing: + args = self.module.graph.process_inputs(*args) + self.args_iter : Iterator[Any] = iter(args) + pbar = tqdm(total=len(self.module.graph.nodes), + desc=f"{self.name}: {str(list(self.module.graph.nodes)) if config.verbose_progress else ''}", + initial=0, position=0, leave=True, disable=config.disable_progress, delay=0) + + for node in self.module.graph.nodes: + pbar.update(1) + if node in self.env: + # Short circuit if we have this value. This could + # be used, for example, for partial evaluation + # where the caller has pre-populated `env` with + # values for a subset of the program. + continue + + try: + self.env[node] = self.run_node(node) + except Exception as e: + if self.extra_traceback: + msg = f"While executing {node.format_node()}" + msg = f'{e.args[0]}\n\n{msg}' if e.args else str(msg) + msg += f"\nOriginal traceback:\n{node.stack_trace}" + e.args = (msg,) + e.args[1:] + if isinstance(e, KeyError): + raise RuntimeError(*e.args) from e + raise + + if self.garbage_collect_values: + for to_delete in self.user_to_last_uses.get(node, []): + del self.env[to_delete] + + if node.op == 'output': + output_val = self.env[node] + return self.module.graph.process_outputs(output_val) if enable_io_processing else output_val + + @compatibility(is_backward_compatible=True) + def boxed_run(self, args_list): + """ + Run `module` via interpretation and return the result. This uses the "boxed" + calling convention, where you pass a list of arguments, which will be cleared + by the interpreter. This ensures that input tensors are promptly deallocated. + """ + args_iter = iter(args_list) + env = {} + for n in self.module.graph.nodes: + if n.op == "placeholder": + env[n] = next(args_iter) + args_list.clear() + return self.run(initial_env=env) + + @contextmanager + def _set_current_node(self, node): + with fx_traceback.set_current_meta(node): + yield + + @compatibility(is_backward_compatible=True) + def run_node(self, n : Node) -> Any: + """ + Run a specific node ``n`` and return the result. + Calls into placeholder, get_attr, call_function, + call_method, call_module, or output depending + on ``node.op`` + + Args: + n (Node): The Node to execute + + Returns: + Any: The result of executing ``n`` + """ + with self._set_current_node(n): + args, kwargs = self.fetch_args_kwargs_from_env(n) + assert isinstance(args, tuple) + assert isinstance(kwargs, dict) + return getattr(self, n.op)(n.target, args, kwargs) + + # Main Node running APIs + @compatibility(is_backward_compatible=True) + def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + """ + Execute a ``placeholder`` node. Note that this is stateful: + ``Interpreter`` maintains an internal iterator over + arguments passed to ``run`` and this method returns + next() on that iterator. + + Args: + target (Target): The call target for this node. See + `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + + Returns: + Any: The argument value that was retrieved. + """ + assert isinstance(target, str) + if target.startswith('*'): + # For a starred parameter e.g. `*args`, retrieve all + # remaining values from the args list.
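+ # e.g. for a traced `def forward(self, *args)`, the placeholder target is + # '*args' and every remaining positional input is collected into one list.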
+ return list(self.args_iter) + else: + try: + return next(self.args_iter) + except StopIteration as si: + if len(args) > 0: + return args[0] + else: + raise RuntimeError(f'Expected positional argument for parameter {target}, but one was not passed in!') from si + + @compatibility(is_backward_compatible=True) + def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + """ + Execute a ``get_attr`` node. Will retrieve an attribute + value from the ``Module`` hierarchy of ``self.module``. + + Args: + target (Target): The call target for this node. See + `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + + Returns: + Any: The value of the attribute that was retrieved + """ + assert isinstance(target, str) + return self.fetch_attr(target) + + @compatibility(is_backward_compatible=True) + def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + """ + Execute a ``call_function`` node and return the result. + + Args: + target (Target): The call target for this node. See + `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + + Returns: + Any: The value returned by the function invocation + """ + assert not isinstance(target, str) + + # Execute the function and return the result + return target(*args, **kwargs) + + @compatibility(is_backward_compatible=True) + def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + """ + Execute a ``call_method`` node and return the result. + + Args: + target (Target): The call target for this node. See + `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + + Returns: + Any: The value returned by the method invocation + """ + # args[0] is the `self` object for this method call + self_obj, *args_tail = args + + # Execute the method and return the result + assert isinstance(target, str) + return getattr(self_obj, target)(*args_tail, **kwargs) + + @compatibility(is_backward_compatible=True) + def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + """ + Execute a ``call_module`` node and return the result. + + Args: + target (Target): The call target for this node. See + `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + + Returns: + Any: The value returned by the module invocation + """ + # Retrieve executed args and kwargs values from the environment + + # Execute the method and return the result + assert isinstance(target, str) + submod = self.fetch_attr(target) + + return submod(*args, **kwargs) + + @compatibility(is_backward_compatible=True) + def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + """ + Execute an ``output`` node. This really just retrieves + the value referenced by the ``output`` node and returns it. + + Args: + target (Target): The call target for this node.
See + `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + + Returns: + Any: The return value referenced by the output node + """ + return args[0] + + # Helper methods + @compatibility(is_backward_compatible=True) + def fetch_attr(self, target : str): + """ + Fetch an attribute from the ``Module`` hierarchy of ``self.module``. + + Args: + target (str): The fully-qualified name of the attribute to fetch + + Returns: + Any: The value of the attribute. + """ + target_atoms = target.split('.') + attr_itr = self.module + for i, atom in enumerate(target_atoms): + if not hasattr(attr_itr, atom): + raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}") + attr_itr = getattr(attr_itr, atom) + return attr_itr + + @compatibility(is_backward_compatible=True) + def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]: + """ + Fetch the concrete values of ``args`` and ``kwargs`` of node ``n`` + from the current execution environment. + + Args: + n (Node): The node for which ``args`` and ``kwargs`` should be fetched. + + Returns: + Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``. + """ + args = self.map_nodes_to_values(n.args, n) + assert isinstance(args, tuple) + kwargs = self.map_nodes_to_values(n.kwargs, n) + assert isinstance(kwargs, dict) + return args, kwargs + + @compatibility(is_backward_compatible=True) + def map_nodes_to_values(self, args : Argument, n : Node) -> Argument: + """ + Recursively descend through ``args`` and look up the concrete value + for each ``Node`` in the current execution environment. + + Args: + args (Argument): Data structure within which to look up concrete values + + n (Node): Node to which ``args`` belongs. This is only used for error reporting. + """ + def load_arg(n_arg : Node) -> Any: + if n_arg not in self.env: + raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() ' + f'to diagnose such issues') + return self.env[n_arg] + return map_arg(args, load_arg) + +@compatibility(is_backward_compatible=True) +class Transformer(Interpreter): + """ + ``Transformer`` is a special type of interpreter that produces a + new ``Module``. It exposes a ``transform()`` method that returns + the transformed ``Module``. ``Transformer`` does not require + arguments to run, unlike ``Interpreter``. ``Transformer`` works + entirely symbolically. + + Example: + + Suppose we want to swap all instances of ``torch.neg`` with + ``torch.sigmoid`` and vice versa (including their ``Tensor`` + method equivalents). We could subclass ``Transformer`` like so:: + + class NegSigmSwapXformer(Transformer): + def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + if target == torch.sigmoid: + return torch.neg(*args, **kwargs) + return super().call_function(target, args, kwargs) + + def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + if target == 'neg': + call_self, *args_tail = args + return call_self.sigmoid(*args_tail, **kwargs) + return super().call_method(target, args, kwargs) + + def fn(x): + return torch.sigmoid(x).neg() + + gm = torch.fx.symbolic_trace(fn) + + transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform() + input = torch.randn(3, 4) + torch.testing.assert_close(transformed(input), torch.neg(input).sigmoid()) + + Args: + module (GraphModule): The ``Module`` to be transformed.
+ """ + + @compatibility(is_backward_compatible=True) + def __init__(self, module): + super().__init__(module) + self.new_graph = Graph() + self.new_graph.set_codegen(module.graph._codegen) + + class TransformerTracer(Tracer): + def __init__(self, graph: Graph): + super().__init__() + self.graph = graph + self.tensor_attrs: Dict[torch.Tensor, str] = {} # type: ignore[assignment] + + def is_leaf_module(self, _, __) -> bool: + return True + + self.tracer = TransformerTracer(self.new_graph) + self.tracer.root = module + + @compatibility(is_backward_compatible=True) + def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy: + """ + Execute a ``placeholder`` node. In ``Transformer``, this is + overridden to insert a new ``placeholder`` into the output + graph. + + Args: + target (Target): The call target for this node. See + `Node `__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + """ + assert isinstance(target, str) + default_value = next(iter(args)) if args else inspect.Signature.empty + return Proxy(self.new_graph.placeholder(target, default_value=default_value), self.tracer) + + @compatibility(is_backward_compatible=True) + def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy: + """ + Execute a ``get_attr`` node. In ``Transformer``, this is + overridden to insert a new ``get_attr`` node into the output + graph. + + Args: + target (Target): The call target for this node. See + `Node `__ for + details on semantics + args (Tuple): Tuple of positional args for this invocation + kwargs (Dict): Dict of keyword arguments for this invocation + """ + assert isinstance(target, str) + return self.tracer.create_proxy("get_attr", target, args, kwargs) + + @compatibility(is_backward_compatible=True) + def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + # Override so that the leaf module policy from `self.tracer` is respected. + assert isinstance(target, str) + submod = self.fetch_attr(target) + return self.tracer.call_module(submod, submod.forward, args, kwargs) + + @compatibility(is_backward_compatible=True) + def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any: + # Override so that functions that were wrapped are still wrapped. + return self.tracer.create_proxy('call_function', target, args, kwargs) + + @compatibility(is_backward_compatible=True) + def transform(self) -> GraphModule: + """ + Transform ``self.module`` and return the transformed + ``GraphModule``. + """ + with fx_traceback.preserve_node_meta(): + result = super().run(enable_io_processing=False) + if result is not None: + def strip_proxy(a : Union[Argument, Proxy]) -> Any: + return a.node if isinstance(a, Proxy) else a + self.new_graph.output(map_aggregate(result, strip_proxy)) + return GraphModule(self.module, self.new_graph) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/node.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/node.py new file mode 100644 index 0000000000000000000000000000000000000000..c0df33a380929d727d46cba379b1039203695660 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/node.py @@ -0,0 +1,696 @@ +# Nodes represent a definition of a value in our graph of operators. 
+from typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set +from ._compatibility import compatibility +from .immutable_collections import immutable_dict, immutable_list +import torch +import builtins +import types +import inspect +import warnings +from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair +from .._ops import ops as _ops + +if TYPE_CHECKING: + from .graph import Graph + +__all__ = ['Node', 'map_arg', 'map_aggregate', "has_side_effect"] + +BaseArgumentTypes = Union[str, int, float, bool, complex, torch.dtype, + torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload] +base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined] + +Target = Union[Callable[..., Any], str] + +Argument = Optional[Union[ + Tuple[Any, ...], # actually Argument, but mypy can't represent recursive types + List[Any], # actually Argument + Dict[str, Any], # actually Argument + slice, # Slice[Argument, Argument, Argument], but slice is not a templated type in typing + range, + 'Node', + BaseArgumentTypes +]] + +_side_effectful_functions: Set[Callable] = { + torch._assert, + torch._assert_async, + _ops.aten._assert_async.msg, + _ops.aten.copy_.default, + _ops.aten.sym_constrain_range.default, + _ops.aten.sym_constrain_range_for_size.default, + _ops.profiler._record_function_enter, + _ops.profiler._record_function_enter_new, + _ops.profiler._record_function_exit, + _ops.inductor.accumulate_grad_.default, +} + + +@compatibility(is_backward_compatible=False) +def has_side_effect(fn: Callable) -> Callable: + _side_effectful_functions.add(fn) + return fn + + +# this is fixed on master, WAR for 1.5 +def _find_module_of_method(orig_method: Callable[..., Any]) -> str: + name = orig_method.__name__ + module = orig_method.__module__ + if module is not None: + return module + for guess in [torch, torch.nn.functional]: + if getattr(guess, name, None) is orig_method: + return guess.__name__ + raise RuntimeError(f'cannot find module for {orig_method}') + +# Borrowed from CPython typing module +# https://github.com/python/cpython/blob/f90dc36c15d7fee0efaf6d39e97be0bdf2683e93/Lib/typing.py#L156 +def _type_repr(obj): + """Return the repr() of an object, special-casing types (internal helper). + If obj is a type, we return a shorter version than the default + type.__repr__, based on the module and qualified name, which is + typically enough to uniquely identify a type. For everything + else, we fall back on repr(obj).
+ """ + if isinstance(obj, type): + if obj.__module__ == 'builtins': + return obj.__qualname__ + return f'{obj.__module__}.{obj.__qualname__}' + if obj is ...: + return('...') + if isinstance(obj, types.FunctionType): + return obj.__name__ + return repr(obj) + +def _get_qualified_name(func: Callable[..., Any]) -> str: + # things like getattr just appear in builtins + if getattr(builtins, func.__name__, None) is func: + return func.__name__ + # torch.Tensor.{fn} + if (isinstance(func, (types.MethodDescriptorType, types.WrapperDescriptorType)) + and func is getattr(torch.Tensor, func.__name__, None)): + return f"torch.Tensor.{func.__name__}" + name = func.__name__ + if name == "": + # For lambdas, try to get their defining name in the module + try: + name = inspect.getsource(func).split("=")[0].strip() + except Exception as e: + raise RuntimeError("Unable to represent lambda") from e + module = _find_module_of_method(func) + module = module.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module + # Fixup segment_reduce mismatch + if module == "torch" and name == "segment_reduce": + name = "_" + name + return f'{module}.{name}' + +def _format_arg(arg, max_list_len=float('inf')) -> str: + if hasattr(arg, '_custom_fx_repr_fn'): + return arg._custom_fx_repr_fn() + elif isinstance(arg, list): + items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len) + maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]' + return f'[{items}{maybe_len}]' + elif isinstance(arg, tuple): + items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len) + maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]' + maybe_comma = ',' if len(arg) == 1 else '' + return f'({items}{maybe_comma}{maybe_len})' + elif isinstance(arg, dict): + items_str = ', '.join(f'{k}: {_format_arg(v)}' for k, v in arg.items()) + return f'{{{items_str}}}' + + if isinstance(arg, Node): + return '%' + str(arg) + else: + return str(arg) + +@compatibility(is_backward_compatible=True) +class Node: + """ + ``Node`` is the data structure that represents individual operations within + a ``Graph``. For the most part, Nodes represent callsites to various entities, + such as operators, methods, and Modules (some exceptions include nodes that + specify function inputs and outputs). Each ``Node`` has a function specified + by its ``op`` property. The ``Node`` semantics for each value of ``op`` are as follows: + + - ``placeholder`` represents a function input. The ``name`` attribute specifies the name this value will take on. + ``target`` is similarly the name of the argument. ``args`` holds either: 1) nothing, or 2) a single argument + denoting the default parameter of the function input. ``kwargs`` is don't-care. Placeholders correspond to + the function parameters (e.g. ``x``) in the graph printout. + - ``get_attr`` retrieves a parameter from the module hierarchy. ``name`` is similarly the name the result of the + fetch is assigned to. ``target`` is the fully-qualified name of the parameter's position in the module hierarchy. + ``args`` and ``kwargs`` are don't-care + - ``call_function`` applies a free function to some values. ``name`` is similarly the name of the value to assign + to. ``target`` is the function to be applied. 
``args`` and ``kwargs`` represent the arguments to the function, + following the Python calling convention + - ``call_module`` applies a module in the module hierarchy's ``forward()`` method to given arguments. ``name`` is + as previous. ``target`` is the fully-qualified name of the module in the module hierarchy to call. + ``args`` and ``kwargs`` represent the arguments to invoke the module on, *excluding the self argument*. + - ``call_method`` calls a method on a value. ``name`` is as previous. ``target`` is the string name of the method + to apply to the ``self`` argument. ``args`` and ``kwargs`` represent the arguments to invoke the method on, + *including the self argument* + - ``output`` contains the output of the traced function in its ``args[0]`` attribute. This corresponds to the "return" statement + in the Graph printout. + """ + + @compatibility(is_backward_compatible=True) + def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target', + args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'], + return_type : Optional[Any] = None) -> None: + """ + Instantiate an instance of ``Node``. Note: most often, you want to use the + Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather + than instantiating a ``Node`` directly. + + Args: + graph (Graph): The ``Graph`` to which this ``Node`` should belong. + + name (str): The name to which the output of this ``Node`` should be assigned + + op (str): The opcode for this ``Node``. Can be one of 'placeholder', + 'call_method', 'call_module', 'call_function', 'get_attr', + 'output' + + target ('Target'): The target this op should call. See the broader + ``Node`` docstring for more details. + + args (Tuple['Argument']): The args to be passed to ``target`` + + kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target`` + + return_type (Optional[Any]): The python type expression representing the + type of the output of this node. This field can be used for + annotation of values in the generated code or for other types + of analyses. + """ + self.graph = graph + self.name = name # unique name of value being created + assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root'] + self.op = op # the kind of operation = placeholder|call_method|call_module|call_function|get_attr + if op == 'call_function': + if not callable(target): + raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} ' + 'but a Callable is expected') + else: + if not isinstance(target, str): + raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} ' + 'but a str is expected') + self.target = target # for method/module/function, the name of the method/module/function/attr + # being invoked, e.g add, layer1, or torch.add + + # All `Node`-valued inputs. Key is the Node, value is don't-care. + # The public API for this is `all_input_nodes`, this private attribute + # should not be accessed directly. + self._input_nodes : Dict[Node, None] = {} + self.__update_args_kwargs(map_arg(args, lambda x: x), map_arg(kwargs, lambda x: x)) # type: ignore[arg-type]
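# Illustrative sketch, not part of the upstream diff (the ``__init__`` bookkeeping
# continues just below): the opcode taxonomy documented on ``Node`` above can be
# observed by tracing a small module and printing each node's ``op`` and ``target``.
# ``TinyModule`` is an invented example.
import torch
import torch.fx

class TinyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.relu(self.linear(x)).sum()

tiny = torch.fx.symbolic_trace(TinyModule())
for n in tiny.graph.nodes:
    print(n.op, n.target)  # placeholder, call_module, call_function, call_method, output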
+ # All of the nodes that use the value produced by this Node + # Note one user may correspond to several uses, e.g. the node for ``x + x`` + # would appear once here, but represents two uses. + # + # Is a dict to act as an "ordered set". Keys are significant, values are don't-care. + self.users : Dict[Node, None] = {} + # Type expression representing the output value of this node. + # This should contain the same class of Type objects that would appear + # as type annotations for function inputs/outputs. + # + # For placeholder nodes, this value will be used to type-annotate the + # generated function parameters. + # For the return node, this value will be used to type-annotate the + # generated function return type. (Note this is a special case. ``return`` + # does not produce a value, it's more of a notation. Thus, this value + # describes the type of args[0] in the ``return`` node.) + self.type : Optional[Any] = return_type + self._prev = self + self._next = self + self._erased = False + + # If set, use this fn to print this node + self._repr_fn : Optional[Callable[[Node], str]] = None + + # Dictionary to store metadata that passes need to do their + # transformations. This metadata is preserved across node copies + self.meta : Dict[str, Any] = {} + + @property + def next(self) -> 'Node': + """ + Returns the next ``Node`` in the linked list of Nodes. + + Returns: + + The next ``Node`` in the linked list of Nodes. + """ + return self._next + + @property + def prev(self) -> 'Node': + """ + Returns the previous ``Node`` in the linked list of Nodes. + + Returns: + + The previous ``Node`` in the linked list of Nodes. + """ + return self._prev + + @compatibility(is_backward_compatible=True) + def prepend(self, x: 'Node') -> None: + """ + Insert x before this node in the list of nodes in the graph. Example:: + + Before: p -> self + bx -> x -> ax + After: p -> x -> self + bx -> ax + + Args: + x (Node): The node to put before this node. Must be a member of the same graph. + """ + assert self.graph == x.graph, "Attempting to move a Node into a different Graph" + if self == x: + warnings.warn("Trying to prepend a node to itself. This behavior has no effect on the graph.") + return + x._remove_from_list() + p = self._prev + p._next, x._prev = x, p + x._next, self._prev = self, x + + @compatibility(is_backward_compatible=True) + def append(self, x: 'Node') -> None: + """ + Insert ``x`` after this node in the list of nodes in the graph. + Equivalent to ``self.next.prepend(x)`` + + Args: + x (Node): The node to put after this node. Must be a member of the same graph. + """ + self._next.prepend(x) + + def _remove_from_list(self): + p, n = self._prev, self._next + p._next, n._prev = n, p + + @property + def args(self) -> Tuple[Argument, ...]: + """ + The tuple of arguments to this ``Node``. The interpretation of arguments + depends on the node's opcode. See the :class:`Node` docstring for more + information. + + Assignment to this property is allowed. All accounting of uses and users + is updated automatically on assignment. + """ + return self._args + + @args.setter + def args(self, a : Tuple[Argument, ...]): + """ + Set the tuple of arguments to this Node. The interpretation of arguments + depends on the node's opcode. See the ``fx.Graph`` docstring for more + information. + """ + # DO NOT CALL `__update_args_kwargs` directly. The correct way to + # set `args` is via direct assignment, i.e. `node.args = new_args` + self.__update_args_kwargs(map_arg(a, lambda x: x), self._kwargs) # type: ignore[arg-type] + + @property + def kwargs(self) -> Dict[str, Argument]: + """ + The dict of keyword arguments to this ``Node``. The interpretation of arguments + depends on the node's opcode. See the :class:`Node` docstring for more + information.
+ + Assignment to this property is allowed. All accounting of uses and users + is updated automatically on assignment. + """ + return self._kwargs + + @kwargs.setter + def kwargs(self, k : Dict[str, Argument]): + """ + Set the dict of kwargs to this Node. The interpretation of arguments + depends on the node's opcode. See the ``fx.Graph`` docstring for more + information. + """ + # DO NOT CALL `__update_args_kwargs` directly. The correct way to + # set `kwargs` is via direct assignment, i.e. `node.kwargs = new_kwargs` + self.__update_args_kwargs(self._args, map_arg(k, lambda x: x)) # type: ignore[arg-type] + + @property + def all_input_nodes(self) -> List['Node']: + """ + Return all Nodes that are inputs to this Node. This is equivalent to + iterating over ``args`` and ``kwargs`` and only collecting the values that + are Nodes. + + Returns: + + List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this + ``Node``, in that order. + """ + return list(self._input_nodes.keys()) + + @compatibility(is_backward_compatible=True) + def update_arg(self, idx : int, arg : Argument) -> None: + """ + Update an existing positional argument to contain the new value + ``arg``. After calling, ``self.args[idx] == arg``. + + Args: + + idx (int): The index into ``self.args`` of the element to update + arg (Argument): The new argument value to write into ``args`` + """ + args = list(self.args) + args[idx] = arg + self.args = tuple(args) + + @compatibility(is_backward_compatible=True) + def insert_arg(self, idx : int, arg : Argument) -> None: + """ + Insert a positional argument into the argument list at the given index. + + Args: + + idx (int): The index in ``self.args`` before which to insert the new argument. + arg (Argument): The new argument value to insert into ``args`` + """ + assert 0 <= idx <= len(self.args), "insert_args index must be between 0 and len(self.args)" + args_left = self.args[:idx] + args_right = self.args[idx:] + + self._args = args_left + (arg,) + args_right + + _new_input_nodes = {} + map_arg(arg, _new_input_nodes.setdefault) + + for new_use in _new_input_nodes.keys(): + if new_use not in self._input_nodes: + self._input_nodes.setdefault(new_use) + new_use.users.setdefault(self) + + @compatibility(is_backward_compatible=True) + def update_kwarg(self, key : str, arg : Argument) -> None: + """ + Update an existing keyword argument to contain the new value + ``arg``. After calling, ``self.kwargs[key] == arg``. + + Args: + + key (str): The key in ``self.kwargs`` of the element to update + arg (Argument): The new argument value to write into ``kwargs`` + """ + kwargs = dict(self.kwargs) + kwargs[key] = arg + self.kwargs = kwargs + + @property + def stack_trace(self) -> Optional[str]: + """ + Return the Python stack trace that was recorded during tracing, if any. + When traced with fx.Tracer, this property is usually populated by + `Tracer.create_proxy`. To record stack traces during tracing for debug purposes, + set `record_stack_traces = True` on the `Tracer` instance. + When traced with dynamo, this property will be populated by default by + `OutputGraph.create_proxy`. + + stack_trace would have the innermost frame at the end of the string. + """ + return self.meta.get("stack_trace", None) + + @stack_trace.setter + def stack_trace(self, trace : Optional[str]): + self.meta["stack_trace"] = trace + + def __update_args_kwargs(self, new_args : Tuple['Argument', ...], new_kwargs : Dict[str, 'Argument']): + """ + This API is internal. Do *not* call it directly.
+ """ + self._args = new_args + self._kwargs = new_kwargs + + for old_use in self._input_nodes.keys(): + old_use.users.pop(self) + + self._input_nodes = {} + map_arg(self._args, self._input_nodes.setdefault) + map_arg(self._kwargs, self._input_nodes.setdefault) + + for new_use in self._input_nodes.keys(): + new_use.users.setdefault(self) + + def __repr__(self) -> str: + if self._repr_fn: + return self._repr_fn(self) + return self.name + + def _pretty_print_target(self, target): + """ + Make target printouts more user-friendly. + 1) builtins will be printed as `builtins.xyz` + 2) operators will be printed as `operator.xyz` + 3) other callables will be printed with qualified name, e.g. torch.add + """ + if isinstance(target, str): + return target + if hasattr(target, '__module__'): + if not hasattr(target, '__name__'): + # Just to be defensive, if we don't have `__name__`, get the + # qualname. Not sure if this happens for any members of `operator` + # or `builtins`. This fallback path is not as good, since e.g. + # things in `operator` have `_operator` as their __module__. + return _get_qualified_name(target) + if target.__module__ == 'builtins': + return f'builtins.{target.__name__}' + elif target.__module__ == '_operator': + return f'operator.{target.__name__}' + return _get_qualified_name(target) + + @compatibility(is_backward_compatible=True) + def format_node(self, + placeholder_names: Optional[List[str]] = None, + maybe_return_typename: Optional[List[str]] = None) -> Optional[str]: + """ + Return a descriptive string representation of ``self``. + + This method can be used with no arguments as a debugging + utility. + + This function is also used internally in the ``__str__`` method + of ``Graph``. Together, the strings in ``placeholder_names`` + and ``maybe_return_typename`` make up the signature of the + autogenerated ``forward`` function in this Graph's surrounding + GraphModule. ``placeholder_names`` and ``maybe_return_typename`` + should not be used otherwise. + + Args: + placeholder_names: A list that will store formatted strings + representing the placeholders in the generated + ``forward`` function. Internal use only. + maybe_return_typename: A single-element list that will store + a formatted string representing the output of the + generated ``forward`` function. Internal use only. + + Returns: + str: If 1) we're using ``format_node`` as an internal helper + in the ``__str__`` method of ``Graph``, and 2) ``self`` + is a placeholder Node, return ``None``. Otherwise, + return a descriptive string representation of the + current Node. 
+ """ + if self.op == 'placeholder': + assert isinstance(self.target, str) + arg_str = self.target + arg_str += arg_str + f': {_type_repr(self.type)}' if self.type else '' + if placeholder_names: + placeholder_names.append(arg_str) + return None + maybe_typename = f'{_type_repr(self.type)} ' if self.type else '' + default_val = '(default=' + str(self.args[0]) + ')' if self.args else '' + return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = {self.op}[target={self.target}]{default_val}' + elif self.op == 'get_attr': + maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else '' + return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \ + f'{self.op}[target={self._pretty_print_target(self.target)}]' + elif self.op == 'output': + if self.type and maybe_return_typename: + maybe_return_typename[0] = f' -> {_type_repr(self.type)}' + return f'return {self.args[0]}' + else: + maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else '' + return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \ + f'{self.op}[target={self._pretty_print_target(self.target)}](' \ + f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})' + + @compatibility(is_backward_compatible=True) + def replace_all_uses_with(self, + replace_with : 'Node', + delete_user_cb: Callable[['Node'], bool] = lambda user: True, + *, + propagate_meta=False + ) -> List['Node']: + """ + Replace all uses of ``self`` in the Graph with the Node ``replace_with``. + + Args: + + replace_with (Node): The node to replace all uses of ``self`` with. + delete_user_cb (Callable): Callback that is called to determine + whether a given user of the self node should be removed. + propagate_meta (bool): Whether or not to copy all properties + on the .meta field of the original node onto the replacement node. + For safety, this is only valid to do if the replacement node + doesn't already have an existing .meta field. + + Returns: + + The list of Nodes on which this change was made. + """ + if propagate_meta: + assert len(replace_with.meta) == 0, \ + 'Called node.replace_all_uses_with(replace_with, propagate_meta=True), ' \ + 'but replace_with already has .meta keys' + for k, v in self.meta.items(): + replace_with.meta[k] = v + to_process = list(self.users) + skipped = [] + for use_node in to_process: + if not delete_user_cb(use_node): + skipped.append(use_node) + continue + + def maybe_replace_node(n : Node) -> Node: + if n == self: + return replace_with + else: + return n + + new_args = map_arg(use_node.args, maybe_replace_node) + new_kwargs = map_arg(use_node.kwargs, maybe_replace_node) + assert isinstance(new_args, tuple) + assert isinstance(new_kwargs, dict) + use_node.__update_args_kwargs(new_args, new_kwargs) + + assert len(self.users) - len(skipped) == 0 + return [n for n in to_process if n not in skipped] + + @compatibility(is_backward_compatible=False) + def is_impure(self): + """ + Returns whether this op is impure, i.e. if its op is a placeholder or + output, or if a call_function or call_module which is impure. + + Returns: + + bool: If the op is impure or not. + """ + if self.op in {"placeholder", "output"}: + return True + + # Check if an impure function. + if self.op == "call_function": + return self.target in _side_effectful_functions + + # Check if an impure module. 
# Check if an impure module. + if self.op == "call_module": + assert ( + self.graph.owning_module is not None + ), "self.graph.owning_module not set for purity check" + target_mod = self.graph.owning_module.get_submodule(self.target) + assert ( + target_mod is not None + ), f"Did not find expected submodule target {self.target}" + return getattr(target_mod, "_is_impure", False) + + return False + + @compatibility(is_backward_compatible=False) + def normalized_arguments( + self, root : torch.nn.Module, arg_types : Optional[Tuple[Any]] = None, + kwarg_types : Optional[Dict[str, Any]] = None, + normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: + """ + Returns normalized arguments to Python targets. This means that + `args/kwargs` will be matched up to the module/functional's + signature and return exclusively kwargs in positional order + if `normalize_to_only_use_kwargs` is true. + Also populates default values. Does not support positional-only + parameters or varargs parameters. + + Supports module calls. + + May require `arg_types` and `kwarg_types` in order to disambiguate overloads. + + Args: + root (torch.nn.Module): Module upon which to resolve module targets. + arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args + kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Returns NamedTuple ArgsKwargsPair, or `None` if not successful. + """ + if self.op == 'call_function': + assert callable(self.target) + return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) # type: ignore[arg-type] + elif self.op == 'call_module': + assert isinstance(self.target, str) + return normalize_module(root, self.target, self.args, self.kwargs) # type: ignore[arg-type] + + return None + + @compatibility(is_backward_compatible=True) + def replace_input_with(self, old_input: 'Node', new_input: 'Node'): + """ + Loop through input nodes of ``self``, and replace all instances of + ``old_input`` with ``new_input``. + + Args: + + old_input (Node): The old input node to be replaced. + new_input (Node): The new input node to replace ``old_input``. + """ + def maybe_replace_node(n : Node) -> Node: + return new_input if n == old_input else n + + new_args = map_arg(self.args, maybe_replace_node) + new_kwargs = map_arg(self.kwargs, maybe_replace_node) + assert isinstance(new_args, tuple) + assert isinstance(new_kwargs, dict) + self.__update_args_kwargs(new_args, new_kwargs) + + def _rename(self, candidate: str): + if candidate == self.name: + return + name = self.graph._graph_namespace.create_name(candidate, None) + self.name = name + self.graph._graph_namespace._rename_object(self, name) + + +@compatibility(is_backward_compatible=True) +def map_arg(a: Argument, fn: Callable[[Node], Argument]) -> Argument: + """ + Apply fn to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys. + """ + assert callable(fn), "torch.fx.map_arg(a, fn): fn must be a callable" + return map_aggregate(a, lambda x: fn(x) if isinstance(x, Node) else x) + +@compatibility(is_backward_compatible=True) +def map_aggregate(a: Argument, fn: Callable[[Argument], Argument]) -> Argument: + """ + Apply fn to each value appearing in arg. arg may be a list, tuple, slice, or dict with string keys. + """ + if isinstance(a, tuple): + t = tuple(map_aggregate(elem, fn) for elem in a) + # Support NamedTuple (if it has `_fields`) by repacking into original type.
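# Illustrative sketch, not part of the upstream diff (``map_aggregate``'s tuple
# handling concludes just below): ``map_arg``/``map_aggregate`` walk arbitrarily
# nested args/kwargs structures, so a small visitor can collect every Node that a
# node's arguments reference. ``collect_arg_nodes`` is an invented helper that
# relies on the ``Node`` and ``map_aggregate`` definitions in this file.
def collect_arg_nodes(args):
    found = []
    def visit(a):
        if isinstance(a, Node):
            found.append(a)
        return a
    map_aggregate(args, visit)
    return found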
+ return t if not hasattr(a, '_fields') else type(a)(*t) + elif isinstance(a, list): + return immutable_list(map_aggregate(elem, fn) for elem in a) + elif isinstance(a, dict): + return immutable_dict((k, map_aggregate(v, fn)) for k, v in a.items()) + elif isinstance(a, slice): + return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn)) + else: + return fn(a) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/operator_schemas.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/operator_schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..3e796ba65fc7813f4c210d7a196fe53f89b1e286 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/operator_schemas.py @@ -0,0 +1,440 @@ +import torch +import inspect +import numbers +import types +import typing +import enum +import warnings +from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING +from torch._jit_internal import boolean_dispatched +from ._compatibility import compatibility +from torch._ops import OpOverloadPacket, OpOverload + +if TYPE_CHECKING: + from .node import Argument + +__all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint", + "type_matches", "normalize_function", "normalize_module"] + +@compatibility(is_backward_compatible=False) +class ArgsKwargsPair(NamedTuple): + """ + Simple named tuple for wrapping args/kwargs pairs. + """ + args: Tuple[Any, ...] + kwargs: Dict[str, Any] + +_manual_overrides : Dict[Callable, List[inspect.Signature]] = {} + +def _nonzero_schemas(): + signatures = [] + + def nonzero(self): + pass + signatures.append(inspect.signature(nonzero)) + + def nonzero(self, *, as_tuple : bool): # type: ignore[no-redef] + pass + signatures.append(inspect.signature(nonzero)) + + return signatures + +_manual_overrides[torch.nonzero] = _nonzero_schemas() + +class _FakeGlobalNamespace: + def __getattr__(self, name): + if name == 'torch': + return torch + raise RuntimeError('Expected a torch namespace lookup') + +_type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout, + 'number' : numbers.Number, 'Future' : torch.jit.Future, + 'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme, + '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None), + 't': typing.TypeVar('t')} +for k in dir(typing): + _type_eval_globals[k] = getattr(typing, k) + +def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any: + """ + Convert a TorchScript type to a Python type (including subtypes) via + eval'ing the annotation_str. _type_eval_globals sets up expressions + like "List" and "Future" to map to actual types (typing.List and jit.Future) + """ + return eval(ts_type.annotation_str, _type_eval_globals) + +def _torchscript_schema_to_signature_impl(ts_schema : torch._C.FunctionSchema) -> inspect.Signature: + from inspect import Parameter + parameters : List[Parameter] = [] + for arg in ts_schema.arguments: + arg_type = _torchscript_type_to_python_type(arg.type) + default = arg.default_value if arg.has_default_value() else Parameter.empty + # TODO: Figure out if this is safe. It seems like when generating the type signatures for + # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor + # argument name. 
Downstream, if someone converts that positional argument to a keyword + # argument, the name mismatch will break things, so here we're going to normalize the + # name to "input" + name = arg.name if arg.name != 'self' else 'input' + kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD + # "from" is a Python keyword, therefore it must be a POSITIONAL_ONLY argument + if name == "from": + assert kind == Parameter.POSITIONAL_OR_KEYWORD + # The ParameterKind type is an internal implementation detail of the inspect + # package, which makes it hard to do type annotation + kind = Parameter.POSITIONAL_ONLY # type: ignore[assignment] + # This renders all previous arguments positional-only + for idx, p in enumerate(parameters): + assert p.kind == Parameter.POSITIONAL_OR_KEYWORD + parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation) + parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type)) + return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns] + if len(return_types) == 0: + return_type = None + elif len(return_types) == 1: + return_type = return_types[0] + else: + return_type = tuple(return_types) + + return inspect.Signature(parameters, return_annotation=return_type) + +_SCHEMA_TO_SIGNATURE_CACHE : Dict[Tuple[str, str], inspect.Signature] = {} + +def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature: + # Cached as it's called in the hot path of FakeTensor dispatch + cache_key = ts_schema.name, ts_schema.overload_name + cache_val = _SCHEMA_TO_SIGNATURE_CACHE.get(cache_key) + if cache_val is not None: + return cache_val + + res = _torchscript_schema_to_signature_impl(ts_schema) + _SCHEMA_TO_SIGNATURE_CACHE[cache_key] = res + return res + +@compatibility(is_backward_compatible=False) +def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']): + signatures, schemas = get_signature_for_torch_op(target, return_schemas=True) + + if signatures and schemas: + matched_schemas = [] + + # Iterate through all of the schemas until we find one that matches. + # If exactly one matches, check that schema for mutability; if none or + # several match, the check is skipped. + for candidate_signature, schema in zip(signatures, schemas): + try: + candidate_signature.bind(*args, **kwargs) + matched_schemas.append((candidate_signature, schema)) + except TypeError as e: + continue + + def throw_if_mutable(schema): + if schema.is_mutable: + raise RuntimeError(f'Tried to trace mutable operation {schema}. FX only supports functional ' + f'code, so operations that mutate operands in-place (e.g. via `out` arguments) ' + f'are not supported') + + if len(matched_schemas) == 0: + # Did not match any schema. Cannot check for mutation + pass + elif len(matched_schemas) == 1: + # Matched exactly one schema, unambiguous + _, schema_to_check = matched_schemas[0] + throw_if_mutable(schema_to_check) + else: + # Ambiguous schema match. Since mutability checking is best effort, + # do nothing. + pass + +@compatibility(is_backward_compatible=False) +def get_signature_for_torch_op(op : Callable, return_schemas : bool = False): + """ + Given an operator on the `torch` namespace, return a list of `inspect.Signature` + objects corresponding to the overloads of that op. May return `None` if a signature + could not be retrieved.
+ + Args: + op (Callable): An operator on the `torch` namespace to look up a signature for + + Returns: + Optional[List[inspect.Signature]]: A list of signatures for the overloads of this + operator, or None if the operator signatures could not be retrieved. If + return_schemas=True, returns a tuple containing the optional Python signatures + and the optional TorchScript Function signature + """ + if isinstance(op, OpOverload): + schemas = [op._schema] + elif isinstance(op, OpOverloadPacket): + schemas = [getattr(op, overload)._schema for overload in op.overloads()] + else: + override = _manual_overrides.get(op) + if override: + return (override, None) if return_schemas else None + + aten_fn = torch.jit._builtins._find_builtin(op) + + if aten_fn is None: + return (None, None) if return_schemas else None + schemas = torch._C._jit_get_schemas_for_operator(aten_fn) + + signatures = [_torchscript_schema_to_signature(schema) for schema in schemas] + return (signatures, schemas) if return_schemas else signatures + +@compatibility(is_backward_compatible=False) +def create_type_hint(x): + try: + if isinstance(x, (list, tuple)): + # todo(chilli): Figure out the right way for mypy to handle this + if isinstance(x, list): + def ret_type(x): + return List[x] # type: ignore[valid-type] + else: + def ret_type(x): + return Tuple[x, ...] + if len(x) == 0: + return ret_type(Any) + base_type = x[0] + for t in x: + if issubclass(t, base_type): + continue + elif issubclass(base_type, t): + base_type = t + else: + return ret_type(Any) + return ret_type(base_type) + except Exception as e: + # We tried to create a type hint for list but failed. + warnings.warn(f"We were not able to successfully create type hint from the type {x}") + pass + return x + +@compatibility(is_backward_compatible=False) +def type_matches(signature_type : Any, argument_type : Any): + sig_origin_type = getattr(signature_type, '__origin__', signature_type) + + if signature_type is argument_type: + return True + + # Union types in signature. Given type needs to match one of the + # contained types in the Union + if sig_origin_type is typing.Union and signature_type != argument_type: + sig_contained = signature_type.__args__ + return any(type_matches(c, argument_type) for c in sig_contained) + + if signature_type is List[int] and argument_type is int: + # int can be promoted to List[int] + return True + + if getattr(signature_type, '__origin__', None) in {list, List}: + sig_el_type = signature_type.__args__[0] + if not inspect.isclass(sig_el_type): + warnings.warn( + f"Does not support nested parametric types, got {signature_type}. 
Please file a bug.") + return False + if getattr(argument_type, '__origin__', None) in {list, List}: + return issubclass(argument_type.__args__[0], sig_el_type) + + def is_homogeneous_tuple(t): + if getattr(t, "__origin__", None) not in {tuple, Tuple}: + return False + contained = t.__args__ + if t.__args__ == ((),): # Tuple[()].__args__ == ((),) for some reason + return True + return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained) + + # Tuple[T] is accepted for List[T] parameters + return is_homogeneous_tuple(argument_type) + + # Dtype is an int in schemas + if signature_type is int and argument_type is torch.dtype: + return True + + if signature_type is numbers.Number and argument_type in {int, float}: + return True + if inspect.isclass(argument_type) and inspect.isclass(signature_type): + return issubclass(argument_type, signature_type) + + return False + +@compatibility(is_backward_compatible=False) +def normalize_function( + target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None, + kwarg_types : Optional[Dict[str, Any]] = None, + normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: + """ + Returns normalized arguments to PyTorch functions. This means that + `args/kwargs` will be matched up to the functional's + signature and return exclusively kwargs in positional order if + `normalize_to_only_use_kwargs` is True. + Also populates default values. Does not support positional-only + parameters or varargs parameters (*args, **kwargs). Does not support modules. + + May require `arg_types` and `kwarg_types` in order to disambiguate overloads. + + Args: + target (Callable): Function that we are normalizing + args (Tuple[Any]): Tuple of args to the function + kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function + arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args + kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Returns normalized_args_and_kwargs, or `None` if not successful. + """ + if kwargs is None: + kwargs = {} + new_args_and_kwargs = None + if not isinstance(target, types.BuiltinFunctionType) and not ( + isinstance(target, (OpOverloadPacket, OpOverload)) + ): + target_for_analysis = target + if target in boolean_dispatched: + # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have + # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false` + # branches of the dispatch have exactly the same signature. If they do, use the `true` + # branch signature for analysis. Otherwise, leave this un-normalized + assert not isinstance(target, str) + dispatched = boolean_dispatched[target] + if_true, if_false = dispatched['if_true'], dispatched['if_false'] + if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters: + return None + target_for_analysis = if_true + + assert callable(target_for_analysis) + sig = inspect.signature(inspect.unwrap(target_for_analysis)) + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs) + else: + assert callable(target) + torch_op_schemas = get_signature_for_torch_op(target) + matched_schemas = [] + if torch_op_schemas: + # Iterate through all of the schema until we find one that matches + # If one matches, populate `new_args_and_kwargs` with the new args/kwargs + # values. 
If none matches, `new_args_and_kwargs` will be None + for candidate_signature in torch_op_schemas: + try: + candidate_signature.bind(*args, **kwargs) + matched_schemas.append(candidate_signature) + except TypeError as e: + continue + + if len(matched_schemas) == 0: + # Did not match any schema. Cannot normalize + pass + elif len(matched_schemas) == 1: + # Matched exactly one schema, unambiguous + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs, + normalize_to_only_use_kwargs) + else: + if arg_types is not None or kwarg_types is not None: + arg_types = arg_types if arg_types else cast(Tuple[Any], ()) + kwarg_types = kwarg_types if kwarg_types else {} + for candidate_signature in torch_op_schemas: + sig_matches = True + try: + bound_types = candidate_signature.bind(*arg_types, **kwarg_types) + for arg_name, arg_type in bound_types.arguments.items(): + param = candidate_signature.parameters[arg_name] + sig_matches = sig_matches and type_matches(param.annotation, arg_type) + except TypeError as e: + sig_matches = False + if sig_matches: + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs, + normalize_to_only_use_kwargs) + break + else: + # Matched more than one schema. In this situation, the caller must provide the types of + # the arguments of the overload they expect. + schema_printouts = '\n'.join(str(schema) for schema in matched_schemas) + raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but ' + f'the schema match was ambiguous! Please provide argument types to ' + f'the normalize_arguments() call. Available schemas:\n{schema_printouts}') + + return new_args_and_kwargs + +@compatibility(is_backward_compatible=False) +def normalize_module( + root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, + normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: + """ + Returns normalized arguments to PyTorch modules. This means that + `args/kwargs` will be matched up to the functional's + signature and return exclusively kwargs in positional order if + `normalize_to_only_use_kwargs` is True. + Also populates default values. Does not support positional-only + parameters or varargs parameters (*args, **kwargs). + + Args: + root (nn.Module): root module upon which we query modules + target (Callable): Function that we are normalizing + args (Tuple[Any]): Tuple of args to the function + kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Returns normalized_args_and_kwargs, or `None` if not successful. 
+ """ + try: + submod = root.get_submodule(target) + except AttributeError as e: + raise RuntimeError(f"Tried to normalize node with target {target} but root did not " + f"have that target!") from e + if hasattr(submod.__class__, '__name__'): + classname = submod.__class__.__name__ + if getattr(torch.nn, classname, None) == submod.__class__: + sig = inspect.signature(inspect.unwrap(submod.forward)) + if kwargs is None: + kwargs = {} + new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, + normalize_to_only_use_kwargs) + return new_args_and_kwargs + return None + +def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...], + kwargs : Dict[str, Any], + normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]: + """ + Given a call target, args, and kwargs, return the arguments normalized into + an ArgsKwargsPair, or None if the type signature is not supported by + this normalization. + + Args: + + sig (inspect.Signature): Signature object for the target + args (Tuple): Arguments that appear at the callsite for `target` + kwargs (Dict): Keyword arguments that appear at the callsite for `target` + normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. + + Returns: + + Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if + this target is not supported. + """ + + # Don't currently support positional-only + # or varargs (*args, **kwargs) signatures + supported_parameter_types = { + inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY} + if any(p.kind not in supported_parameter_types for p in sig.parameters.values()): + # Add an exception for one signature, which is common for random/uniform, i.e.: + # Tensor(a!) self, float from=0, float to=1, *, Generator? 
generator=None + # `from` is Python keyword and as such functions with that signature should have + # positional-only args, but at the same time they could be dispatched as kwargs + if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']: + return None + + bound_args = sig.bind(*args, **kwargs) + bound_args.apply_defaults() + + new_kwargs : Dict[str, Any] = {} + new_args : List[Any] = [] + for i, param in enumerate(sig.parameters): + if not normalize_to_only_use_kwargs and i < len(args): + new_args.append(bound_args.arguments[param]) + else: + new_kwargs[param] = bound_args.arguments[param] + + return ArgsKwargsPair(tuple(new_args), new_kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py new file mode 100644 index 0000000000000000000000000000000000000000..2e418114c468ae55808ceca59924fd852ca812ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py @@ -0,0 +1,418 @@ + +import hashlib +import torch +import torch.fx +from typing import Any, Dict, Optional, TYPE_CHECKING +from torch.fx.node import _get_qualified_name, _format_arg +from torch.fx.graph import _parse_stack_trace +from torch.fx.passes.shape_prop import TensorMetadata +from torch.fx._compatibility import compatibility +from itertools import chain + +__all__ = ['FxGraphDrawer'] +try: + import pydot + HAS_PYDOT = True +except ImportError: + HAS_PYDOT = False + +_COLOR_MAP = { + "placeholder": '"AliceBlue"', + "call_module": "LemonChiffon1", + "get_param": "Yellow2", + "get_attr": "LightGrey", + "output": "PowderBlue", +} + +_HASH_COLOR_MAP = [ + "CadetBlue1", + "Coral", + "DarkOliveGreen1", + "DarkSeaGreen1", + "GhostWhite", + "Khaki1", + "LavenderBlush1", + "LightSkyBlue", + "MistyRose1", + "MistyRose2", + "PaleTurquoise2", + "PeachPuff1", + "Salmon", + "Thistle1", + "Thistle3", + "Wheat1", +] + +_WEIGHT_TEMPLATE = { + "fillcolor": "Salmon", + "style": '"filled,rounded"', + "fontcolor": "#000000", +} + +if HAS_PYDOT: + @compatibility(is_backward_compatible=False) + class FxGraphDrawer: + """ + Visualize a torch.fx.Graph with graphviz + Basic usage: + g = FxGraphDrawer(symbolic_traced, "resnet18") + g.get_dot_graph().write_svg("a.svg") + """ + + def __init__( + self, + graph_module: torch.fx.GraphModule, + name: str, + ignore_getattr: bool = False, + ignore_parameters_and_buffers: bool = False, + skip_node_names_in_args: bool = True, + parse_stack_trace: bool = False, + dot_graph_shape: Optional[str] = None, + ): + self._name = name + self.dot_graph_shape = ( + dot_graph_shape if dot_graph_shape is not None else "record" + ) + _WEIGHT_TEMPLATE["shape"] = self.dot_graph_shape + + self._dot_graphs = { + name: self._to_dot( + graph_module, name, ignore_getattr, ignore_parameters_and_buffers, skip_node_names_in_args, parse_stack_trace + ) + } + + for node in graph_module.graph.nodes: + if node.op != "call_module": + continue + + leaf_node = self._get_leaf_node(graph_module, node) + + if not isinstance(leaf_node, torch.fx.GraphModule): + continue + + + self._dot_graphs[f"{name}_{node.target}"] = self._to_dot( + leaf_node, + f"{name}_{node.target}", + ignore_getattr, + ignore_parameters_and_buffers, + skip_node_names_in_args, + parse_stack_trace, + ) + + def get_dot_graph(self, submod_name=None) -> pydot.Dot: + """ + Visualize a torch.fx.Graph with graphviz + Example: + >>> # xdoctest: +REQUIRES(module:pydot) + >>> # define module + >>> class MyModule(torch.nn.Module): + 
>>> def __init__(self): + >>> super().__init__() + >>> self.linear = torch.nn.Linear(4, 5) + >>> def forward(self, x): + >>> return self.linear(x).clamp(min=0.0, max=1.0) + >>> module = MyModule() + >>> # trace the module + >>> symbolic_traced = torch.fx.symbolic_trace(module) + >>> # setup output file + >>> import ubelt as ub + >>> dpath = ub.Path.appdir('torch/tests/FxGraphDrawer').ensuredir() + >>> fpath = dpath / 'linear.svg' + >>> # draw the graph + >>> g = FxGraphDrawer(symbolic_traced, "linear") + >>> g.get_dot_graph().write_svg(fpath) + """ + if submod_name is None: + return self.get_main_dot_graph() + else: + return self.get_submod_dot_graph(submod_name) + + def get_main_dot_graph(self) -> pydot.Dot: + return self._dot_graphs[self._name] + + def get_submod_dot_graph(self, submod_name) -> pydot.Dot: + return self._dot_graphs[f"{self._name}_{submod_name}"] + + def get_all_dot_graphs(self) -> Dict[str, pydot.Dot]: + return self._dot_graphs + + def _get_node_style(self, node: torch.fx.Node) -> Dict[str, str]: + + template = { + "shape": self.dot_graph_shape, + "fillcolor": "#CAFFE3", + "style": '"filled,rounded"', + "fontcolor": "#000000", + } + if node.op in _COLOR_MAP: + template["fillcolor"] = _COLOR_MAP[node.op] + else: + # Use a random color for each node; based on its name so it's stable. + target_name = node._pretty_print_target(node.target) + target_hash = int(hashlib.md5(target_name.encode()).hexdigest()[:8], 16) + template["fillcolor"] = _HASH_COLOR_MAP[target_hash % len(_HASH_COLOR_MAP)] + return template + + def _get_leaf_node( + self, module: torch.nn.Module, node: torch.fx.Node + ) -> torch.nn.Module: + py_obj = module + assert isinstance(node.target, str) + atoms = node.target.split(".") + for atom in atoms: + if not hasattr(py_obj, atom): + raise RuntimeError( + str(py_obj) + " does not have attribute " + atom + "!" + ) + py_obj = getattr(py_obj, atom) + return py_obj + + def _typename(self, target: Any) -> str: + if isinstance(target, torch.nn.Module): + ret = torch.typename(target) + elif isinstance(target, str): + ret = target + else: + ret = _get_qualified_name(target) + + # Escape "{" and "}" to prevent dot files like: + # https://gist.github.com/SungMinCho/1a017aab662c75d805c5954d62c5aabc + # which triggers `Error: bad label format (...)` from dot + return ret.replace("{", r"\{").replace("}", r"\}") + + # shorten path to avoid drawing long boxes + # for full path = '/home/weif/pytorch/test.py' + # return short path = 'pytorch/test.py' + def _shorten_file_name( + self, + full_file_name: str, + truncate_to_last_n: int = 2, + ): + splits = full_file_name.split('/') + if len(splits) >= truncate_to_last_n: + return '/'.join(splits[-truncate_to_last_n:]) + return full_file_name + + + def _get_node_label( + self, + module: torch.fx.GraphModule, + node: torch.fx.Node, + skip_node_names_in_args: bool, + parse_stack_trace: bool, + ) -> str: + def _get_str_for_args_kwargs(arg): + if isinstance(arg, tuple): + prefix, suffix = r"|args=(\l", r",\n)\l" + arg_strs_list = [_format_arg(a, max_list_len=8) for a in arg] + elif isinstance(arg, dict): + prefix, suffix = r"|kwargs={\l", r",\n}\l" + arg_strs_list = [ + f"{k}: {_format_arg(v, max_list_len=8)}" + for k, v in arg.items() + ] + else: # Fall back to nothing in unexpected case. + return "" + + # Strip out node names if requested. 
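# Illustrative sketch, not part of the upstream diff (the label-building helper
# continues just below): every nested GraphModule gets its own dot graph,
# retrievable via ``get_all_dot_graphs``. Rendering svg/png additionally needs a
# Graphviz install next to ``pydot``; the file names here are invented.
import torch
import torch.fx
from torch.fx.passes.graph_drawer import FxGraphDrawer

mlp = torch.fx.symbolic_trace(torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU()))
drawer = FxGraphDrawer(mlp, "mlp", ignore_getattr=True)
for graph_name, dot in drawer.get_all_dot_graphs().items():
    with open(f"{graph_name}.dot", "w") as f:
        f.write(dot.to_string())  # or dot.write_svg(...) when Graphviz is available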
+ if skip_node_names_in_args: + arg_strs_list = [a for a in arg_strs_list if "%" not in a] + if len(arg_strs_list) == 0: + return "" + arg_strs = prefix + r",\n".join(arg_strs_list) + suffix + if len(arg_strs_list) == 1: + arg_strs = arg_strs.replace(r"\l", "").replace(r"\n", "") + return arg_strs.replace("{", r"\{").replace("}", r"\}") + + + label = "{" + f"name=%{node.name}|op_code={node.op}\n" + + if node.op == "call_module": + leaf_module = self._get_leaf_node(module, node) + label += r"\n" + self._typename(leaf_module) + r"\n|" + extra = "" + if hasattr(leaf_module, "__constants__"): + extra = r"\n".join( + [f"{c}: {getattr(leaf_module, c)}" for c in leaf_module.__constants__] # type: ignore[union-attr] + ) + label += extra + r"\n" + else: + label += f"|target={self._typename(node.target)}" + r"\n" + if len(node.args) > 0: + label += _get_str_for_args_kwargs(node.args) + if len(node.kwargs) > 0: + label += _get_str_for_args_kwargs(node.kwargs) + label += f"|num_users={len(node.users)}" + r"\n" + + tensor_meta = node.meta.get('tensor_meta') + label += self._tensor_meta_to_label(tensor_meta) + + # for original fx graph + # print buf=buf0, n_origin=6 + buf_meta = node.meta.get('buf_meta', None) + if buf_meta is not None: + label += f"|buf={buf_meta.name}" + r"\n" + label += f"|n_origin={buf_meta.n_origin}" + r"\n" + + # for original fx graph + # print file:lineno code + if parse_stack_trace and node.stack_trace is not None: + parsed_stack_trace = _parse_stack_trace(node.stack_trace) + fname = self._shorten_file_name(parsed_stack_trace.file) + label += f"|file={fname}:{parsed_stack_trace.lineno} {parsed_stack_trace.code}" + r"\n" + + + return label + "}" + + def _tensor_meta_to_label(self, tm) -> str: + if tm is None: + return "" + elif isinstance(tm, TensorMetadata): + return self._stringify_tensor_meta(tm) + elif isinstance(tm, list): + result = "" + for item in tm: + result += self._tensor_meta_to_label(item) + return result + elif isinstance(tm, dict): + result = "" + for v in tm.values(): + result += self._tensor_meta_to_label(v) + return result + elif isinstance(tm, tuple): + result = "" + for item in tm: + result += self._tensor_meta_to_label(item) + return result + else: + raise RuntimeError(f"Unsupported tensor meta type {type(tm)}") + + def _stringify_tensor_meta(self, tm: TensorMetadata) -> str: + result = "" + if not hasattr(tm, "dtype"): + print("tm", tm) + result += "|" + "dtype" + "=" + str(tm.dtype) + r"\n" + result += "|" + "shape" + "=" + str(tuple(tm.shape)) + r"\n" + result += "|" + "requires_grad" + "=" + str(tm.requires_grad) + r"\n" + result += "|" + "stride" + "=" + str(tm.stride) + r"\n" + if tm.is_quantized: + assert tm.qparams is not None + assert "qscheme" in tm.qparams + qscheme = tm.qparams["qscheme"] + if qscheme in { + torch.per_tensor_affine, + torch.per_tensor_symmetric, + }: + result += "|" + "q_scale" + "=" + str(tm.qparams["scale"]) + r"\n" + result += "|" + "q_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n" + elif qscheme in { + torch.per_channel_affine, + torch.per_channel_symmetric, + torch.per_channel_affine_float_qparams, + }: + result += "|" + "q_per_channel_scale" + "=" + str(tm.qparams["scale"]) + r"\n" + result += "|" + "q_per_channel_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n" + result += "|" + "q_per_channel_axis" + "=" + str(tm.qparams["axis"]) + r"\n" + else: + raise RuntimeError(f"Unsupported qscheme: {qscheme}") + result += "|" + "qscheme" + "=" + str(tm.qparams["qscheme"]) + r"\n" + return result + + def 
_get_tensor_label(self, t: torch.Tensor) -> str: + return str(t.dtype) + str(list(t.shape)) + r"\n" + + # when parse_stack_trace=True + # print file:lineno code + def _to_dot( + self, + graph_module: torch.fx.GraphModule, + name: str, + ignore_getattr: bool, + ignore_parameters_and_buffers: bool, + skip_node_names_in_args: bool, + parse_stack_trace: bool, + ) -> pydot.Dot: + """ + Actual interface to visualize a fx.Graph. Note that it takes in the GraphModule instead of the Graph. + If ignore_parameters_and_buffers is True, the parameters and buffers + created with the module will not be added as nodes and edges. + """ + + # "TB" means top-to-bottom rank direction in layout + dot_graph = pydot.Dot(name, rankdir="TB") + + + buf_name_to_subgraph = {} + + for node in graph_module.graph.nodes: + if ignore_getattr and node.op == "get_attr": + continue + + style = self._get_node_style(node) + dot_node = pydot.Node( + node.name, label=self._get_node_label(graph_module, node, skip_node_names_in_args, parse_stack_trace), **style + ) + + current_graph = dot_graph + + buf_meta = node.meta.get('buf_meta', None) + if buf_meta is not None and buf_meta.n_origin > 1: + buf_name = buf_meta.name + if buf_name not in buf_name_to_subgraph: + buf_name_to_subgraph[buf_name] = pydot.Cluster(buf_name, label=buf_name) + current_graph = buf_name_to_subgraph.get(buf_name) + + current_graph.add_node(dot_node) + + def get_module_params_or_buffers(): + for pname, ptensor in chain( + leaf_module.named_parameters(), leaf_module.named_buffers() + ): + pname1 = node.name + "." + pname + label1 = ( + pname1 + "|op_code=get_" + "parameter" + if isinstance(ptensor, torch.nn.Parameter) + else "buffer" + r"\l" + ) + dot_w_node = pydot.Node( + pname1, + label="{" + label1 + self._get_tensor_label(ptensor) + "}", + **_WEIGHT_TEMPLATE, + ) + dot_graph.add_node(dot_w_node) + dot_graph.add_edge(pydot.Edge(pname1, node.name)) + + if node.op == "call_module": + leaf_module = self._get_leaf_node(graph_module, node) + + if not ignore_parameters_and_buffers and not isinstance(leaf_module, torch.fx.GraphModule): + get_module_params_or_buffers() + + for subgraph in buf_name_to_subgraph.values(): + subgraph.set('color', 'royalblue') + subgraph.set('penwidth', '2') + dot_graph.add_subgraph(subgraph) + + for node in graph_module.graph.nodes: + if ignore_getattr and node.op == "get_attr": + continue + + for user in node.users: + dot_graph.add_edge(pydot.Edge(node.name, user.name)) + + return dot_graph + +else: + if not TYPE_CHECKING: + @compatibility(is_backward_compatible=False) + class FxGraphDrawer: + def __init__( + self, + graph_module: torch.fx.GraphModule, + name: str, + ignore_getattr: bool = False, + parse_stack_trace: bool = False, + ): + raise RuntimeError('FXGraphDrawer requires the pydot package to be installed. 
Please install ' + 'pydot through your favorite Python package manager.') diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/proxy.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..66b785b8b03f6e3e2b1213d19a3917537e83feb3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/proxy.py @@ -0,0 +1,563 @@ +import enum +import dis +import copy +import sys +import torch +import inspect +import operator +import traceback +import collections + +from dataclasses import is_dataclass, fields + + +from .graph import magic_methods, reflectable_magic_methods, Graph +from typing import Tuple, Dict, OrderedDict, Optional, Any, Iterator, Callable +from .node import Target, Node, Argument, base_types, map_aggregate +from ._compatibility import compatibility +from .operator_schemas import check_for_mutable_operation +import torch.fx.traceback as fx_traceback + +__all__ = ['TracerBase', 'GraphAppendingTracer', 'TraceError', + 'Proxy', 'Attribute', 'ParameterProxy', 'Scope', + 'ScopeContextManager'] + + +@compatibility(is_backward_compatible=False) +class Scope: + """ Scope object that records the module path and the module type + of a module. Scope is used to track the information of the module + that contains a Node in a Graph of GraphModule. For example:: + + class Sub(torch.nn.Module): + def forward(self, x): + # This will be a call_method Node in GraphModule, + # scope for this would be (module_path="sub", module_type=Sub) + return x.transpose(1, 2) + + class M(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub = Sub() + + def forward(self, x): + # This will be a call_method Node as well, + # scope for this would be (module_path="", None) + x = x.transpose(1, 2) + x = self.sub(x) + return x + + """ + + def __init__(self, module_path: str, module_type: Any): + super().__init__() + self.module_path = module_path + self.module_type = module_type + + +@compatibility(is_backward_compatible=False) +class ScopeContextManager: + """ A context manager to track the Scope of Node during symbolic tracing. + When entering a forward function of a Module, we'll update the scope information of + the current module, and when we exit, we'll restore the previous scope information. + """ + + def __init__( + self, + scope: Scope, + current_scope: Scope, + ): + super().__init__() + # Keep a copy of prev scope to restore on exit + self._prev_scope = copy.copy(scope) + # Update scope to current scope + scope.module_path = current_scope.module_path + scope.module_type = current_scope.module_type + # Save a reference so we can restore it + self._scope = scope + + def __enter__(self): + return self._scope + + def __exit__(self, *args): + self._scope.module_path = self._prev_scope.module_path + self._scope.module_type = self._prev_scope.module_type + return + + +_COPY_META_FIELDS = ["nn_module_stack", "source_fn_stack", "original_aten", "recompute", "from_node", "quantization_tag"] + + +@compatibility(is_backward_compatible=True) +class TracerBase: + graph: Graph + record_stack_traces : bool = False + # Feature flag for mutable schema checking + # Enabled by default in 1.12 + check_mutable_operations : bool = False + # Feature flag for assert tracing + trace_asserts : bool = False + # Feature flag for proxying accesses to buffer values + proxy_buffer_attributes : bool = False +
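# Illustrative sketch, not part of the upstream diff: the feature flags above are
# plain class attributes, so a ``Tracer`` subclass can opt in per trace. Here we
# record user stack traces on every created Node; ``DebugTracer`` is an invented
# name.
import torch
import torch.fx

class DebugTracer(torch.fx.Tracer):
    record_stack_traces = True

def trace_with_stack_traces(m: torch.nn.Module) -> torch.fx.GraphModule:
    tracer = DebugTracer()
    graph = tracer.trace(m)  # tracer.root is set by trace()
    return torch.fx.GraphModule(tracer.root, graph)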
It will only be used when + # ``root`` is an instance of ``nn.Module`` + traced_func_name: str = "forward" + + # Maps the containing module's name to the operator name + scope : Scope + + # Records the module call stack + module_stack: OrderedDict[str, Tuple[str, Any]] + + # Mapping of node name to module scope + node_name_to_scope: Dict[str, Tuple[str, type]] + + @compatibility(is_backward_compatible=True) + def create_node(self, kind : str, target : Target, + args : Tuple[Argument, ...], kwargs : Dict[str, Argument], name : Optional[str] = None, + type_expr : Optional[Any] = None) -> Node: + """ + Inserts a graph node given target, args, kwargs, and name. + + This method can be overridden to do extra checking, validation, or + modification of values used in node creation. For example, one might + want to disallow in-place operations from being recorded. + """ + if kind == 'call_function' and self.check_mutable_operations: + check_for_mutable_operation(target, args, kwargs) + + node = self.graph.create_node(kind, target, args, kwargs, name, type_expr) + # TODO node_name_to_scope will be deprecated in favor of + # node.meta['nn_module_stack'] + self.node_name_to_scope[node.name] = ( + self.scope.module_path, + self.scope.module_type, + ) + # Optionally set stack trace on the created Node for debugging purposes + if fx_traceback.has_preserved_node_meta(): + current_meta: Dict[str, Any] = fx_traceback.get_current_meta() + + stack_trace = current_meta.get("stack_trace") + if stack_trace: + node.stack_trace = stack_trace + # Explicitly set the stack_trace, nn_module_stack and source_fn on the node.meta + # If other meta fields are needed, they can be added here + for field in _COPY_META_FIELDS: + if field in current_meta: + node.meta[field] = copy.copy(current_meta[field]) + + # Here we decrement to account for the sequence_nr having + # just been incremented while tracing this lowered aten op. + new_seq_nr = torch.autograd._get_sequence_nr() - 1 + # The sequence_nr increments every time a new autograd Node + # is created. During the FWD pass we store the sequence_nr + # corresponding to the last autograd Node created on this fx + # node's meta. A single aten op can create multiple autograd + # nodes as is the case with in-place foreach ops. During the + # BWD pass we retrieve the sequence_nr stored on the current + # executing autograd Node. See NOTE [ Sequence Number ]. + if current_meta.get("in_grad_fn", False): + new_seq_nr = current_meta["grad_fn_seq_nr"] + node.meta["seq_nr"] = new_seq_nr + + elif self.module_stack: + node.meta['nn_module_stack'] = copy.copy(self.module_stack) + return node + + @compatibility(is_backward_compatible=True) + def proxy(self, node: Node) -> 'Proxy': + return Proxy(node, self) + + @compatibility(is_backward_compatible=True) + def create_proxy(self, kind: str, target: Target, args: Tuple[Any, ...], kwargs: Dict[str, Any], + name: Optional[str] = None, type_expr : Optional[Any] = None, + proxy_factory_fn: Callable[[Node], 'Proxy'] = None): + ''' + Create a Node from the given arguments, then return the Node + wrapped in a Proxy object. + + If kind = 'placeholder', then we're creating a Node that + represents the parameter of a function. If we need to encode + a default parameter, we use the ``args`` tuple. ``args`` is + otherwise empty for ``placeholder`` Nodes.
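As an illustrative sketch (added commentary, not part of the original
docstring), a ``GraphAppendingTracer`` can drive ``create_proxy`` directly
to build a graph by hand::

    import torch
    from torch.fx import Graph
    from torch.fx.proxy import GraphAppendingTracer

    g = Graph()
    tracer = GraphAppendingTracer(g)
    x = tracer.create_proxy('placeholder', 'x', (), {})
    y = tracer.create_proxy('call_function', torch.relu, (x,), {})
    g.output(y.node)
    print(g)  # the printed graph computes relu(x)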
+ ''' + + args_ = self.create_arg(args) + kwargs_ = self.create_arg(kwargs) + assert isinstance(args_, tuple) + assert isinstance(kwargs_, dict) + + node = self.create_node(kind, target, args_, kwargs_, name, type_expr) + + if not proxy_factory_fn: + proxy = self.proxy(node) + else: + proxy = proxy_factory_fn(node) + + if self.record_stack_traces and not proxy.node.stack_trace: + user_frame = self._find_user_frame() + if user_frame: + summary = traceback.extract_stack(user_frame) + tb_lines = summary.format() + # stack_trace would have innermost frame at the bottom + proxy.node.stack_trace = ''.join(tb_lines) + + return proxy + + def _find_user_frame(self): + """ + Find the Python stack frame executing the user code during + symbolic tracing. + """ + # We have to do a little dance here. Basically, walk up the callstack and + # record the first frame not in the pytorch source. This is the frame executing + # the user code during tracing. + frame = inspect.currentframe() + + pt_files = ['torch/fx/proxy.py', + 'torch/fx/_symbolic_trace.py', + 'torch/fx/experimental/proxy_tensor.py', + 'torch/_ops.py', + 'torch/_tensor.py', + 'torch/utils/_python_dispatch.py', + 'torch/_prims_common/wrappers.py', + 'torch/_refs/__init__.py', + 'torch/_refs/nn/functional/__init__.py', + 'torch/utils/_stats.py', + ] + while frame: + frame = frame.f_back + if frame and all(not frame.f_code.co_filename.endswith(file) for file in pt_files): + break + + if not frame: + return None + + return frame + + @compatibility(is_backward_compatible=True) + def create_arg(self, a: Any) -> Argument: + """ + A method that lowers the objects seen as arguments during symbolic evaluation + into Argument types that can be stored in IR. + + Can be overridden to support more trace-specific types. + """ + if not isinstance(a, Proxy) and hasattr(a, '__fx_create_arg__'): + return a.__fx_create_arg__(self) + # aggregates + elif isinstance(a, tuple) and hasattr(a, '_fields'): + # NamedTuple constructors don't seem to like getting a generator + # expression as an argument to their constructor, so build this + # intermediate tuple and unpack it into the NamedTuple constructor + args = tuple(self.create_arg(elem) for elem in a) + return type(a)(*args) # type: ignore[arg-type] + elif isinstance(a, (tuple, list)): + return type(a)(self.create_arg(elem) for elem in a) + elif isinstance(a, dict): + r = {} + for k, v in a.items(): + # Check for invalid dict keys. We do not want a Proxy to appear + # anywhere within the key. Since keys can be collection types, + # we iterate through the key with map_aggregate + k = self.create_arg(k) + + def no_node(arg): + if isinstance(arg, Node): + raise RuntimeError("Keys for dictionaries used as an argument cannot contain a " + f"Node.
Got key: {k}") + map_aggregate(k, no_node) + + r[k] = self.create_arg(v) + return r + elif isinstance(a, slice): + return slice(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step)) + + elif isinstance(a, range): + return range(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step)) + + elif isinstance(a, torch._ops.OpOverload): + return a + + if isinstance(a, Proxy): + # base case: we unwrap the Proxy object + return a.node + + if is_dataclass(a): + kwargs = {field.name: self.create_arg(getattr(a, field.name)) for field in fields(a)} + return self.create_node("call_function", a.__class__, (), kwargs) + + elif isinstance(a, (*base_types, enum.Enum)) or a is None or a is ...: + return a + raise NotImplementedError(f"argument of type: {type(a)}") + + @compatibility(is_backward_compatible=True) + def to_bool(self, obj: 'Proxy') -> bool: + """Called when a proxy object is being converted to a boolean, such as + when used in control flow. Normally we don't know what to do because + we don't know the value of the proxy, but a custom tracer can attach more + information to the graph node using create_node and can choose to return a value. + """ + raise TraceError('symbolically traced variables cannot be used as inputs to control flow') + + @compatibility(is_backward_compatible=True) + def iter(self, obj: 'Proxy') -> Iterator: + """Called when a proxy object is being iterated over, such as + when used in control flow. Normally we don't know what to do because + we don't know the value of the proxy, but a custom tracer can attach more + information to the graph node using create_node and can choose to return an iterator. + """ + raise TraceError('Proxy object cannot be iterated. This can be ' + 'attempted when the Proxy is used in a loop or' + ' as a *args or **kwargs function argument. ' + 'See the torch.fx docs on pytorch.org for a ' + 'more detailed explanation of what types of ' + 'control flow can be traced, and check out the' + ' Proxy docstring for help troubleshooting ' + 'Proxy iteration errors') + + @compatibility(is_backward_compatible=True) + def keys(self, obj: 'Proxy') -> Any: + """Called when a proxy object is has the keys() method called. + This is what happens when ** is called on a proxy. This should return an + iterator it ** is suppose to work in your custom tracer. + """ + return Attribute(obj, 'keys')() + + +# used in Proxy object when just appending to the graph while not tracing. +@compatibility(is_backward_compatible=True) +class GraphAppendingTracer(TracerBase): + def __init__(self, graph: Graph): + super().__init__() + self.graph = graph + self.scope = Scope("", None) + self.module_stack = collections.OrderedDict() + self.node_name_to_scope = {} + +@compatibility(is_backward_compatible=False) +def assert_fn(x): + assert x + +@compatibility(is_backward_compatible=True) +class TraceError(ValueError): + pass + +@compatibility(is_backward_compatible=True) +class Proxy: + """ + ``Proxy`` objects are ``Node`` wrappers that flow through the + program during symbolic tracing and record all the operations + (``torch`` function calls, method calls, operators) that they touch + into the growing FX Graph. + + If you're doing graph transforms, you can wrap your own ``Proxy`` + method around a raw ``Node`` so that you can use the overloaded + operators to add additional things to a ``Graph``. + + ``Proxy`` objects cannot be iterated. 
In other words, the symbolic + tracer will throw an error if a ``Proxy`` is used in a loop or as + an ``*args``/``**kwargs`` function argument. + + There are two main ways around this: + 1. Factor out the untraceable logic into a top-level function and + use ``fx.wrap`` on it. + 2. If the control flow is static (i.e. the loop trip count is + based on some hyperparameter), the code can be kept in its original + position and refactored into something like:: + + for i in range(self.some_hyperparameter): + indexed_item = proxied_value[i] + + For a more detailed description into the Proxy internals, check out + the "Proxy" section in `torch/fx/OVERVIEW.md` + """ + + @compatibility(is_backward_compatible=True) + def __init__(self, node: Node, tracer: 'Optional[TracerBase]' = None): + if tracer is None: + # This allows you to create a Proxy object around a raw Node + tracer = GraphAppendingTracer(node.graph) + self.tracer = tracer + self.node = node + + def __repr__(self) -> str: + return f'Proxy({self.node.name})' + + def __getattr__(self, k) -> 'Attribute': + # note: not added to the graph yet, if this is a method call + # we peephole optimize to the method invocation + return Attribute(self, k) + + def __call__(self, *args, **kwargs) -> 'Proxy': + return self.tracer.create_proxy('call_method', '__call__', (self,) + args, kwargs) + + def __iter__(self) -> Iterator['Proxy']: + frame = inspect.currentframe() + assert frame is not None + calling_frame = frame.f_back + assert calling_frame is not None + inst_list = list(dis.get_instructions(calling_frame.f_code)) + if sys.version_info >= (3, 11): + from bisect import bisect_left + inst_idx = bisect_left(inst_list, calling_frame.f_lasti, key=lambda x: x.offset) + else: + inst_idx = calling_frame.f_lasti // 2 + inst = inst_list[inst_idx] + if inst.opname == 'UNPACK_SEQUENCE': + return (self[i] for i in range(inst.argval)) # type: ignore[index] + + return self.tracer.iter(self) + + def __abs__(self): + return self.tracer.create_proxy('call_function', operator.abs, (self,), {}) + + def __bool__(self) -> bool: + if self.tracer.trace_asserts: + # check if this boolean is used in an assertion, bytecode pattern for assertions + # is pretty stable for Python 3.7--3.9 + frame = inspect.currentframe() + assert frame is not None + calling_frame = frame.f_back + assert calling_frame is not None + insts = list(dis.get_instructions(calling_frame.f_code)) + if sys.version_info >= (3, 11): + from bisect import bisect_left + cur = bisect_left(insts, calling_frame.f_lasti, key=lambda x: x.offset) + else: + cur = calling_frame.f_lasti // 2 + inst = insts[cur] + + if inst.opname == 'POP_JUMP_IF_TRUE': + first = insts[cur + 1] + assert inst.arg is not None + last = insts[inst.arg // 2 - 1] + starts_with_assert = (first.opname == 'LOAD_GLOBAL' and first.argval == 'AssertionError' + or first.opname == 'LOAD_ASSERTION_ERROR') + if starts_with_assert and last.opname == 'RAISE_VARARGS': + self.tracer.create_proxy('call_function', assert_fn, (self,), {}) + return True + + return self.tracer.to_bool(self) + + @compatibility(is_backward_compatible=True) + def keys(self): + return self.tracer.keys(self) + + def __len__(self): + raise RuntimeError("'len' is not supported in symbolic tracing by default. 
If you want " + "this call to be recorded, please call torch.fx.wrap('len') at " + "module scope") + + @classmethod + def __torch_function__(cls, orig_method, types, args=None, kwargs=None): + args = args if args else () + kwargs = kwargs if kwargs else {} + + tracers : Dict[Any, None] = {} + + def find_tracer(a): + if isinstance(a, cls): + tracers[a.tracer] = None + torch.fx.node.map_aggregate(args, find_tracer) + torch.fx.node.map_aggregate(kwargs, find_tracer) + + if len(tracers) > 1: + raise RuntimeError(f'Found multiple different tracers {list(tracers.keys())} while ' + f'trying to trace operations {orig_method}') + tracer = next(iter(tracers.keys())) + + if isinstance(orig_method, torch._C.ScriptMethod): + args = (orig_method.owner,) + args + return tracer.create_proxy('call_method', orig_method.name, args, kwargs) + if torch.overrides.is_tensor_method_or_property(orig_method): + return tracer.create_proxy('call_method', orig_method.__name__, args, kwargs) + else: + if isinstance(orig_method, torch._ops.HigherOrderOperator): + # TODO: Define how to symbolically trace HigherOrderOperators + raise RuntimeError("Unable to symbolically trace HigherOrderOperators") + return tracer.create_proxy('call_function', orig_method, args, kwargs, + name=tracer.graph._target_to_str(orig_method.__name__)) + + +@compatibility(is_backward_compatible=True) +class Attribute(Proxy): + @compatibility(is_backward_compatible=True) + def __init__(self, root: Proxy, attr: str): + self.root = root + self.attr = attr + self.tracer = root.tracer + self._node: Optional[Node] = None + + @property + def node(self): + # the node for attributes is added lazily, since most will just be method calls + # which do not rely on the getitem call + if self._node is None: + self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node + return self._node + + def __call__(self, *args, **kwargs): + return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs) + + +@compatibility(is_backward_compatible=False) +class ParameterProxy(Proxy): + """ + A special proxy which lets "shape", "size", "dim", and a few other + attribute accesses pass through to the underlying module parameter object, + so that conditional tests on these attributes will not throw exception during tracing + """ + def __init__(self, tracer: TracerBase, node: Node, name, param): + super().__init__(node, tracer) + assert(isinstance(param, torch.nn.Parameter)) + self.param = param + self.name = name + + def __repr__(self) -> str: + return f'ParameterProxy({self.name})' + + @property + def shape(self): + return self.param.shape + + def size(self): + return self.param.size() + + def dim(self): + return self.param.dim() + + @property + def ndim(self): + return self.param.ndim + + def numel(self): + return self.param.numel() + + def nelement(self): + return self.param.nelement() + + +for method in magic_methods: + def _scope(method): + def impl(*args, **kwargs): + tracer = args[0].tracer + target = getattr(operator, method) + return tracer.create_proxy('call_function', target, args, kwargs) + impl.__name__ = method + as_magic = f'__{method.strip("_")}__' + setattr(Proxy, as_magic, impl) + _scope(method) + +def _define_reflectable(orig_method_name): + method_name = f'__r{orig_method_name.strip("_")}__' + + def impl(self, rhs): + target = getattr(operator, orig_method_name) + return self.tracer.create_proxy('call_function', target, (rhs, self), {}) + impl.__name__ = method_name + impl.__qualname__ = method_name 
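    # Added commentary (not in the original source): once registered, a
    # reflected operator such as ``1 + p`` on a Proxy ``p`` dispatches to
    # ``Proxy.__radd__`` and records ``call_function(operator.add, (1, p))``
    # in the owning tracer's graph, mirroring the forward magic methods
    # installed in the loop above.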
+ setattr(Proxy, method_name, impl) + +for orig_method_name in reflectable_magic_methods: + _define_reflectable(orig_method_name) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..e13f421a5d21b065de87b4ddcb8b9cb8e8ddbea1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py @@ -0,0 +1,343 @@ +from .graph_module import GraphModule +from .graph import Graph +from .node import Node +from ._symbolic_trace import symbolic_trace +from ._compatibility import compatibility + +import copy +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Union +import torch + +__all__ = ['Match', 'replace_pattern', 'replace_pattern_with_filters', "ReplacedPatterns"] + +@compatibility(is_backward_compatible=True) +class Match(NamedTuple): + # Node from which the match was found + anchor: Node + # Maps nodes in the pattern subgraph to nodes in the larger graph + nodes_map: Dict[Node, Node] + +@compatibility(is_backward_compatible=False) +@dataclass +class ReplacedPatterns: + # Node from which the match was found + anchor: Node + # Maps nodes in the pattern subgraph to nodes in the larger graph + nodes_map: Dict[Node, Node] + # List of nodes that were added into the graph + replacements: List[Node] + +def _replace_attributes(gm: GraphModule, replacement: torch.nn.Module) -> None: + gm.delete_all_unused_submodules() + + if isinstance(replacement, GraphModule): + replacement.graph.lint() + + def try_get_attr(gm: torch.nn.Module, target: str) -> Optional[Any]: + module_path, _, attr_name = target.rpartition(".") + mod: torch.nn.Module = gm.get_submodule(module_path) + attr = getattr(mod, attr_name, None) + return attr + + for node in gm.graph.nodes: + if node.op == "call_module" or node.op == "get_attr": + + gm_attr = try_get_attr(gm, node.target) + replacement_attr = try_get_attr(replacement, node.target) + + # CASE 1: This target already exists as an attribute in our + # result GraphModule. Whether or not it exists in + # `replacement`, the existing submodule takes precedence. + if gm_attr is not None: + continue + + # CASE 2: The target exists as an attribute in `replacement` + # only, so we need to copy it over. + elif replacement_attr is not None: + new_attr = copy.deepcopy(replacement_attr) + if isinstance(replacement_attr, torch.nn.Module): + gm.add_submodule(node.target, new_attr) + else: + setattr(gm, node.target, new_attr) + + # CASE 3: The target doesn't exist as an attribute in `gm` + # or `replacement` + else: + raise RuntimeError("Attempted to create a \"", node.op, + "\" node during subgraph rewriting " + f"with target {node.target}, but " + "the referenced attribute does not " + "exist in the replacement GraphModule") + + gm.graph.lint() + + +@compatibility(is_backward_compatible=True) +def replace_pattern( + gm: GraphModule, + pattern: Union[Callable, GraphModule], + replacement: Union[Callable, GraphModule] +) -> List[Match]: + """ + Matches all possible non-overlapping sets of operators and their + data dependencies (``pattern``) in the Graph of a GraphModule + (``gm``), then replaces each of these matched subgraphs with another + subgraph (``replacement``). 
+ + Args: + ``gm``: The GraphModule that wraps the Graph to operate on + ``pattern``: The subgraph to match in ``gm`` for replacement + ``replacement``: The subgraph to replace ``pattern`` with + + Returns: + List[Match]: A list of ``Match`` objects representing the places + in the original graph that ``pattern`` was matched to. The list + is empty if there are no matches. ``Match`` is defined as: + + .. code-block:: python + + class Match(NamedTuple): + # Node from which the match was found + anchor: Node + # Maps nodes in the pattern subgraph to nodes in the larger graph + nodes_map: Dict[Node, Node] + + Examples: + + .. code-block:: python + + import torch + from torch.fx import symbolic_trace, subgraph_rewriter + + class M(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x, w1, w2): + m1 = torch.cat([w1, w2]).sum() + m2 = torch.cat([w1, w2]).sum() + return x + torch.max(m1) + torch.max(m2) + + def pattern(w1, w2): + return torch.cat([w1, w2]).sum() + + def replacement(w1, w2): + return torch.stack([w1, w2]) + + traced_module = symbolic_trace(M()) + + subgraph_rewriter.replace_pattern(traced_module, pattern, replacement) + + The above code will first match ``pattern`` in the ``forward`` + method of ``traced_module``. Pattern-matching is done based on + use-def relationships, not node names. For example, if you had + ``p = torch.cat([a, b])`` in ``pattern``, you could match + ``m = torch.cat([a, b])`` in the original ``forward`` function, + despite the variable names being different (``p`` vs ``m``). + + The ``return`` statement in ``pattern`` is matched based on its + value only; it may or may not match to the ``return`` statement in + the larger graph. In other words, the pattern doesn't have to extend + to the end of the larger graph. + + When the pattern is matched, it will be removed from the larger + function and replaced by ``replacement``. If there are multiple + matches for ``pattern`` in the larger function, each non-overlapping + match will be replaced. In the case of a match overlap, the first + found match in the set of overlapping matches will be replaced. + ("First" here being defined as the first in a topological ordering + of the Nodes' use-def relationships. In most cases, the first Node + is the parameter that appears directly after ``self``, while the + last Node is whatever the function returns.) + + One important thing to note is that the parameters of the + ``pattern`` Callable must be used in the Callable itself, + and the parameters of the ``replacement`` Callable must match + the pattern. The first rule is why, in the above code block, the + ``forward`` function has parameters ``x, w1, w2``, but the + ``pattern`` function only has parameters ``w1, w2``. ``pattern`` + doesn't use ``x``, so it shouldn't specify ``x`` as a parameter. + As an example of the second rule, consider replacing + + .. code-block:: python + + def pattern(x, y): + return torch.neg(x) + torch.relu(y) + + with + + .. code-block:: python + + def replacement(x, y): + return torch.relu(x) + + In this case, ``replacement`` needs the same number of parameters + as ``pattern`` (both ``x`` and ``y``), even though the parameter + ``y`` isn't used in ``replacement``. + + After calling ``subgraph_rewriter.replace_pattern``, the generated + Python code looks like this: + + .. 
code-block:: python + + def forward(self, x, w1, w2): + stack_1 = torch.stack([w1, w2]) + sum_1 = stack_1.sum() + stack_2 = torch.stack([w1, w2]) + sum_2 = stack_2.sum() + max_1 = torch.max(sum_1) + add_1 = x + max_1 + max_2 = torch.max(sum_2) + add_2 = add_1 + max_2 + return add_2 + """ + match_and_replacements = _replace_pattern(gm, pattern, replacement) + return [Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements] + + +# Experimental API, not backward compatible +@compatibility(is_backward_compatible=False) +def replace_pattern_with_filters( + gm: GraphModule, + pattern: Union[Callable, Graph, GraphModule], + replacement: Union[Callable, Graph, GraphModule], + match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None, # type: ignore[name-defined] + ignore_literals: bool = False, +) -> List[ReplacedPatterns]: + """ + See replace_pattern for documentation. This function is an overload with an additional match_filter argument. + + Args: + ``match_filters``: A list of functions that take in + (match: InternalMatch, original_graph: Graph, pattern_graph: Graph) and return a boolean indicating + whether the match satisfies the condition. + See matcher_utils.py for definition of InternalMatch. + """ + + return _replace_pattern(gm, pattern, replacement, match_filters, ignore_literals) + + +def _replace_pattern( + gm: GraphModule, + pattern: Union[Callable, Graph, GraphModule], + replacement: Union[Callable, Graph, GraphModule], + match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None, # type: ignore[name-defined] + ignore_literals: bool = False, +) -> List[ReplacedPatterns]: + + from torch.fx.passes.utils.matcher_utils import SubgraphMatcher, InternalMatch + + if match_filters is None: + match_filters = [] + + # Get the graphs for `gm`, `pattern`, `replacement` + original_graph: Graph = gm.graph + + if isinstance(pattern, GraphModule): + pattern_graph = pattern.graph + elif isinstance(pattern, Graph): + pattern_graph = pattern + else: + pattern_graph = symbolic_trace(pattern).graph + + if isinstance(replacement, GraphModule): + replacement_graph = replacement.graph + elif isinstance(replacement, Graph): + replacement_graph = replacement + else: + replacement_graph = symbolic_trace(replacement).graph + + matcher = SubgraphMatcher(pattern_graph, match_output=False, match_placeholder=False, + remove_overlapping_matches=True, ignore_literals=ignore_literals) + _matches: List[InternalMatch] = matcher.match(original_graph) + + # Filter out matches that don't match the filter + _matches = [ + m for m in _matches + if all(match_filter(m, original_graph, pattern_graph) + for match_filter in match_filters) + ] + + replacement_placeholders = [n for n in replacement_graph.nodes if n.op == "placeholder"] + + # As we progressively replace nodes, we'll need to keep track of how the match results should change + match_changed_node: Dict[Node, Node] = {} + + match_and_replacements = [] + for match in _matches: + + # Build connecting between replacement graph's input and original graph input producer node + + # Initialize `val_map` with mappings from placeholder nodes in + # `replacement` to their corresponding node in `original_graph` + assert len(match.placeholder_nodes) == len(replacement_placeholders) + val_map: Dict[Node, Node] = {} + for rn, gn in zip(replacement_placeholders, match.placeholder_nodes): + if isinstance(gn, Node): + val_map[rn] = match_changed_node.get(gn, gn) + if gn != val_map[rn]: + # Update 
match.placeholder_nodes and match.nodes_map with the node that replaced gn + gn_ind = match.placeholder_nodes.index(gn) + match.placeholder_nodes[gn_ind] = match_changed_node[gn] + map_key = list(match.nodes_map.keys())[list(match.nodes_map.values()).index(gn)] + match.nodes_map[map_key] = match_changed_node[gn] + else: + val_map[rn] = gn + + # Copy the replacement graph over + user_nodes: Set[Node] = set() + for n in match.returning_nodes: + for user in n.users: + user_nodes.add(user) + assert user_nodes, "The returning_nodes should have at least one user node" + + if len(user_nodes) == 1: + first_user_node = next(iter(user_nodes)) + else: + # If there are multiple user nodes, we need to find the first user node + # in the current execution order of the `original_graph` + for n in original_graph.nodes: + if n in user_nodes: + first_user_node = n + break + + with original_graph.inserting_before(first_user_node): + copied_returning_nodes = original_graph.graph_copy(replacement_graph, val_map) + + if isinstance(copied_returning_nodes, Node): + copied_returning_nodes = (copied_returning_nodes, ) + + # Get a list of nodes that have been replaced into the graph + replacement_nodes: List[Node] = [v for v in val_map.values() if v not in match.placeholder_nodes] + + # Hook the output Node of the replacement subgraph in to the + # original Graph at the correct location + assert len(match.returning_nodes) == len(copied_returning_nodes) + for gn, copied_node in zip(match.returning_nodes, copied_returning_nodes): + gn.replace_all_uses_with(copied_node) + match_changed_node[gn] = copied_node + # Remove the original nodes + for node in reversed(pattern_graph.nodes): + if node.op != "placeholder" and node.op != "output": + gn = match.nodes_map[node] + gm.graph.erase_node(gn) + + match_and_replacements.append( + ReplacedPatterns( + anchor=match.anchors[0], + nodes_map=match.nodes_map, + replacements=replacement_nodes + ) + ) + + # Update the passed-in GraphModule to reflect the new state of + # `original_graph` + gm.recompile() + + # If `replacement` was an nn.Module, we'll need to make sure that + # all the submodules have been copied over correctly + if isinstance(replacement, torch.nn.Module): + _replace_attributes(gm, replacement) + + return match_and_replacements diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/tensor_type.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/tensor_type.py new file mode 100644 index 0000000000000000000000000000000000000000..c822a38ec78e44ecf3835aa7ef18cc682d8df522 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/tensor_type.py @@ -0,0 +1,104 @@ +from torch.fx.experimental.unification import Var # type: ignore[attr-defined] + +from ._compatibility import compatibility + + +@compatibility(is_backward_compatible=False) +class TensorType: + """ + TensorType defines a type for tensors, which consists of a list of dimensions. 
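    Each dimension may be a concrete ``int`` or the special ``Dyn`` value
    defined below, which marks the dimension as unknown.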
+ Example: + class M(torch.nn.Module): + def forward(self, x:TensorType((1,2,3, Dyn)), y:TensorType((1,2,3, Dyn))): + return torch.add(x, y) + """ + + def __init__(self, dim): + self.__origin__ = TensorType + self.__args__ = dim + + def __repr__(self): + return f'TensorType[{self.__args__}]' + + def __eq__(self, other): + if isinstance(other, self.__class__): + return list(self.__args__) == list(other.__args__) + else: + return False + + @staticmethod + def __class_getitem__(*args): + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + return TensorType(tuple(args)) + + +class _DynType: + """ + _DynType defines a type which stands for the absence of type information. + """ + def __init__(self): + self.__name__ = '_DynType' + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __str__(self): + return "Dyn" + + def __repr__(self): + return "Dyn" + + +Dyn = _DynType() + +@compatibility(is_backward_compatible=False) +def is_consistent(t1, t2): + """ + A binary relation denoted by ~ that determines if t1 is consistent with t2. + The relation is reflexive, symmetric but not transitive. + returns True if t1 and t2 are consistent and False otherwise. + Example: + Dyn ~ TensorType((1,2,3)) + int ~ Dyn + int ~ int + TensorType((1,Dyn,3)) ~ TensorType((1,2,3)) + """ + + if t1 == t2: + return True + + if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var): + return True + + if isinstance(t1, TensorType) and isinstance(t2, TensorType): + return len(t1.__args__) == len(t2.__args__) and \ + all(is_consistent(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__)) + else: + return False + + +@compatibility(is_backward_compatible=False) +def is_more_precise(t1, t2): + """ + A binary relation denoted by <= that determines if t1 is more precise than t2. + The relation is reflexive and transitive. + returns True if t1 is more precise than t2 and False otherwise. 
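    In particular, every type is more precise than ``Dyn``:
    ``is_more_precise(TensorType((1, 2, 3)), Dyn)`` is ``True``, while
    ``is_more_precise(Dyn, TensorType((1, 2, 3)))`` is ``False``.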
+ Example: + Dyn >= TensorType((1,2,3)) + int >= Dyn + int >= int + TensorType((1,Dyn,3)) <= TensorType((1,2,3)) + """ + if t1 == t2: + return True + + if isinstance(t2, _DynType): + return True + + if isinstance(t1, TensorType) and isinstance(t2, TensorType): + return len(t1.__args__) == len(t2.__args__) and \ + all(is_more_precise(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__)) + + else: + return False diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/traceback.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/traceback.py new file mode 100644 index 0000000000000000000000000000000000000000..cad750b19ff4567e3557b4235705b88fbb4bd9e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/traceback.py @@ -0,0 +1,100 @@ +import traceback +from contextlib import contextmanager +from typing import List, Any, Dict +from ._compatibility import compatibility + +__all__ = ['preserve_node_meta', 'has_preserved_node_meta', + 'set_stack_trace', 'set_grad_fn_seq_nr', 'reset_grad_fn_seq_nr', + 'format_stack', 'set_current_meta', 'get_current_meta'] + +current_meta: Dict[str, Any] = {} +should_preserve_node_meta = False + + +@compatibility(is_backward_compatible=False) +@contextmanager +def preserve_node_meta(): + global should_preserve_node_meta + + saved_should_preserve_node_meta = should_preserve_node_meta + try: + should_preserve_node_meta = True + yield + finally: + should_preserve_node_meta = saved_should_preserve_node_meta + + +@compatibility(is_backward_compatible=False) +def set_stack_trace(stack : List[str]): + global current_meta + + if should_preserve_node_meta and stack: + current_meta["stack_trace"] = "".join(stack) + + +@compatibility(is_backward_compatible=False) +def set_grad_fn_seq_nr(seq_nr): + global current_meta + + if should_preserve_node_meta: + # The seq_nr is captured by eager mode in the grad_fn during forward + current_meta["prev_grad_fn_seq_nr"] = current_meta.get("grad_fn_seq_nr", None) + current_meta["prev_in_grad_fn"] = current_meta.get("in_grad_fn", None) + current_meta["grad_fn_seq_nr"] = seq_nr + current_meta["in_grad_fn"] = True + + +@compatibility(is_backward_compatible=False) +def reset_grad_fn_seq_nr(): + # NB: reset state properly, this would be helpful towards supporting + # reentrant autograd if we actually wanted to do that. 
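    # Added commentary: callers are expected to bracket each autograd
    # Node's execution roughly as
    #     set_grad_fn_seq_nr(seq_nr)
    #     ...   # run the op
    #     reset_grad_fn_seq_nr()
    # so the prev_* fields saved by set_grad_fn_seq_nr restore the outer
    # (grad_fn_seq_nr, in_grad_fn) state here.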
+ global current_meta + + if should_preserve_node_meta: + if current_meta["prev_grad_fn_seq_nr"] is None: + assert current_meta["prev_in_grad_fn"] is None + del current_meta["grad_fn_seq_nr"] + del current_meta["in_grad_fn"] + current_meta["grad_fn_seq_nr"] = current_meta["prev_grad_fn_seq_nr"] + current_meta["in_grad_fn"] = current_meta["prev_in_grad_fn"] + + +@compatibility(is_backward_compatible=False) +def format_stack() -> List[str]: + if should_preserve_node_meta: + return [current_meta.get("stack_trace", "")] + else: + # fallback to traceback.format_stack() + return traceback.format_list(traceback.extract_stack()[:-1]) + + +@compatibility(is_backward_compatible=False) +def has_preserved_node_meta() -> bool: + return should_preserve_node_meta + + +@compatibility(is_backward_compatible=False) +@contextmanager +def set_current_meta(node): + global current_meta + if should_preserve_node_meta and node.meta: + saved_meta = current_meta + try: + current_meta = node.meta.copy() + + # Append (node.name, node.target) onto "from_node" for provenance tracking + if "from_node" not in current_meta: + current_meta["from_node"] = [(node.name, node.target)] + elif current_meta["from_node"][-1][0] != node.name: + current_meta["from_node"].append((node.name, node.target)) + + yield + finally: + current_meta = saved_meta + else: + yield + + +@compatibility(is_backward_compatible=False) +def get_current_meta() -> Dict[str, Any]: + return current_meta diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fd83d88a3e3e72385726851b1fdd5fc09086a473 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__init__.py @@ -0,0 +1,87 @@ +from .quantize import * # noqa: F403 +from .observer import * # noqa: F403 +from .qconfig import * # noqa: F403 +from .fake_quantize import * # noqa: F403 +from .fuse_modules import fuse_modules +from .stubs import * # noqa: F403 +from .quant_type import * # noqa: F403 +from .quantize_jit import * # noqa: F403 + +# from .quantize_fx import * +from .quantization_mappings import * # noqa: F403 +from .fuser_method_mappings import * # noqa: F403 + + +def default_eval_fn(model, calib_data): + r""" + Default evaluation function takes a torch.utils.data.Dataset or a list of + input Tensors and run the model on the dataset + """ + for data, target in calib_data: + model(data) + + +__all__ = [ + "QuantWrapper", + "QuantStub", + "DeQuantStub", + # Top level API for eager mode quantization + "quantize", + "quantize_dynamic", + "quantize_qat", + "prepare", + "convert", + "prepare_qat", + # Top level API for graph mode quantization on TorchScript + "quantize_jit", + "quantize_dynamic_jit", + "_prepare_ondevice_dynamic_jit", + "_convert_ondevice_dynamic_jit", + "_quantize_ondevice_dynamic_jit", + # Top level API for graph mode quantization on GraphModule(torch.fx) + # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx + # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx', + "QuantType", # quantization type + # custom module APIs + "get_default_static_quant_module_mappings", + "get_static_quant_module_class", + "get_default_dynamic_quant_module_mappings", + "get_default_qat_module_mappings", + "get_default_qconfig_propagation_list", + "get_default_compare_output_module_list", + "get_quantized_operator", + "get_fuser_method", + # Sub functions for `prepare` and `swap_module` + 
"propagate_qconfig_", + "add_quant_dequant", + "swap_module", + "default_eval_fn", + # Observers + "ObserverBase", + "WeightObserver", + "HistogramObserver", + "observer", + "default_observer", + "default_weight_observer", + "default_placeholder_observer", + "default_per_channel_weight_observer", + # FakeQuantize (for qat) + "default_fake_quant", + "default_weight_fake_quant", + "default_fixed_qparams_range_neg1to1_fake_quant", + "default_fixed_qparams_range_0to1_fake_quant", + "default_per_channel_weight_fake_quant", + "default_histogram_fake_quant", + # QConfig + "QConfig", + "default_qconfig", + "default_dynamic_qconfig", + "float16_dynamic_qconfig", + "float_qparams_weight_only_qconfig", + # QAT utilities + "default_qat_qconfig", + "prepare_qat", + "quantize_qat", + # module transformations + "fuse_modules", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8bd8e12a3dfa9b552d00cd5f7aee5ea98423f8d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddd47067b8cf3cf5e3d400e2404fba9b860384da Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1ae4d6e4e6a68435128630c18cb98b0df272a1f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb131bb268e7b6e63fab555c644d3ea825a367a6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85c268352c79b2f90ea1331aa79ed795b6ee6853 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3acae1ad852bde57c1de4e5a12c4e52105b65558 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..129fb72228c122451bf865359391296582f0bff4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d67a185de6e76697f1e5f1fd8c9e5e1397080289 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b190f46873038fc863885a108aaee12577a899a7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfb37c50afee518858099cd3c3e3629361ba6646 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3510095ab675d5519b8414badc56196269f1e96 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1d7d4c5dda27be23be90a63f9348c03aff0572a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c686aa3f0f8de0b01df571c58992dda3e712931a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e31defe98f31e2c191eac0188d9f022baab4de21 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80d2a6464fe2db7c819148d42c5355381a0be0cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8088048003dc01126f38baf6aa3d9a588dc2128 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..49ccc8e69523f7dbee2335b788a2cb3a7db618a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py @@ -0,0 +1,28 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/ns/_numeric_suite.py`, while adding an import statement +here. +""" + +from torch.ao.ns._numeric_suite import ( + _convert_tuple_to_list, + _dequantize_tensor_list, + _find_match, + _get_logger_dict_helper, + _is_identical_module_type, + compare_model_outputs, + compare_model_stub, + compare_weights, + get_logger_dict, + get_matching_activations, + Logger, + NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST, + OutputLogger, + prepare_model_outputs, + prepare_model_with_stubs, + Shadow, + ShadowLogger, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..55cd7085740d0ce8de79491acbfc4888ebba21f8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py @@ -0,0 +1,26 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/ns/_numeric_suite_fx.py`, while adding an import statement +here. 
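For example, legacy code using
``from torch.quantization._numeric_suite_fx import extract_weights``
keeps working and resolves to the implementation re-exported below from
``torch.ao.ns._numeric_suite_fx``.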
+""" + +from torch.ao.ns._numeric_suite_fx import ( + _add_loggers_impl, + _add_loggers_one_model, + _add_shadow_loggers_impl, + _extract_logger_info_one_model, + _extract_weights_impl, + _extract_weights_one_model, + add_loggers, + add_shadow_loggers, + extend_logger_results_with_comparison, + extract_logger_info, + extract_shadow_logger_info, + extract_weights, + NSTracer, + OutputLogger, + RNNReturnType, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..ce1bfc8fd43123ade0207e85cb17c71057354363 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py @@ -0,0 +1,132 @@ +import torch + + +# Pack pairs of int4 values into int8, in row major order; first int4 +# value goes into lower order bits, and second int4 value into higher +# order bits of resulting int8 value. +def pack_int4_to_int8(weight): + assert weight.dim() == 2 + assert weight.shape[1] % 2 == 0 + assert weight.dtype == torch.int8 + return ((weight[:, 1::2] & 0xF) << 4) | (weight[:, 0::2] & 0xF) + + +# Unpack quandruples of bits in int8 values into int4 values, in row +# major order; lower 4 bits go into first int4 value goes, and upper 4 +# bits go into second int4 value. +def unpack_int8_to_int4(weight): + assert weight.dim() == 2 + assert weight.dtype == torch.int8 + return torch.stack((weight & 0xF, (weight >> 4) & 0xF), dim=2).view( + weight.shape[0], 2 * weight.shape[1] + ) + + +# Transpose the weight matrix, and then reorder its elements according +# to underlying requirements of CUTLASS library, so that it could be +# used for CUTLASS-based mixed datatypes linear operation. 
+def quantized_weight_reorder_for_mixed_dtypes_linear_cutlass( + weight, dtypeq, transpose=False +): + assert weight.dim() == 2 + assert weight.dtype == torch.int8 + assert dtypeq == torch.int8 or dtypeq == torch.quint4x2 + assert weight.device.type == "cuda" + + device = weight.device + + # subbyte_transpose + if not transpose: + if dtypeq == torch.int8: + outp = weight.T + elif dtypeq == torch.quint4x2: + outp = pack_int4_to_int8(unpack_int8_to_int4(weight.view(torch.int8)).T) + else: + outp = weight + + ncols, nrows = outp.shape + assert nrows % (32 if dtypeq == torch.quint4x2 else 64) == 0 + assert ncols % 64 == 0 + + # permute_B_rows_for_mixed_gemm + # (permute cols actually, as transpose is applied first here) + if dtypeq == torch.quint4x2: + cols_permuted = ( + torch.tensor( + [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], + device=device, + ) + + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand( + nrows // 16, 16 + ) + ).view(-1) + else: + cols_permuted = ( + torch.tensor( + [0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15], + device=device, + ) + + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand( + nrows // 16, 16 + ) + ).view(-1) + outp = outp.index_copy(1, cols_permuted, outp) + + # interleave_column_major_tensor + magic0 = 4 if dtypeq == torch.quint4x2 else 2 + magic1 = 32 // magic0 + + tmp0 = ( + (torch.arange(0, ncols // magic0, device=device) * (nrows // 4 * magic0)) + .view(-1, 1) + .repeat(1, nrows // 4 * magic0) + .view(-1) + ) + tmp1 = ( + (torch.arange(0, nrows // 4 // magic1, device=device) * (magic0 * magic1)) + .view(-1, 1) + .repeat(1, magic1) + .view(-1) + .repeat(ncols) + ) + tmp2 = ( + (torch.arange(0, magic0, device=device) * magic1) + .view(-1, 1) + .repeat(1, nrows // 4) + .view(-1) + .repeat(ncols // magic0) + ) + tmp3 = torch.arange(0, magic1, device=device).repeat(nrows // 4 * ncols // magic1) + + outp_offsets = tmp0 + tmp1 + tmp2 + tmp3 + + tmp = outp.view(-1).view(torch.int32) + outp = torch.zeros_like(tmp) + outp.scatter_(0, outp_offsets, tmp) + outp = outp.view(weight.dtype) + + # add_bias_and_interleave_quantized_tensor_inplace + tmp = outp.view(-1) + + outp = torch.empty_like(tmp) + if dtypeq == torch.int8: + tmp = (tmp.to(torch.int) + 128).to(tmp.dtype) + outp[0::4] = tmp[0::4] + outp[1::4] = tmp[2::4] + outp[2::4] = tmp[1::4] + outp[3::4] = tmp[3::4] + elif dtypeq == torch.quint4x2: + tmp0 = ((tmp & 0xF) + 8) & 0xF + tmp0 = (tmp0[1::2] << 4) | tmp0[0::2] + tmp1 = (((tmp >> 4) & 0xF) + 8) & 0xF + tmp1 = (tmp1[1::2] << 4) | tmp1[0::2] + outp[0::4] = tmp0[0::2] + outp[1::4] = tmp0[1::2] + outp[2::4] = tmp1[0::2] + outp[3::4] = tmp1[1::2] + + if dtypeq == torch.quint4x2: + nrows *= 2 + ncols //= 2 + + return outp.view(nrows, ncols).view(torch.uint8) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fake_quantize.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fake_quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..69a5d730bfb68e89e24beb04ad13fd3fa5881ae9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fake_quantize.py @@ -0,0 +1,32 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/fake_quantize.py`, while adding an import statement +here. 
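For example, ``from torch.quantization.fake_quantize import FakeQuantize``
continues to resolve, via the re-exports below, to
``torch.ao.quantization.fake_quantize.FakeQuantize``.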
+""" + +from torch.ao.quantization.fake_quantize import ( + _is_fake_quant_script_module, + _is_per_channel, + _is_per_tensor, + _is_symmetric_quant, + default_fake_quant, + default_fixed_qparams_range_0to1_fake_quant, + default_fixed_qparams_range_neg1to1_fake_quant, + default_fused_act_fake_quant, + default_fused_per_channel_wt_fake_quant, + default_fused_wt_fake_quant, + default_histogram_fake_quant, + default_per_channel_weight_fake_quant, + default_weight_fake_quant, + disable_fake_quant, + disable_observer, + enable_fake_quant, + enable_observer, + FakeQuantize, + FakeQuantizeBase, + FixedQParamsFakeQuantize, + FusedMovingAvgObsFakeQuantize, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fuse_modules.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fuse_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..6b704fa8094e8b367e9eba47102863ba845415b9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fuse_modules.py @@ -0,0 +1,22 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/fuse_modules.py`, while adding an import statement +here. +""" + +# TODO: These functions are not used outside the `fuse_modules.py` +# Keeping here for now, need to remove them later. +from torch.ao.quantization.fuse_modules import ( + _fuse_modules, + _get_module, + _set_module, + fuse_known_modules, + fuse_modules, + get_fuser_method, +) + +# for backward compatiblity +from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn, fuse_conv_bn_relu diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb13ac96271fa7b926cc703918984760e6ede15 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/fuser_method_mappings.py`, while adding an import statement +here. +""" +from torch.ao.quantization.fuser_method_mappings import ( + _DEFAULT_OP_LIST_TO_FUSER_METHOD, + fuse_conv_bn, + fuse_conv_bn_relu, + fuse_linear_bn, + get_fuser_method, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c01cbd457374c27e40b07daca5ae1644a701767d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__init__.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. 
+""" + +from torch.ao.quantization.fx.convert import convert +from torch.ao.quantization.fx.fuse import fuse + +# omitting files that's unlikely to be used right now, for example +# the newly added lower_to_fbgemm etc. +from torch.ao.quantization.fx.prepare import prepare diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72271ac6710cf29b30b67dd03a1ffb6dd7c4291b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e5457a8972a78922f1718418b9c3e783ef46cec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f39473c20ac4a4059b6c7e8863bb08f9f35ff07 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fusion_patterns.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fusion_patterns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5faa415adef6c3b7afa57b82c884ae3c572cb0b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fusion_patterns.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/graph_module.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..838de8f683d9035b526cff5718e8685d425e5996 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/graph_module.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9892bc726a262ab07e27ee834a1f245a41abeca2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/prepare.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/prepare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cb83ab7a792932a06ae7e805665890ff945f122 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/prepare.cpython-310.pyc differ diff 
--git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_patterns.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_patterns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebbfa07970629155b4fb5c7d58205b74a90586ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_patterns.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_types.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2d2e796a903b75a8b3adb515310e0f10855bebf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc31d40fab8a853656839f2b14aeac83856903f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/fuse.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/fuse.py new file mode 100644 index 0000000000000000000000000000000000000000..67527080304fb31ddc54fe254533e2196f77a616 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/fuse.py @@ -0,0 +1,9 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please add it to the +appropriate files under `torch/ao/quantization/fx/` and add an import statement +here. +""" +from torch.ao.quantization.fx.fuse import fuse diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/graph_module.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/graph_module.py new file mode 100644 index 0000000000000000000000000000000000000000..a71e980a57ba141bdc5bbe9b283d69582eb8fd82 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/graph_module.py @@ -0,0 +1,17 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please add it to the +appropriate files under `torch/ao/quantization/fx/` and add an import statement +here.
+""" +from torch.ao.quantization.fx.graph_module import ( + _is_observed_module, + _is_observed_standalone_module, + FusedGraphModule, + GraphModule, + ObservedGraphModule, + ObservedStandaloneGraphModule, + QuantizedGraphModule, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/match_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/match_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8b49f7c645d8d1bc3a154d62a1295a90b155f986 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/match_utils.py @@ -0,0 +1,14 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx.match_utils import ( + _find_matches, + _is_match, + _MatchResult, + MatchAllNode, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/pattern_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/pattern_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..26954833bb48eb5a807ac31cc558c5282cb63201 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/pattern_utils.py @@ -0,0 +1,34 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx.pattern_utils import ( + _register_fusion_pattern, + _register_quant_pattern, + get_default_fusion_patterns, + get_default_output_activation_post_process_map, + get_default_quant_patterns, + QuantizeHandler, +) + +# QuantizeHandler.__module__ = _NAMESPACE +_register_fusion_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils" +get_default_fusion_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils" +_register_quant_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils" +get_default_quant_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils" +get_default_output_activation_post_process_map.__module__ = ( + "torch.ao.quantization.fx.pattern_utils" +) + +# __all__ = [ +# "QuantizeHandler", +# "_register_fusion_pattern", +# "get_default_fusion_patterns", +# "_register_quant_pattern", +# "get_default_quant_patterns", +# "get_default_output_activation_post_process_map", +# ] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/quantization_types.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/quantization_types.py new file mode 100644 index 0000000000000000000000000000000000000000..a422cdd3142e04c8d16f495cc6cd65823451810b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/fx/quantization_types.py @@ -0,0 +1,9 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. 
+""" +from torch.ao.quantization.utils import Pattern, QuantizerCls diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/observer.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/observer.py new file mode 100644 index 0000000000000000000000000000000000000000..6e6c7c1917c83433fc19f016140b25d060284535 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/observer.py @@ -0,0 +1,36 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/observer.py`, while adding an import statement +here. +""" +from torch.ao.quantization.observer import ( + _is_activation_post_process, + _is_per_channel_script_obs_instance, + _ObserverBase, + _PartialWrapper, + _with_args, + _with_callable_args, + ABC, + default_debug_observer, + default_dynamic_quant_observer, + default_float_qparams_observer, + default_histogram_observer, + default_observer, + default_per_channel_weight_observer, + default_placeholder_observer, + default_weight_observer, + get_observer_state_dict, + HistogramObserver, + load_observer_state_dict, + MinMaxObserver, + MovingAverageMinMaxObserver, + MovingAveragePerChannelMinMaxObserver, + NoopObserver, + ObserverBase, + PerChannelMinMaxObserver, + PlaceholderObserver, + RecordingObserver, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/qconfig.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/qconfig.py new file mode 100644 index 0000000000000000000000000000000000000000..6bb7e14110cb9cdc4e9c2c418c6776ea6445f0d3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/qconfig.py @@ -0,0 +1,30 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/qconfig.py`, while adding an import statement +here. +""" +from torch.ao.quantization.qconfig import ( + _add_module_to_qconfig_obs_ctr, + _assert_valid_qconfig, + default_activation_only_qconfig, + default_debug_qconfig, + default_dynamic_qconfig, + default_per_channel_qconfig, + default_qat_qconfig, + default_qat_qconfig_v2, + default_qconfig, + default_weight_only_qconfig, + float16_dynamic_qconfig, + float16_static_qconfig, + float_qparams_weight_only_qconfig, + get_default_qat_qconfig, + get_default_qconfig, + per_channel_dynamic_qconfig, + QConfig, + qconfig_equals, + QConfigAny, + QConfigDynamic, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/quant_type.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quant_type.py new file mode 100644 index 0000000000000000000000000000000000000000..8555f03792661f39c85c8facf3f911786cc25d0f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quant_type.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quant_type.py`, while adding an import statement +here. 
+""" + +from torch.ao.quantization.quant_type import _get_quant_type_to_str, QuantType diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..8b44a980ce82fbfa5a81ad906499806cf99b876f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py @@ -0,0 +1,29 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantization_mappings.py`, while adding an import statement +here. +""" +from torch.ao.quantization.quantization_mappings import ( + _get_special_act_post_process, + _has_special_act_post_process, + _INCLUDE_QCONFIG_PROPAGATE_LIST, + DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, + DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS, + DEFAULT_MODULE_TO_ACT_POST_PROCESS, + DEFAULT_QAT_MODULE_MAPPINGS, + DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS, + DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, + get_default_compare_output_module_list, + get_default_dynamic_quant_module_mappings, + get_default_float_to_quantized_operator_mappings, + get_default_qat_module_mappings, + get_default_qconfig_propagation_list, + get_default_static_quant_module_mappings, + get_dynamic_quant_module_class, + get_quantized_operator, + get_static_quant_module_class, + no_observer_set, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..600d3a46fed0346e3ae8909872cd5bf3c733860c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize.py @@ -0,0 +1,30 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantize.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.quantize import ( + _add_observer_, + _convert, + _get_observer_dict, + _get_unique_devices_, + _is_activation_post_process, + _observer_forward_hook, + _propagate_qconfig_helper, + _register_activation_post_process_hook, + _remove_activation_post_process, + _remove_qconfig, + add_quant_dequant, + convert, + prepare, + prepare_qat, + propagate_qconfig_, + quantize, + quantize_dynamic, + quantize_qat, + swap_module, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize_fx.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..649142c7a7eee9885d96b37f70e582f3ea9a9f8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize_fx.py @@ -0,0 +1,26 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantize_fx.py`, while adding an import statement +here. 
+""" + +from torch.ao.quantization.fx.graph_module import ObservedGraphModule +from torch.ao.quantization.quantize_fx import ( + _check_is_graph_module, + _convert_fx, + _convert_standalone_module_fx, + _fuse_fx, + _prepare_fx, + _prepare_standalone_module_fx, + _swap_ff_with_fxff, + convert_fx, + fuse_fx, + prepare_fx, + prepare_qat_fx, + QuantizationTracer, + Scope, + ScopeContextManager, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize_jit.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..aa627dc7bb51ef7ea1fde7e2e5da283c9f6c8900 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/quantize_jit.py @@ -0,0 +1,26 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantize_jit.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.quantize_jit import ( + _check_forward_method, + _check_is_script_module, + _convert_jit, + _prepare_jit, + _prepare_ondevice_dynamic_jit, + _quantize_jit, + convert_dynamic_jit, + convert_jit, + fuse_conv_bn_jit, + prepare_dynamic_jit, + prepare_jit, + quantize_dynamic_jit, + quantize_jit, + script_qconfig, + script_qconfig_dict, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/stubs.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/stubs.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fd5c63683dc572c35cabc202ee4ddb2b0053c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/stubs.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/stubs.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.stubs import DeQuantStub, QuantStub, QuantWrapper diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quantization/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d51d58f38d7462713f84ab62427852c1dd8e52c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/quantization/utils.py @@ -0,0 +1,29 @@ +# flake8: noqa: F401 +r""" +Utils shared by different modes of quantization (eager/graph) + +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/utils.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.utils import ( + activation_dtype, + activation_is_int8_quantized, + activation_is_statically_quantized, + calculate_qmin_qmax, + check_min_max_valid, + get_combined_dict, + get_qconfig_dtypes, + get_qparam_dict, + get_quant_type, + get_swapped_custom_module_class, + getattr_from_fqn, + is_per_channel, + is_per_tensor, + weight_dtype, + weight_is_quantized, + weight_is_statically_quantized, +)