diff --git a/ckpts/universal/global_step120/zero/20.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/20.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..996fe1e0b7682c6538cf32f093844658cb9008c8 --- /dev/null +++ b/ckpts/universal/global_step120/zero/20.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:804bfc39c42c3e6b7b273b3e574c18f8266b71f5cc2b7abb8c54419ce359d7ef +size 33555627 diff --git a/ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..79af15fcb6ef6bf24814b6adee613492269197f1 --- /dev/null +++ b/ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaf3920b1d3a46da3fea9d23caff1bda67796db5391c115c2742b5dbae1c7c79 +size 9387 diff --git a/ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..e6726015d500d802555e9b6ac531bbf43954b7c3 --- /dev/null +++ b/ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d62ada06ca7be555e98bd9b2d36730aff6115d6d9def590f39f56c0ff90c3580 +size 9293 diff --git a/venv/lib/python3.10/site-packages/torch/_dispatch/__init__.py b/venv/lib/python3.10/site-packages/torch/_dispatch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b0df375b88beca7638d7ce929a7c52c0f344fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..573c4a8bbc13779f94caa3ea356802c4a85cee3e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_dispatch/python.py b/venv/lib/python3.10/site-packages/torch/_dispatch/python.py new file mode 100644 index 0000000000000000000000000000000000000000..d80839dc7e4729b948914991c9ddf8e7f3e01cb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_dispatch/python.py @@ -0,0 +1,178 @@ +import itertools +import unittest.mock +from contextlib import contextmanager +from typing import Iterator + +import torch +import torch._C +import torch._ops +import torch.utils._python_dispatch +import torch.utils._pytree as pytree + +__all__ = ["enable_python_dispatcher", "no_python_dispatcher", "enable_pre_dispatch"] + +no_python_dispatcher = torch._C._DisablePythonDispatcher +enable_python_dispatcher = torch._C._EnablePythonDispatcher +enable_pre_dispatch = torch._C._EnablePreDispatch + 
+CROSSREF_FUNCTIONALIZE = False + + +def all_py_loaded_overloads() -> Iterator[torch._ops.OpOverload]: + """ + Warning: the set of overloads this will report is very subtle. It is precisely + the set of torch.ops functions that have actually been accessed from Python + (e.g., we actually called torch.ops.aten.blah at some point. This is DIFFERENT + from the set of registered operators, which will in general be a larger set, + as this would include all operators which we ran C++ static initializers or + Python operator registration on. This does not eagerly populate the list on + torch.ops.aten; this list is lazy! + + In other words, this is good for traversing over everything that has an + OpOverload object allocated in Python. We use it for cache invalidation, but + don't rely on this list being complete. + + Note that even if we did report all C++ registered overloads, this isn't guaranteed + to be complete either, as a subsequent lazy load of a library which triggers more + registrations could add more things to the set. + """ + for ns in torch.ops: + packets = getattr(torch.ops, ns) + for op_name in packets: + packet = getattr(packets, op_name) + for overload in packet: + yield getattr(packet, overload) + + +@contextmanager +def suspend_functionalization(): + f_tls = torch._C._dispatch_tls_is_dispatch_key_included( + torch._C.DispatchKey.Functionalize + ) + f_rv = torch._C._functionalization_reapply_views_tls() + if f_tls: + torch._disable_functionalization() + try: + yield + finally: + if f_tls: + torch._enable_functionalization(reapply_views=f_rv) + + +def check_tensor_metadata_matches(nv, rv, desc): + assert callable(desc) + assert nv.size() == rv.size(), f"{desc()}: sizes {nv.size()} != {rv.size()}" + assert nv.dtype == rv.dtype, f"{desc()}: dtype {nv.dtype} != {rv.dtype}" + same_strides, idx = torch._prims_common.check_significant_strides( + nv, rv, only_cuda=False + ) + assert ( + same_strides + ), f"{desc()}: strides {nv.stride()} != {rv.stride()} (mismatch at index {idx})" + + +def check_metadata_matches(n, r, desc): + assert callable(desc) + n_vals, n_spec = pytree.tree_flatten(n) + r_vals, r_spec = pytree.tree_flatten(r) + # TODO: test the specs match; empirically sometimes we have a tuple + # on one side and a list on the other + assert len(n_vals) == len(r_vals), f"{len(n_vals)} != {len(r_vals)}" + for i, nv, rv in zip(range(len(n_vals)), n_vals, r_vals): + if not isinstance(rv, torch.Tensor): + continue + check_tensor_metadata_matches(nv, rv, lambda: f"{desc()} output {i}") + + +class Lit: + def __init__(self, s): + self.s = s + + def __repr__(self): + return self.s + + +def _fmt(a: object) -> object: + if isinstance(a, torch.Tensor): + return Lit( + f"torch.empty_strided({tuple(a.size())}, {a.stride()}, dtype={a.dtype})" + ) + else: + return a + + +def make_crossref_functionalize(op, final_key): + from torch._subclasses.fake_tensor import FakeTensorMode + + # This case is pretty weird, suppress it for now + if op == torch.ops.aten.lift_fresh.default: + return final_key + + def handler(*args, **kwargs): + fake_mode = FakeTensorMode() + + def fakeify_defun(t): + if isinstance(t, torch.Tensor): + if torch._is_functional_tensor(t): + r = torch._from_functional_tensor(t) + # NB: This assumes that the inner tensor sizes/strides match + # the outer tensor sizes/strides. 
This doesn't necessarily have to + # be the case, see discussion at + # https://github.com/pytorch/pytorch/pull/87610/files/401ddeda1d769bedc88a12de332c7357b60e51a4#r1007264456 + assert t.size() == r.size() + assert t.stride() == r.stride() + else: + r = t + # TODO: suppress guards + return fake_mode.from_tensor(r) + return t + + def maybe_detach(t): + if isinstance(t, torch.Tensor): + return t.detach() + else: + return t + + # TODO: This probably does the wrong thing if you're running other + # substantive modes with the normal op outside here + with torch.utils._python_dispatch._disable_current_modes(), suspend_functionalization(): + f_args, f_kwargs = pytree.tree_map(fakeify_defun, (args, kwargs)) + orig_f_args, orig_f_kwargs = pytree.tree_map( + maybe_detach, (f_args, f_kwargs) + ) + with fake_mode: + f_r = op(*f_args, **f_kwargs) + r = op._op_dk(final_key, *args, **kwargs) + + def desc(): + fmt_args = ", ".join( + itertools.chain( + (repr(pytree.tree_map(_fmt, a)) for a in orig_f_args), + ( + f"{k}={pytree.tree_map(_fmt, v)}" + for k, v in orig_f_kwargs.items() + ), + ) + ) + return f"{op}({fmt_args})" + + check_metadata_matches(f_r, r, desc) + return r + + return handler + + +# NB: enabling this is slow, don't do it in a hot loop. This is purely +# for debugging purposes. +@contextmanager +def enable_crossref_functionalize(): + for op in all_py_loaded_overloads(): + op._uncache_dispatch(torch._C.DispatchKey.Functionalize) + try: + with enable_python_dispatcher(), unittest.mock.patch( + "torch._dispatch.python.CROSSREF_FUNCTIONALIZE", True + ): + yield + finally: + for op in all_py_loaded_overloads(): + op._uncache_dispatch(torch._C.DispatchKey.Functionalize) diff --git a/venv/lib/python3.10/site-packages/torch/nested/_internal/__init__.py b/venv/lib/python3.10/site-packages/torch/nested/_internal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4750067448b46aafd1a389cbab5283583bbd6a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f47fb072809a3751c3544a7fa6b5e6adf703a38 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc47adedc6304147adf5d0db47ff400b8615b1d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f7a9c1754e44221308772a97ed1499a85d5081f6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__init__.py b/venv/lib/python3.10/site-packages/torch/profiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c4145fd91f619a1f7ff9c4dca6fae5c1c16abe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/profiler/__init__.py @@ -0,0 +1,48 @@ +r""" +PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference. +Profiler's context manager API can be used to better understand what model operators are the most expensive, +examine their input shapes and stack traces, study device kernel activity and visualize the execution trace. + +.. note:: + An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated. + +""" +import os + +from torch._C._autograd import _supported_activities, DeviceType, kineto_available +from torch._C._profiler import _ExperimentalConfig, ProfilerActivity, RecordScope +from torch.autograd.profiler import KinetoStepTracker, record_function +from torch.optim.optimizer import register_optimizer_step_post_hook + +from .profiler import ( + _KinetoProfile, + ExecutionTraceObserver, + profile, + ProfilerAction, + schedule, + supported_activities, + tensorboard_trace_handler, +) + +__all__ = [ + "profile", + "schedule", + "supported_activities", + "tensorboard_trace_handler", + "ProfilerAction", + "ProfilerActivity", + "kineto_available", + "DeviceType", + "record_function", + "ExecutionTraceObserver", +] + +from . import itt + + +def _optimizer_post_hook(optimizer, args, kwargs): + KinetoStepTracker.increment_step("Optimizer") + + +if os.environ.get("KINETO_USE_DAEMON", None): + _ = register_optimizer_step_post_hook(_optimizer_post_hook) diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b4ac5939ae07c61925e89002b19a27401521ce6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..009a80047064067230c26a476364ba2a72a22420 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..317ff27c734bcf5707f452841dc8d987bae1b633 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py b/venv/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..f091dd47d031126efcf62060df0215a685da673e --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py @@ -0,0 +1,1202 @@ +import collections +import dataclasses +import enum +import itertools as it +import logging +from typing import ( + Any, + cast, + DefaultDict, + Dict, + Iterator, + List, + Optional, + Set, + Tuple, + Union, +) + +from typing_extensions import Literal + +import torch +from torch._C import FunctionSchema +from torch._C._autograd import _ProfilerResult +from torch._C._profiler import ( + _EventType, + _ExtraFields_Allocation, + _ExtraFields_TorchOp, + _ProfilerEvent, + _TensorMetadata, + RecordScope, +) +from torch._utils import _element_size +from torch.profiler import _utils + +KeyAndID = Tuple["Key", int] +TensorAndID = Tuple["TensorKey", int] + +log = logging.getLogger(__name__) + + +class Category(enum.Enum): + INPUT = enum.auto() + TEMPORARY = enum.auto() + ACTIVATION = enum.auto() + GRADIENT = enum.auto() + AUTOGRAD_DETAIL = enum.auto() + PARAMETER = enum.auto() + OPTIMIZER_STATE = enum.auto() + + +_CATEGORY_TO_COLORS = { + Category.PARAMETER: "darkgreen", + Category.OPTIMIZER_STATE: "goldenrod", + Category.INPUT: "black", + Category.TEMPORARY: "mediumpurple", + Category.ACTIVATION: "red", + Category.GRADIENT: "mediumblue", + Category.AUTOGRAD_DETAIL: "royalblue", + None: "grey", +} + +_CATEGORY_TO_INDEX = {c: i for i, c in enumerate(_CATEGORY_TO_COLORS)} + + +class Action(enum.Enum): + PREEXISTING = enum.auto() + CREATE = enum.auto() + INCREMENT_VERSION = enum.auto() + DESTROY = enum.auto() + + +_ACTION_TO_INDEX = {i: i.value for i in Action} + + +@dataclasses.dataclass(eq=True, unsafe_hash=False, frozen=True) +class Key: + device: torch.device + + +@dataclasses.dataclass +class _Storage: + """Bundle storage pointer and id. + + All profiling logic should use `allocation_id`, however it is useful to + print storage pointers for debugging and unit tests sometimes look up + values using the storage data pointer of a live Tensor.""" + + ptr: int + allocation_id: int + + def __repr__(self) -> str: + return f"{hex(self.ptr):>18} ({self.allocation_id})" + + def __eq__(self, other: object) -> bool: + return isinstance(other, _Storage) and self.allocation_id == other.allocation_id + + def __hash__(self) -> int: + return hash(self.allocation_id) + + +@dataclasses.dataclass(eq=True, unsafe_hash=True, frozen=True) +class TensorKey(Key): + """Hashable identifier for a storage which has been asigned an ID. + + A detailed description of Tensor IDs and why they are needed is given in + `torch/csrc/profiler/collection.h` when `TensorID` is declared. To + summarize, multiple Storage buffers can map to the same logical Tensor. + This dataclass is used to refer to a concrete in-memory StorageImpl of + a Tensor. 
+ """ + + id: int + storage: _Storage + + def __repr__(self) -> str: + return f"id={self.id}: {repr(self.storage):<24} ({self.device})" + + def __lt__(self, other: "TensorKey") -> bool: + return self._as_sortable < other._as_sortable + + @staticmethod + def _make( + tensor_id: Optional[int], + storage_ptr: Optional[int], + allocation_id: Optional[int], + device: torch.device, + ) -> Optional["TensorKey"]: + if ( + tensor_id is not None + and storage_ptr is not None + and allocation_id is not None + ): + return TensorKey(device, tensor_id, _Storage(storage_ptr, allocation_id)) + return None + + @classmethod + def from_allocation(cls, alloc: _ExtraFields_Allocation) -> Optional["TensorKey"]: + return cls._make(alloc.id, alloc.ptr, alloc.allocation_id, alloc.device) + + @classmethod + def from_tensor(cls, t: Optional[_TensorMetadata]) -> Optional["TensorKey"]: + if t is not None: + return cls._make(t.id, t.storage_data_ptr, t.allocation_id, t.device) + return None + + @property + def _as_sortable(self) -> Tuple[int, int, str, int]: + return self.id, self.storage.allocation_id, self.device.type, self.device.index + + +def _extract_parameters_and_gradients( + node: _ProfilerEvent, +) -> Iterator[Tuple[Optional[TensorKey], Optional[TensorKey]]]: + children = node.children + + # AccumulateGrad is used in the Autograd engine to handle gradient updates. + # There are two possible cases: + # 1) This is a newly created gradient Tensor. In that case there is nothing + # to accumulate, so autograd simply detaches the Tensor. + # + # 2) There is a preexisting gradient Tensor and we need to add the newly + # computed update. This is done with an in-place add (aten::add_) op. + # (The underscore suffix denotes "in-place".) + if ( + node.typed[0] == _EventType.TorchOp + and node.typed[1].scope == RecordScope.BACKWARD_FUNCTION + # TODO(robieta): Move away from load bearing names + and node.name == "torch::autograd::AccumulateGrad" + and children + and children[0].typed[0] == _EventType.TorchOp + and children[0].name in ("aten::detach", "aten::add_") + and children[0].typed[1].inputs + and isinstance(children[0].typed[1].inputs[0], _TensorMetadata) + ): + yield None, TensorKey.from_tensor(children[0].typed[1].inputs[0]) + + # We directly instrument `torch.nn.Module` and `torch.optim.Optimizer` + # NOTE: The values captured by the python tracer are cached; they can be + # used to build up labels but do not imply that a Tensor was live at + # a particular time. 
+ elif node.typed[0] == _EventType.PyCall: + typed_fields = node.typed[1] + assert typed_fields.module is None or typed_fields.optimizer is None + if typed_fields.module is not None: + for _, p, p_grad in typed_fields.module.parameters: + yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad) + + if typed_fields.optimizer is not None: + for p, p_grad, _ in typed_fields.optimizer.parameters: + yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad) + + +def extract_parameters(node: _ProfilerEvent) -> Iterator[TensorKey]: + for p, p_grad in _extract_parameters_and_gradients(node): + if p is not None: + yield p + + +def extract_gradients( + node: _ProfilerEvent, +) -> Iterator[Tuple[Optional[TensorKey], TensorKey]]: + for p, p_grad in _extract_parameters_and_gradients(node): + if p_grad is not None: + yield p, p_grad + + +def get_scopes(event: Optional[_ProfilerEvent]) -> Tuple[RecordScope, ...]: + scopes = [] + while event: + if event.typed[0] == _EventType.TorchOp: + scopes.append(event.typed[1].scope) + event = event.parent + return tuple(scopes) + + +class SchemaMatcher: + """Lookup operator schema based on profiled name. + + When profiling we record the operator's name but not the schema. However + some analysis requires that information. Fortunately we can look up + registered schema from the recorded name. We do not, however, record the + overload and so we must compare the profiled arguments with all overloads + to determine viable matches. + + Note: Once https://github.com/pytorch/pytorch/issues/78871 is completed + this code will be obsolete. + """ + + @classmethod + def inputs_are_mutable(cls, t: _ExtraFields_TorchOp) -> Tuple[Optional[bool], ...]: + """Determine which inputs may have mutated based on function schema. + + Note that we don't need to resolve down to a single schema to perform + this analysis. An input is mutable if it is mutable in any overload. In + practice, however, it is overwhelmingly common to match a single + overload. If we cannot find any valid schema then we must be + conservative and assume all inputs are mutable. + """ + mutable: Optional[List[bool]] = None + for schema in cls.match_schemas(t): + mutable = mutable or [False for _ in schema.arguments] + for i, arg in enumerate(schema.arguments): + mutable[i] |= getattr(arg.alias_info, "is_write", False) + + return tuple(mutable or (None for _ in t.inputs)) + + @classmethod + def match_schemas(cls, t: _ExtraFields_TorchOp) -> Tuple[FunctionSchema, ...]: + signature = tuple( + # Tensor + TensorKey.from_tensor(i) if isinstance(i, _TensorMetadata) + # + # TensorList + else [TensorKey.from_tensor(j) for j in i] if isinstance(i, list) + # + # Scalar and uncaptured inputs. + else i + for i in t.inputs + ) + + def matches(schema) -> bool: + return len(schema.arguments) == len(signature) and all( + cls._types_match(observed, schema_arg.type) + for observed, schema_arg in zip(signature, schema.arguments) + ) + + return tuple(s for s in cls.lookup_schemas(t.name) or () if matches(s)) + + @classmethod + def _types_match(cls, observed, schema_type) -> bool: + if isinstance(schema_type, torch._C.OptionalType): + schema_type = schema_type.getElementType() + return observed is None or cls._types_match(observed, schema_type) + + if isinstance(schema_type, torch._C.AnyType): + return True + + if schema_type.isSubtypeOf(torch._C.ListType.ofTensors()): + return isinstance(observed, list) and all( + isinstance(i, TensorKey) for i in observed + ) + + type_map: Tuple[Tuple[Any, Union[type, Tuple[type, ...]]], ...] 
= ( + (torch._C.TensorType, TensorKey), + (torch._C.NoneType, type(None)), + (torch._C.BoolType, bool), + (torch._C.IntType, int), + (torch._C.FloatType, float), + (torch._C.ComplexType, complex), + (torch._C.NumberType, (bool, int, float, complex)), + ) + + for jit_type, py_types in type_map: + if isinstance(schema_type, jit_type): + return isinstance(observed, py_types) + + # Profiler only records a subset of possible argument types. If we + # reach this point then the schema must call for a type that profiler + # does not record. Thus, the schema can only be a match if `observed` + # is also None. + return observed is None + + @staticmethod + def lookup_schemas(name: str) -> Optional[Tuple[FunctionSchema, ...]]: + # TODO(robieta): + # _jit_get_schemas_for_operator is quite expensive. (~100us / call) + # Consider adding `functools.lru_cache` if that becomes an issue. + + try: + # Schema lookup will throw if `name` is malformed. (For example, + # schemas must be namespaced and schema lookup will fail if name + # does not include "::".) We simply catch the exception and return + # `None` to denote that `name` cannot be an operator name. + # + # Note that record_function annotations also go through this path, + # so it is expected that some names will not correspond to PyTorch + # operators. + if "::" not in name: + return None + return tuple(torch._C._jit_get_schemas_for_operator(name)) + except RuntimeError: + return None + + +class OpTree: + def __init__(self, result: _ProfilerResult) -> None: + self._root_nodes = result.experimental_event_tree() + self._sorted_nodes = tuple(sorted(self.dfs(), key=lambda x: x.start_time_ns)) + + def dfs(self, *args, **kwargs) -> Iterator[_ProfilerEvent]: + yield from _utils.traverse_dfs(self._root_nodes, *args, **kwargs) + + @property + def sorted_nodes(self) -> Tuple[_ProfilerEvent, ...]: + return self._sorted_nodes + + +class SizeMap: + def __init__(self, op_tree: OpTree) -> None: + self._values: Dict[TensorKey, int] = {} + + for node in op_tree.sorted_nodes: + if node.typed[0] == _EventType.TorchOp: + for t in self._flat_tensor_inputs(node.typed[1]): + self._update_values(t) + + elif node.typed[0] == _EventType.PyCall: + typed_fields = node.typed[1] + assert typed_fields.module is None or typed_fields.optimizer is None + if typed_fields.module is not None: + for _, p, p_grad in typed_fields.module.parameters: + self._update_values(p) + self._update_values(p_grad) + + if typed_fields.optimizer is not None: + for p, p_grad, state in typed_fields.optimizer.parameters: + self._update_values(p) + self._update_values(p_grad) + for _, t in state: + self._update_values(t) + + allocations: Dict[TensorKey, int] = {} + for node in op_tree.sorted_nodes: + if node.typed[0] == _EventType.Allocation: + alloc_fields = node.typed[1] + key = TensorKey.from_allocation(alloc_fields) + if key: + new_size = abs(alloc_fields.alloc_size) + prior_size = allocations.setdefault(key, new_size) + + # It is possible to resize Storage in PyTorch, however we + # key on data pointer so most resizes will be treated as a + # change in storage. The one corner case that cannot be + # handled is `realloc` which successfully resizes the + # storage. At time of writing this is not done anywhere in + # the core PyTorch codebase. + if prior_size != new_size: + delta = f"{prior_size} vs. 
{new_size}" + log.warning("Mismatch between allocation and free: %s", delta) + + self._values.update(allocations) + + def _update_values(self, t: Optional[_TensorMetadata]) -> None: + key = TensorKey.from_tensor(t) + if key is not None and t is not None and t.layout == torch.strided: + # Scalars are represented as zero dim Tensors + n = max(i[0] * i[1] for i in zip(t.sizes or [1], t.strides or [1])) + + num_bytes = n * _element_size(t.dtype) + assert num_bytes >= 0, f"{num_bytes}" + self._values[key] = max(self._values.get(key, 0), num_bytes) + + @staticmethod + def _flat_tensor_inputs(op: _ExtraFields_TorchOp) -> Iterator[_TensorMetadata]: + for i in op.inputs: + if isinstance(i, _TensorMetadata): + yield i + elif isinstance(i, list): + yield from i + + def __getitem__(self, key: TensorKey): + return self._values[key] + + +@dataclasses.dataclass() +class DataFlowEdge: + input_version: Optional[int] = None + mutated: Optional[bool] = False + + @property + def is_allocation(self) -> bool: + return self.input_version is None + + @property + def is_deletion(self) -> bool: + return self.mutated is None + + +class DataFlowNode: + def __init__(self, event: _ProfilerEvent, graph: "DataFlowGraph") -> None: + self._event = event + self._graph = graph + self._edges: Dict[TensorKey, DataFlowEdge] = self._determine_edges() + + for key, edge in self._edges.items(): + if edge.mutated and not edge.is_allocation: + self._graph.bump(key) + + # Make sure the version bumping behavior matches what we expect. + versions = {k: (v, self._graph.lookup(k)) for k, v in self.outputs.items()} + assert all(i == j for i, j in versions.values()), f"{versions}, {self._edges}" + + def _determine_edges(self) -> Dict[TensorKey, DataFlowEdge]: + subtree = tuple(_utils.traverse_dfs([self._event])) + + # Start by populating edges from op inputs and outputs. + mutable_by_key: Dict[Optional[TensorKey], Set[Optional[bool]]] = {} + for op in (i.typed[1] for i in subtree if i.typed[0] == _EventType.TorchOp): + for op_input, mutable in zip( + op.inputs, SchemaMatcher.inputs_are_mutable(op) + ): + # Tensor + if isinstance(op_input, _TensorMetadata): + key = TensorKey.from_tensor(op_input) + mutable_by_key.setdefault(key, set()).add(mutable) + + # TensorList + elif isinstance(op_input, list): + for op_input_i in op_input: + key = TensorKey.from_tensor(op_input_i) + mutable_by_key.setdefault(key, set()).add(mutable) + + edges: DefaultDict[Optional[TensorKey], DataFlowEdge] + edges = collections.defaultdict(DataFlowEdge) + for key, mutable_set in mutable_by_key.items(): + if key is not None: + edges[key].input_version = self._graph.lookup(key) if key else -1 + + # We consider an op to be mutated if we encounter a schema where it + # is a mutable argument OR if it is ambiguous. (We never explicitly + # see it in any schema.) + mutated = (True in mutable_set) or (tuple(mutable_set) == (None,)) + edges[key].mutated = mutated + + # Then handle deletions. Note that deleting a Tensor implicitly adds + # it as an input edge. + for i in subtree: + if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size < 0: + key = TensorKey.from_allocation(i.typed[1]) + edge = edges[key] + assert key is None or edge.mutated is not None, f"Double delete: {key}" + edge.mutated = None + edge.input_version = self._graph.lookup(key) if key else -1 + + # And finally handle allocations. This step must be last, because the + # previous two steps optimistically add input edges. 
+ for i in subtree: + if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size > 0: + edges[TensorKey.from_allocation(i.typed[1])].input_version = None + + # We don't need to sort the inputs, but it makes debugging and unit tests nicer. + return dict(sorted((k, v) for k, v in edges.items() if k is not None)) + + @property + def inputs(self) -> Dict[TensorKey, Tuple[bool, int]]: + return { + # MyPy can't see through `is_allocation` to know that + # `v.input_version` is not None. + k: (bool(v.mutated), cast(int, v.input_version)) + for k, v in self._edges.items() + if not v.is_allocation + } + + @property + def outputs(self) -> Dict[TensorKey, int]: + return { + k: 0 if v.input_version is None else v.input_version + 1 + for k, v in self._edges.items() + if (v.is_allocation and not v.is_deletion) or v.mutated + } + + @property + def intermediates(self) -> Tuple[TensorKey, ...]: + return tuple( + k for k, v in self._edges.items() if v.is_allocation and v.is_deletion + ) + + @property + def start_time(self) -> int: + return self._event.start_time_ns + + +class DataFlowGraph: + def __init__(self, op_tree: OpTree) -> None: + self._op_tree = op_tree + self._leaf_events = self._extract_leaf_events(op_tree) + self._active_version: Dict[TensorKey, Optional[int]] = {} + self._flow_nodes = [DataFlowNode(e, self) for e in self.leaf_events] + self._flow_nodes.sort(key=lambda x: x.start_time) + self.validate() + + @property + def flow_nodes(self) -> Tuple[DataFlowNode, ...]: + return tuple(self._flow_nodes) + + def validate(self): + # Check that each (Tensor, version) pair has a unique creation node + outputs: Set[Tuple[TensorKey, int]] = set() + for node in self.flow_nodes: + node_outputs = set(node.outputs.items()) + duplicates = outputs & node_outputs + assert not duplicates, f"{node._event.name} {node._edges} {duplicates}" + outputs |= node_outputs + + # And check that `self._nodes` forms a valid topologically sorted DAG. + tensor_versions: Dict[TensorKey, int] = {} + for node in self.flow_nodes: + for key, (_, version) in node.inputs.items(): + expected = tensor_versions.get(key, 0) + assert expected == version, (expected, version) + + for key, version in node.outputs.items(): + prior_version = tensor_versions.get(key, version) + assert version >= prior_version, (version, prior_version) + tensor_versions[key] = version + + @property + def leaf_events(self) -> Tuple[_ProfilerEvent, ...]: + return self._leaf_events + + @staticmethod + def _extract_leaf_events(op_tree: OpTree) -> Tuple[_ProfilerEvent, ...]: + """Partially traverse the op tree and extract top level ops. + + Consider the following code: + ``` + with record_function("My annotation"): + x.zero_() + y.zero_() + ``` + + The op tree (assuming no Autograd) will look like: + + TorchOp: "My annotation" + TorchOp: zero_ + TorchOp: fill_ + TorchOp: zero_ + TorchOp: fill_ + + The recursive structure of operator calls makes data flow unwieldy. + In order to simplify analysis we would like to select the highest level + ops to represent in the graph. In this case those are the `zero_` ops; + the fact that `fill_` is called is an implementation detail. We also + do not want to group everything under "My annotation" as this could + create overly coarse bundles and lose critical semantics. + + To address this issue we walk over the graph and select the topmost + torch ops ** which match at least one operator schema **. These form + the leaves of the first pass through the op tree. 
(As well as any + allocations or frees which do are not part of a kernel.) These events + form the logical nodes in our data flow graph. + """ + + leaf_events: List[_ProfilerEvent] = [] + + def leaf_op(e: _ProfilerEvent) -> bool: + return e.typed[0] == _EventType.TorchOp and ( + e.typed[1].scope == RecordScope.BACKWARD_FUNCTION + or bool(SchemaMatcher.match_schemas(e.typed[1])) + ) + + def children_fn(e: _ProfilerEvent): + if leaf_op(e) or e.tag == _EventType.Allocation: + leaf_events.append(e) + return [] + + return e.children + + for _ in op_tree.dfs(children_fn=children_fn): + pass + + return tuple(sorted(leaf_events, key=lambda x: x.start_time_ns)) + + def lookup(self, key: TensorKey) -> int: + version = self._active_version.setdefault(key, 0) + assert version is not None + return version + + def bump(self, key: TensorKey) -> None: + prior_version = self._active_version.get(key, None) + assert prior_version is not None + self._active_version[key] = prior_version + 1 + + def delete(self, key: TensorKey) -> None: + assert self._active_version.setdefault(key, 0) is not None + self._active_version[key] = None + + +@dataclasses.dataclass +class CategoryElement: + by_id: Optional[Category] = None + by_key: Dict[TensorKey, Category] = dataclasses.field(default_factory=dict) + by_version: Dict[TensorAndID, Category] = dataclasses.field(default_factory=dict) + + # Used by unit tests to check internals. (And consequently by + # MemoryProfile.lookup) This should not be used in any other capacity. + _by_id_keyset: Set[TensorKey] = dataclasses.field(default_factory=set) + + +@dataclasses.dataclass +class CategoryDict: + _values: DefaultDict[int, CategoryElement] = dataclasses.field( + default_factory=lambda: collections.defaultdict(CategoryElement) + ) + + def set_by_id(self, key: TensorKey, category: Category) -> None: + self._values[key.id].by_id = category + self._values[key.id]._by_id_keyset.add(key) + + def set_by_key(self, key: TensorKey, category: Category) -> None: + self._values[key.id].by_key[key] = category + + def set_by_version(self, key: TensorKey, version: int, category: Category) -> None: + self._values[key.id].by_version[(key, version)] = category + + def setdefault_by_version( + self, key: TensorKey, version: int, category: Category + ) -> None: + self._values[key.id].by_version.setdefault((key, version), category) + + def get(self, key: Key, version: int) -> Optional[Category]: + if isinstance(key, Key) and not isinstance(key, TensorKey): + return None + element = self._values[key.id] + return ( + element.by_id + or element.by_key.get(key, None) + or element.by_version.get((key, version), None) + ) + + +class MemoryProfile: + def __init__(self, result: _ProfilerResult) -> None: + self._op_tree = OpTree(result) + self._data_flow_graph = DataFlowGraph(self._op_tree) + self._size_map = SizeMap(self._op_tree) + self._categories = CategoryDict() + + self._set_gradients_and_temporaries() + self._set_parameters_using_python_tracer() + self._set_inputs() + self._set_parameters_using_data_flow() + self._set_activations() + self._set_optimizer_state() + self._set_autograd_detail() + + @property + def timeline(self) -> Tuple[Tuple[int, Action, KeyAndID, int], ...]: + output: List[Tuple[int, Action, KeyAndID, int]] = [] + allocation_times: Dict[Tuple[TensorKey, bool], int] = {} + live_unknown: Dict[Tuple[int, torch.device], Literal[True]] = {} + for event in self._op_tree.dfs(): + if event.typed[0] == _EventType.Allocation: + alloc_fields = event.typed[1] + alloc_size = 
alloc_fields.alloc_size + is_allocation = alloc_size > 0 + t = event.start_time_ns + + tkey = TensorKey.from_allocation(alloc_fields) + if tkey is not None: + allocation_times[(tkey, is_allocation)] = t + + else: + key = Key(alloc_fields.device) + ptr_and_device = (alloc_fields.ptr, key.device) + if is_allocation: + if ptr_and_device in live_unknown: + output.append( + (t, Action.INCREMENT_VERSION, (key, 0), alloc_size) + ) + else: + live_unknown[ptr_and_device] = True + output.append((t, Action.CREATE, (key, 0), alloc_size)) + else: + output.append((t, Action.DESTROY, (key, 0), -alloc_size)) + if not live_unknown.pop(ptr_and_device, False): + output.append( + (-1, Action.PREEXISTING, (key, 0), -alloc_size) + ) + + snapshot = self._category_snapshot() + last_version = dict(sorted(snapshot.keys())) + + events: List[Tuple[int, Action, TensorAndID]] = [ + (-1, Action.PREEXISTING, (key, version)) + for key, version in snapshot.keys() + if (key, True) not in allocation_times and version == 0 + ] + + for node in self._data_flow_graph.flow_nodes: + for key, edge in node._edges.items(): + if edge.is_allocation: + t = allocation_times[(key, True)] + events.append((t, Action.CREATE, (key, 0))) + + elif edge.mutated: + t = node._event.start_time_ns + version = edge.input_version + assert version is not None + events.append((t, Action.INCREMENT_VERSION, (key, version))) + + if edge.is_deletion: + t = allocation_times[(key, False)] + events.append((t, Action.DESTROY, (key, last_version[key]))) + + output.extend( + (time, action, (key, version), self._size_map[key]) + for time, action, (key, version) in events + ) + + output.sort(key=lambda x: (x[0], x[1].value)) + return tuple(output) + + def _is_gradient(self, *args, **kwargs) -> bool: + return self._categories.get(*args, **kwargs) == Category.GRADIENT + + def _category_snapshot(self) -> Dict[TensorAndID, Optional[Category]]: + all_tensor_versions: Set[TensorAndID] = set() + + for node in self._data_flow_graph.flow_nodes: + all_tensor_versions.update(((k, v) for k, (_, v) in node.inputs.items())) + all_tensor_versions.update((key, 0) for key in node.intermediates) + all_tensor_versions.update(node.outputs.items()) + + for i in self._categories._values.values(): + all_tensor_versions.update((key, 0) for key in i._by_id_keyset) + + return { + (key, version): self._categories.get(key, version) + for key, version in sorted(all_tensor_versions) + } + + def _any_version_depends_on_gradient(self) -> Set[int]: + """Extract IDs of Tensors which depend or will depend on a gradient. + + Note that this weakened definition of "depends" requires us to loop + over the data flow graph multiple times because it allows dependency + information to flow backward through edges and removes the guarantee + that nodes are topologically sorted. (Or indeed, even that a valid + topological order exists.) Put another way, we have converted an + acyclic data flow graph into a cyclic graph and we are attempting to + partition cycles involving a gradient from the rest of the graph. 
+ """ + depends_on_gradient: Set[int] = set() + while True: + start_size = len(depends_on_gradient) + for node in self._data_flow_graph.flow_nodes: + ids = tuple( + key.id + for key, (_, version) in node.inputs.items() + if self._categories.get(key, version) + in (Category.GRADIENT, Category.PARAMETER) + or key.id in depends_on_gradient + ) + + if ids: + depends_on_gradient.update(ids) + depends_on_gradient.update(key.id for key in node.outputs) + + # We are guaranteed to exit because there is a finite set of + # TensorAndID pairs. In practice we do not expect to loop more than + # three times: once to identify the core parameter update loop, + # once to fold the first step into that loop, and a third time + # where no new elements are added. + if len(depends_on_gradient) == start_size: + return depends_on_gradient + + def _set_gradients_and_temporaries(self) -> None: + """Mark Tensors which are unambiguous and simple to reason about.""" + + # Gradients are straightforward to detect. We directly check the + # `.grad` property in the Python tracer, and we can detect any new + # gradient Tensors from `AccumulateGrad` ops. + for event in self._op_tree.dfs(): + for _, p_grad in extract_gradients(event): + self._categories.set_by_id(p_grad, Category.GRADIENT) + + # Similarly, temporary Tensors are easy to identify and are useful to + # flag since they can make memory use "spikier" than one would + # otherwise expect. + for node in self._data_flow_graph.flow_nodes: + for i in node.intermediates: + self._categories.set_by_key(i, Category.TEMPORARY) + + def _set_parameters_using_python_tracer(self) -> None: + for event in self._op_tree.dfs(): + for p in extract_parameters(event): + if p is not None: + self._categories.set_by_id(p, Category.PARAMETER) + + def _set_inputs(self) -> None: + """Mark inputs based on which Tensors are updated using gradients. + + The process for differentiating between inputs and activations is more + involved. Most Tensors in a training loop depend on at least one + gradient: parameters depend on them through updates, and activations + and optimizer state depend on them transitively through parameters. + Critically, we do not need to know which Tensors are parameters to + apply this method; we can simply walk the data flow graph to build the + set of all values which depend on a gradient and then obtain the set + of inputs from the conjugate set. + + There is, however, one hiccup. The first time we see a parameter is + generally on the forward pass of the first step. We know from + inspection of the data flow graph that v1 of that Tensor depends on + a gradient (provided we profile an optimizer step), but not v0. To + address this problem we weaken the definition of "depends on a + gradient" to "any version of this Tensor depends on a gradient", + which in turn strengthens the criteria for the input set enough to + filter the activations in the forward pass of the first step.""" + + # All of this analysis is predicated on using at least one training + # step (or parameters from the python tracer) to partition the graph. + # Absent that we cannot determine which Tensors are inputs and which + # ones are part of the model. + depends_on_gradient = self._any_version_depends_on_gradient() + + # We only want to annotate Tensors which actually contribute to the + # model calculation. 
+ produces_gradient: Set[TensorAndID] = set() + for node in reversed(self._data_flow_graph.flow_nodes): + tensors = {(key, version) for key, (_, version) in node.inputs.items()} + tensors |= node.outputs.items() + if any( + self._categories.get(*i) in (Category.GRADIENT, Category.PARAMETER) + or i in produces_gradient + for i in tensors + ): + produces_gradient |= tensors + + # Don't include Tensors created in the backward pass, as these are + # generally Autograd implementation details rather than proper inputs. + input_candidates = produces_gradient.copy() + for node in self._data_flow_graph.flow_nodes: + if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event): + input_candidates -= set(node.outputs.items()) + + for key, version in input_candidates: + if key.id not in depends_on_gradient: + self._categories.setdefault_by_version(key, version, Category.INPUT) + + def _set_parameters_using_data_flow(self) -> None: + """Deduce which Tensors are parameters. + + Consider the following code for the step of SGD with momentum + (nesterov=False), where `d_p` is the gradient of `param` and `buf` is + the momentum buffer. + ``` + buf.mul_(momentum).add_(d_p, alpha=1 - dampening) + d_p = buf + param.add_(d_p, alpha=-lr) + ``` + Both `param` and `buf` take a gradient and perform an in-place update. + + The python tracer will inspect calls to `nn.Module.forward` and + `optim.Optimizer.step` to extract parameter and optimizer state + respectively (including parameters), so this is generally a non-issue. + + However as a fallback we can also exploit several properties of + parameters to distinguish them from other model state. + + First, they are directly used in the forward pass. (At this point we + haven't established which parts of the graph correspond to the forward + pass but we can deduce enough to suffice.) Some mutable state such as + batch norm moving averages also contribute to the forward pass, but + optimizer state does not. + + Second, a parameter is by definition used to compute at least one + gradient and depends on at least one gradient. + """ + snapshot = self._category_snapshot() + + # Determine which Tensors might be parameters based on forward pass + # data flow. Note this these are only candidates; we filter nodes that + # we know are part of the backward pass but that doesn't guarantee that + # they are part of the forward pass. + candidate_parameters: Set[TensorAndID] = set() + candidate_fwd_tensors: Set[TensorAndID] = { + i for i, category in snapshot.items() if category == Category.INPUT + } + + for node in self._data_flow_graph.flow_nodes: + inputs = {(key, value) for key, (_, value) in node.inputs.items()} + if ( + # Don't check nodes in the backward pass. + RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event) + and not any(self._is_gradient(*i) for i in inputs) + and not any(self._is_gradient(*i) for i in node.outputs.items()) + # + # and only check nodes which depend on an input. 
+ and candidate_fwd_tensors.intersection(inputs) + ): + candidate_fwd_tensors |= node.outputs.items() + candidate_parameters |= inputs.difference(candidate_fwd_tensors) + + # Require that each parameter eventually contributes to the value of a gradient + used_for_gradient: Set[TensorAndID] = set() + for node in reversed(self._data_flow_graph.flow_nodes): + if any( + self._is_gradient(*i) or i in used_for_gradient + for i in node.outputs.items() + ): + for key, (_, version) in node.inputs.items(): + used_for_gradient.add((key, version)) + candidate_parameters.intersection_update(used_for_gradient) + + # and depends on a gradient. + parameter_keys = {key.id for key, _ in candidate_parameters} + parameter_keys &= self._any_version_depends_on_gradient() + + for key, _ in snapshot.keys(): + if key.id in parameter_keys: + self._categories.set_by_id(key, Category.PARAMETER) + + def _set_activations(self) -> None: + """Flood the graph to identify activations.""" + + required = {Category.INPUT, Category.ACTIVATION} + also_allowed = {Category.PARAMETER, Category.TEMPORARY} + for node in self._data_flow_graph.flow_nodes: + inputs = {(key, value) for key, (_, value) in node.inputs.items()} + input_categories = {self._categories.get(*i) for i in inputs} + + if ( + (input_categories & required) + and not (input_categories - (required | also_allowed)) + # + # Stop filling when we reach the backward pass. + and RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event) + ): + for i in node.outputs.items(): + self._categories.setdefault_by_version(*i, Category.ACTIVATION) + + def _set_optimizer_state(self) -> None: + for event in self._op_tree.dfs(): + if event.typed[0] == _EventType.PyCall and event.typed[1].optimizer: + parameters = event.typed[1].optimizer.parameters + for _, t in it.chain(*[state for _, _, state in parameters]): + key = TensorKey.from_tensor(t) + if key is not None: + self._categories.set_by_id(key, Category.OPTIMIZER_STATE) + + def _set_autograd_detail(self): + prior = {None, Category.AUTOGRAD_DETAIL} + for node in self._data_flow_graph.flow_nodes: + if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event): + for key, version in node.outputs.items(): + if version == 0 or self._categories.get(key, version - 1) in prior: + self._categories.setdefault_by_version( + key, version, Category.AUTOGRAD_DETAIL + ) + + +class MemoryProfileTimeline: + def __init__(self, memory_profile): + """The minimum representation of the memory profile timeline + includes the memory timeline and categories. The timeline + consists of [timestamp, action, (TensorKey, version), numbytes] + elements, to denote any actions (pre-existing, create, destroy, + or increment_version) that occurred to a specific Tensor for a + chunk of memory. The categories help map each (TensorKey, + version) pair into a category.""" + self.timeline = memory_profile.timeline + self.categories = memory_profile._categories + + def _coalesce_timeline(self, device_str): + """Convert the memory timeline and categories into a memory plot + consisting of timestamps and their respective sizes by category + for a given device. 
+ + Input: device + Output: [timestamps, sizes by category] + """ + device = torch.device(device_str) + times: List[int] = [] + sizes: List[List[int]] = [] + + def update(key, version, delta): + category = ( + self.categories.get(key, version) + if isinstance(key, TensorKey) + else None + ) + index = _CATEGORY_TO_INDEX[category] + 1 + sizes[-1][index] += int(delta) + + t_min = -1 + for t, action, (key, version), numbytes in self.timeline: + if key.device != device: + continue + + # Convert timestamps from ns to us, to match trace events. + if t != -1: + t = int(t / 1000) + + # Save the smallest timestamp to populate pre-existing allocs. + if t_min == -1 or (t < t_min and t > 0): + t_min = t + + # Handle timestep + if len(times) == 0: + times.append(t) + sizes.append([0] + [0 for _ in _CATEGORY_TO_INDEX]) + + elif t != times[-1]: + times.append(t) + sizes.append(sizes[-1].copy()) + + # Handle memory and categories + if action in (Action.PREEXISTING, Action.CREATE): + update(key, version, numbytes) + + elif action == Action.INCREMENT_VERSION: + update(key, version, -numbytes) + update(key, version + 1, numbytes) + + elif action == Action.DESTROY: + update(key, version, -numbytes) + + else: + raise ValueError(f"Unknown action: {action}") + + times = [t_min if t < 0 else t for t in times] + return times, sizes + + def export_memory_timeline(self, path, device_str) -> None: + """Saves the memory timeline as [times, sizes by category] + as a JSON formatted file to the given path for the given + device.""" + times, sizes = self._coalesce_timeline(device_str) + # TODO: Write a faster serialize (orjson not available in CI) + import json + + with open(path, "w") as f: + json.dump([times, sizes], f) + + def export_memory_timeline_raw(self, path, device_str) -> None: + """Saves the memory timeline as raw memory event tuples in the + form of (timestamp, action, numbytes, category) + as a JSON formatted file to the given path for the given + device.""" + device = torch.device(device_str) + raw_events: List[Tuple[int, int, int, int]] = [] + + def get_category_index(key, version): + category = ( + self.categories.get(key, version) + if isinstance(key, TensorKey) + else None + ) + return _CATEGORY_TO_INDEX[category] + + for t, action, (key, version), numbytes in self.timeline: + if key.device != device: + continue + + if action in (Action.PREEXISTING, Action.CREATE): + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + numbytes, + get_category_index(key, version), + ) + ) + + elif action == Action.INCREMENT_VERSION: + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + -numbytes, + get_category_index(key, version), + ) + ) + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + numbytes, + get_category_index(key, version + 1), + ) + ) + + elif action == Action.DESTROY: + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + -numbytes, + get_category_index(key, version), + ) + ) + + else: + raise ValueError(f"Unknown action: {action}") + + import json + + with open(path, "w") as f: + json.dump(raw_events, f) + + def export_memory_timeline_html( + self, path, device_str, figsize=(20, 12), title=None + ) -> None: + """Exports the memory timeline as an HTML file which contains + the memory timeline plot embedded as a PNG file.""" + # Check if user has matplotlib installed, return gracefully if not. 
+ import importlib.util + + matplotlib_spec = importlib.util.find_spec("matplotlib") + if matplotlib_spec is None: + print( + "export_memory_timeline_html failed because matplotlib was not found." + ) + return + + from base64 import b64encode + from os import remove + from tempfile import NamedTemporaryFile + + import matplotlib.pyplot as plt + import numpy as np + + mt = self._coalesce_timeline(device_str) + times, sizes = np.array(mt[0]), np.array(mt[1]) + # For this timeline, start at 0 to match Chrome traces. + t_min = min(times) + times -= t_min + stacked = np.cumsum(sizes, axis=1) / 1024**3 + device = torch.device(device_str) + max_memory_allocated = torch.cuda.max_memory_allocated(device) + max_memory_reserved = torch.cuda.max_memory_reserved(device) + + # Plot memory timeline as stacked data + fig = plt.figure(figsize=figsize, dpi=80) + axes = fig.gca() + for category, color in _CATEGORY_TO_COLORS.items(): + i = _CATEGORY_TO_INDEX[category] + axes.fill_between( + times / 1e3, stacked[:, i], stacked[:, i + 1], color=color, alpha=0.7 + ) + fig.legend(["Unknown" if i is None else i.name for i in _CATEGORY_TO_COLORS]) + # Usually training steps are in magnitude of ms. + axes.set_xlabel("Time (ms)") + axes.set_ylabel("Memory (GB)") + title = "\n\n".join( + ([title] if title else []) + + [ + f"Max memory allocated: {max_memory_allocated/(1024**3):.2f} GiB \n" + f"Max memory reserved: {max_memory_reserved/(1024**3):.2f} GiB" + ] + ) + axes.set_title(title) + + # Embed the memory timeline image into the HTML file + tmpfile = NamedTemporaryFile("wb", suffix=".png", delete=False) + tmpfile.close() + fig.savefig(tmpfile.name, format="png") + + with open(tmpfile.name, "rb") as tmp: + encoded = b64encode(tmp.read()).decode("utf-8") + html = f""" +GPU Memory Timeline HTML + + + +""" + + with open(path, "w") as f: + f.write(html) + remove(tmpfile.name) diff --git a/venv/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py b/venv/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..02e9b014d3080ad584374db9fad7cdae97bb496e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py @@ -0,0 +1,662 @@ +import json +import math +import os +import re +from typing import Dict, List, Optional, Set + +import torch +import torch.utils.benchmark as benchmark +from torch._C._profiler import ( + _EventType, + _ExtraFields_PyCall, + _ExtraFields_PyCCall, + _ExtraFields_TorchOp, + _ProfilerEvent, +) +from torch.profiler import profile +from torch.profiler._utils import index_of_first_match, traverse_bfs, traverse_dfs + + +class Pattern: + """ + Base class for all patterns, subclass this class and implement match() + to define custom patterns. + + In subclass, define description and skip property. 
+ """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + self.prof = prof + self.should_benchmark = should_benchmark + self.name = "Please specify a name for pattern" + self.description = "Please specify a description for pattern" + self.url = "" + assert prof.profiler is not None and prof.profiler.kineto_results is not None + self.event_tree = prof.profiler.kineto_results.experimental_event_tree() + self.tid_root: Dict[int, List[_ProfilerEvent]] = {} + for event in self.event_tree: + self.tid_root.setdefault(event.start_tid, []).append(event) + + @property + def skip(self): + return False + + def report(self, event: _ProfilerEvent): + msg = ( + f"{self.description}\n[Source Code Location] {source_code_location(event)}" + ) + return msg + + def eventTreeTraversal(self): + """ + Traverse the event tree and yield all events. + Override this method in subclass to customize the traversal. + """ + yield from traverse_dfs(self.event_tree) + + def summary(self, events: List[_ProfilerEvent]): + default_summary = f"{self.name}: {len(events)} events matched." + if self.should_benchmark: + # If benchmark summary is not empty, use it. + return ( + self.benchmark_summary(events) + if hasattr(self, "benchmark") # type: ignore[attr-defined] + else default_summary + ) + return default_summary + + def benchmark_summary(self, events: List[_ProfilerEvent]): + def format_time(time_ns: int): + unit_lst = ["ns", "us", "ms"] + for unit in unit_lst: + if time_ns < 1000: + return f"{time_ns:.2f} {unit}" + time_ns //= 1000 + return f"{time_ns:.2f} s" + + assert hasattr(self, "benchmark"), "Please implement benchmark()" + shapes_factor_map = self.benchmark(events) # type: ignore[attr-defined] + original_time = sum(event.duration_time_ns for event in events) + new_time = sum( + shapes_factor_map[input_shapes(event)] * event.duration_time_ns + for event in events + ) + return ( + f"{self.name}: {len(events)} events matched. " + f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)" + ) + + def match(self, event: _ProfilerEvent): + """ + Return True if the event matches the pattern. + This method should be overriden in subclass. 
+ """ + raise NotImplementedError + + def matched_events(self): + if self.skip: + return [] + matched_events = [] + for event in self.eventTreeTraversal(): + if self.match(event): + matched_events.append(event) + return matched_events + + def root_of(self, event: _ProfilerEvent): + while event.parent: + event = event.parent + return event + + def siblings_of(self, event: _ProfilerEvent): + if event.parent: + children = event.parent.children + else: + children = self.tid_root[event.start_tid] + index = children.index(event) + return children[:index], children[index + 1 :] + + def next_of(self, event: _ProfilerEvent): + _, next_events = self.siblings_of(event) + return next_events[0] if next_events else None + + def prev_of(self, event: _ProfilerEvent): + prev_events, _ = self.siblings_of(event) + return prev_events[-1] if prev_events else None + + def go_up_until(self, event: _ProfilerEvent, predicate): + if not event: + return None + while event.parent and not predicate(event): + event = event.parent + return event + + +# Patterns + + +class NamePattern(Pattern): + def __init__(self, prof: profile, name: str, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.description = f"Matched Name Event: {name}" + self.name = name + + def match(self, event: _ProfilerEvent): + return re.search(self.name, event.name) is not None + + +class ExtraCUDACopyPattern(Pattern): + """ + This pattern identifies if we creates a constant tensor on CPU and immediately moves it to GPU. + example: torch.zeros((100, 100)).to("cuda") + + Pattern: + build-in method |build-in method + ... | aten::to + aten::fill_/aten::zero_ | aten::_to_copy + + Algorithm: + We start at node aten::to, go parent events' previous events, + and check if we have a aten::fill_/aten::zero_ as we keep going down the tree. + We always select the last child in the children list when we go down the tree. + If at any step we failed, it is not a match. + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Extra CUDA Copy Pattern" + self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initialize it on GPU." 
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#create-tensors-directly-on-the-target-device" + self.init_ops = { + "aten::fill_", + "aten::zero_", + "aten::normal_", + "aten::uniform_", + } + + @property + def skip(self): + return not self.prof.with_stack or not self.prof.record_shapes + + def match(self, event): + # TODO: We should also check tensor identities + if event.name != "aten::to": + return False + to_event = event + if not event.children: + return False + event = event.children[-1] + if event.name != "aten::_to_copy": + return False + if not event.children: + return False + event = event.children[-1] + if event.name != "aten::copy_": + return False + # aten::copy_ should have the first 2 args dtype the same + dtypes = input_dtypes(event) + if len(dtypes) < 2: + return False + if dtypes[0] is None or dtypes[0] != dtypes[1]: + return False + event = to_event + # Up one level + event = event.parent + if event is None: + return False + # Check if we have a aten::fill_ in previous leaf + event = self.prev_of(event) + if event is None: + return False + while event.children: + event = event.children[-1] + # aten::zero_ is a special optimzation case where fill_ is not called + if event.name in self.init_ops: + return True + return event.name in self.init_ops + # TODO: Check if tensor is reused + + def benchmark(self, events: List[_ProfilerEvent]): + shapes_factor_map = {input_shapes(event): 0.0 for event in events} + for shape in shapes_factor_map: + size = shape[0] + to_timer = benchmark.Timer( + stmt='torch.ones(size).to("cuda")', globals={"size": size} + ) + de_timer = benchmark.Timer( + stmt='torch.ones(size, device="cuda")', globals={"size": size} + ) + to_time = to_timer.timeit(10).mean + de_time = de_timer.timeit(10).mean + shapes_factor_map[shape] = de_time / to_time + return shapes_factor_map + + +class ForLoopIndexingPattern(Pattern): + """ + This pattern identifies if we use a for loop to index a tensor that + can be vectorized. + example: + tensor = torch.empty((100, 100)) + for i in range(100): + tensor[i] = i + + Pattern: + aten::select | ... | aten::select | ... (Repeat) + + Algorithm: + We start at node aten::select, and we check if we can find this alternating patterns. + We also keep a dictionary to avoid duplicate match in the for loop. + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "For Loop Indexing Pattern" + self.description = "For loop indexing detected. Vectorization recommended." + self.visited: Set[int] = set() + + def eventTreeTraversal(self): + """ + We need to use BFS traversal order to avoid duplicate match. 
+ """ + yield from traverse_bfs(self.event_tree) + + def match(self, event: _ProfilerEvent): + if event.name != "aten::select": + return False + if event.id in self.visited: + return False + repeat_count = 1 + _, next = self.siblings_of(event) + if len(next) <= 1: + return False + + # Custom event list matching + def same_ops(list1, list2): + if len(list1) != len(list2): + return False + for op1, op2 in zip(list1, list2): + if op1.name != op2.name: + return False + return True + + # Record the ops between two aten::select + next_select_idx = index_of_first_match(next, lambda e: e.name == "aten::select") + if next_select_idx is None: + return False + indexing_ops = [event] + next[:next_select_idx] + next = next[len(indexing_ops) - 1 :] + for i in range(0, len(next), len(indexing_ops)): + if same_ops(indexing_ops, next[i : i + len(indexing_ops)]): + repeat_count += 1 + self.visited.add(next[i].id) + else: + break + return repeat_count >= 10 + + +class FP32MatMulPattern(Pattern): + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "FP32 MatMul Pattern" + self.description = ( + "You are currently using GPU that supports TF32. " + "Please enable TF32 by setting 'torch.backends.cuda.matmul.allow_tf32 = True'" + ) + self.url = "https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + + @property + def skip(self): + if torch.version.hip is not None: + has_tf32 = False + else: + # Anything less than sm_80 is not Ampere which doesn't support TF32 + has_tf32 = all(int(arch[3:]) >= 80 for arch in torch.cuda.get_arch_list()) + return has_tf32 is False or super().skip or not self.prof.record_shapes + + def match(self, event: _ProfilerEvent): + # If we saw this pattern once, we don't need to match it again + if event.tag != _EventType.TorchOp: + return False + assert isinstance(event.extra_fields, _ExtraFields_TorchOp) + if event.name == "aten::mm": + if event.extra_fields.allow_tf32_cublas is False: + return True + return False + + def report(self, event: _ProfilerEvent): + return self.description + + def benchmark(self, events: List[_ProfilerEvent]): + shapes_factor_map = {input_shapes(event): 0.0 for event in events} + for shape in shapes_factor_map: + matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float32) + matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float32) + fp32_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + tf32_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + setup="torch.backends.cuda.matmul.allow_tf32 = True", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + torch.backends.cuda.matmul.allow_tf32 = False + fp32_time = fp32_timer.timeit(10).mean + tf32_time = tf32_timer.timeit(10).mean + shapes_factor_map[shape] = tf32_time / fp32_time + return shapes_factor_map + + +class OptimizerSingleTensorPattern(Pattern): + """ + This pattern identifies if we are using the single-tensor version of an optimizer. + example: + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + By adding foreach=True to enable multi-tensor optimizer, we can gain speedup when + the kernels are relatively small. 
+ + Pattern: + XXXXX: _single_tenser_ + + Algorithm: + String match + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Optimizer Single Tensor Pattern" + self.optimizers_with_foreach = ["adam", "sgd", "adamw"] + self.description = ( + "Deteced optimizer running with single tensor implementation. " + "Please enable multi tensor implementation by passing 'foreach=True' into optimizer." + ) + self.url = "" + + def match(self, event: _ProfilerEvent): + for optimizer in self.optimizers_with_foreach: + if event.name.endswith(f"_single_tensor_{optimizer}"): + return True + return False + + +class SynchronizedDataLoaderPattern(Pattern): + """ + This pattern identifies if we are using num_workers=0 in DataLoader. + example: + torch.utils.data.DataLoader(dataset, batch_size=batch_size) + Add num_workers=N to the arguments. N depends on system configuration. + + Pattern: + dataloader.py(...): __iter__ + dataloader.py(...): _get_iterator + NOT dataloader.py(...): check_worker_number_rationality + + Algorithm: + If we don't see check_worker_number_rationality call in the dataloader __iter__, + It is not an asynchronous dataloader. + + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Synchronized DataLoader Pattern" + self.description = ( + "Detected DataLoader running with synchronized implementation. " + "Please enable asynchronous dataloading by setting num_workers > 0 when initializing DataLoader." + ) + self.url = ( + "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html" + "#enable-async-data-loading-and-augmentation" + ) + + def match(self, event: _ProfilerEvent): + def is_dataloader_function(name: str, function_name: str): + return name.startswith( + os.path.join("torch", "utils", "data", "dataloader.py") + ) and name.endswith(function_name) + + # TODO: fixme! Due to lifetime issues of the function name, this field might + # actually point to an already freed string when the even is a PyCall. + # Just silently skip this to unblock testing. + try: + event.name + except UnicodeDecodeError: + return False + + if not is_dataloader_function(event.name, "__iter__"): + return False + if not event.children: + return False + event = event.children[0] + if not is_dataloader_function(event.name, "_get_iterator"): + return False + if not event.children: + return False + event = event.children[0] + return not is_dataloader_function(event.name, "check_worker_number_rationality") + # TODO: We should also check if the loader is bottleneck. + + +class GradNotSetToNonePattern(Pattern): + """ + This pattern identifies if we are not setting grad to None in zero_grad. + example: + optimizer.zero_grad() + By setting set_to_none=True, we can gain speedup + + Pattern: + XXXXX: _zero_grad + NOT aten::zeros + aten::zero_ + + aten::zero_ is called on each parameter in the model. + We also want to make sure it is not called by aten::zeros. + + Algorithm: + String match + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Gradient Set To Zero Instead of None Pattern" + self.description = ( + "Detected gradient set to zero instead of None. " + "Please add 'set_to_none=True' when calling zero_grad()." 
+ ) + self.url = ( + "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html" + "#disable-gradient-calculation-for-validation-or-inference" + ) + + def match(self, event: _ProfilerEvent): + if not event.name.endswith(": zero_grad"): + return False + if not event.children: + return False + + for sub_event in traverse_dfs(event.children): + if ( + sub_event.name == "aten::zero_" + and sub_event.parent.name != "aten::zeros" + ): + return True + # TODO: We should also check if the optimizer's numerical behavior will change. + return False + + +class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern): + """ + This pattern identifies if we are enabling bias in Conv2d which is followed by BatchNorm2d. + Bias doesn't do anything when followed by batchnorm. + Pattern: + nn.Module: Conv2d | nn.Module: BatchNorm2d + ... + aten::conv2d AND dtype of third argument is not null + The third argument is the bias + Algorithm: + String match + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Enabling Bias in Conv2d Followed By BatchNorm Pattern" + self.description = "Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d." + self.url = ( + "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html" + "#disable-bias-for-convolutions-directly-followed-by-a-batch-norm" + ) + + @property + def skip(self): + return self.prof.record_shapes is False or super().skip + + def match(self, event: _ProfilerEvent): + if event.name != "aten::conv2d": + return False + if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] is None: + return False + # This means bias=True + event = self.go_up_until( + event, lambda e: e.name.startswith("nn.Module: Conv2d") + ) + if not event: + return False + event = self.next_of(event) + if not event: + return False + return event.name.startswith("nn.Module: BatchNorm2d") + + +class MatMulDimInFP16Pattern(Pattern): + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Matrix Multiplication Dimension Not Aligned Pattern" + self.description = "Detected matmul with dimension not aligned. Please use matmul with aligned dimension." 
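+        # Illustrative example: a (100, 100) float16 matmul is flagged because 100
+        # is not a multiple of 8, whereas (104, 104) passes the alignment check in match().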
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp" + + @property + def skip(self): + return not self.prof.with_stack or not self.prof.record_shapes + + def match(self, event: _ProfilerEvent): + def mutiple_of(shapes, multiple): + return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:]) + + if event.name not in ("aten::mm", "aten::bmm", "aten::addmm"): + return False + if not input_dtypes(event): + return False + arg_dtype = input_dtypes(event)[0] + if arg_dtype in (torch.bfloat16, torch.half) and not mutiple_of( + input_shapes(event), 8 + ): + return True + return False + + def benchmark(self, events: List[_ProfilerEvent]): + def closest_multiple(shapes, multiple): + return [multiple * math.ceil(shape / multiple) for shape in shapes] + + shapes_factor_map = {input_shapes(event): 0.0 for event in events} + for shape in shapes_factor_map: + matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float16) + matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float16) + not_aligned_dim_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + matrixA = torch.randn( + closest_multiple(shape[0], 8), device="cuda", dtype=torch.float16 + ) + matrixB = torch.randn( + closest_multiple(shape[1], 8), device="cuda", dtype=torch.float16 + ) + aligned_dim_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + not_aligned_dim_time = not_aligned_dim_timer.timeit(10).mean + aligned_dim_time = aligned_dim_timer.timeit(10).mean + shapes_factor_map[shape] = aligned_dim_time / not_aligned_dim_time + return shapes_factor_map + + +def source_code_location(event: Optional[_ProfilerEvent]): + while event: + if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall: + assert isinstance( + event.extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall) + ) + if not event.extra_fields.caller.file_name.startswith("torch" + os.sep): + return f"{event.extra_fields.caller.file_name}:{event.extra_fields.caller.line_number}" + event = event.parent + return "No source code location found" + + +def input_shapes(event: _ProfilerEvent): + assert isinstance(event.extra_fields, _ExtraFields_TorchOp) + return tuple(tuple(getattr(i, "sizes", ())) for i in event.extra_fields.inputs) + + +def input_dtypes(event: _ProfilerEvent): + assert isinstance(event.extra_fields, _ExtraFields_TorchOp) + return tuple(getattr(i, "dtype", None) for i in event.extra_fields.inputs) + + +def report_all_anti_patterns( + prof, + should_benchmark: bool = False, + print_enable: bool = True, + json_report_dir: Optional[str] = None, +): + report_dict: Dict = {} + anti_patterns = [ + ExtraCUDACopyPattern(prof, should_benchmark), + # ForLoopIndexingPattern(prof, should_benchmark), + FP32MatMulPattern(prof, should_benchmark), + OptimizerSingleTensorPattern(prof, should_benchmark), + SynchronizedDataLoaderPattern(prof, should_benchmark), + GradNotSetToNonePattern(prof, should_benchmark), + Conv2dBiasFollowedByBatchNorm2dPattern(prof, should_benchmark), + MatMulDimInFP16Pattern(prof, should_benchmark), + ] + reported = set() + summaries = [] + message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"] + message_list.append("Matched Events:") + + for anti_pattern in anti_patterns: + matched_events = anti_pattern.matched_events() + if not matched_events: + continue + summaries.append(anti_pattern.summary(matched_events)) + for event in matched_events: + 
report_msg = anti_pattern.report(event) + if report_msg not in reported: + message_list.append(report_msg) + reported.add(report_msg) + src_location, line_no = source_code_location(event).split(":") + report_dict.setdefault(src_location, []).append( + { + "line_number": int(line_no), + "name": anti_pattern.name, + "url": anti_pattern.url, + "message": anti_pattern.description, + } + ) + + if json_report_dir is not None: + json_report_path = os.path.join(json_report_dir, "torchtidy_report.json") + if os.path.exists(json_report_path): + with open(json_report_path) as f: + exisiting_report = json.load(f) + exisiting_report.update(report_dict) + report_dict = exisiting_report + with open(json_report_path, "w") as f: + json.dump(report_dict, f, indent=4) + + message_list.append("Summary:") + message_list += summaries + message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}") + if print_enable: + print("\n".join(message_list)) diff --git a/venv/lib/python3.10/site-packages/torch/profiler/_utils.py b/venv/lib/python3.10/site-packages/torch/profiler/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..783a69ea89ab7d3aae6932d5ae5e89d964fbe746 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/profiler/_utils.py @@ -0,0 +1,373 @@ +import functools +import re +from collections import deque +from dataclasses import dataclass +from typing import Dict, List + +from torch.autograd import _KinetoEvent +from torch.autograd.profiler import profile + +from torch.profiler import DeviceType + + +def _traverse(tree, next_fn, children_fn=lambda x: x.children, reverse: bool = False): + order = reversed if reverse else lambda x: x + remaining = deque(order(tree)) + while remaining: + curr_event = next_fn(remaining) + yield curr_event + for child_event in order(children_fn(curr_event)): + remaining.append(child_event) + + +traverse_dfs = functools.partial(_traverse, next_fn=lambda x: x.pop(), reverse=True) +traverse_bfs = functools.partial( + _traverse, next_fn=lambda x: x.popleft(), reverse=False +) + + +@dataclass +class EventMetrics: + duration_time_ns: int = 0 + self_time_ns: int = 0 + idle_time_ns: int = 0 + queue_depth: int = 0 + + @property + def fraction_idle_time(self): + if self.duration_time_ns == 0: + return 0.0 + return self.idle_time_ns / self.duration_time_ns + + +@dataclass +class Interval: + start: int + end: int + queue_depth: int = 0 + + +class EventKey: + def __init__(self, event): + self.event = event + + def __hash__(self): + return hash(self.event.id) + + def __eq__(self, other): + return self.event.id == other.event.id + + def __repr__(self): + return f"{self.event.name}" + + def intervals_overlap(self, intervals: List[Interval]): + overlap_time = 0 + intervals = sorted(intervals, key=lambda x: x.start) + + if intervals: + overlap_start = max(self.event.start_time_ns, intervals[0].start) + overlap_end = min(self.event.end_time_ns, intervals[0].end) + + if overlap_start < overlap_end: + overlap_time += overlap_end - overlap_start + + i, j = 0, 1 + while j < len(intervals): + prev_interval = intervals[i] + curr_interval = intervals[j] + j += 1 + if prev_interval.end > curr_interval.start: + # Completely subsumed by previous interval + if prev_interval.end > curr_interval.end: + j += 1 + continue + else: + curr_interval.start = prev_interval.end + i = j + + overlap_start = max(self.event.start_time_ns, curr_interval.start) + overlap_end = min(self.event.end_time_ns, curr_interval.end) + if overlap_start < overlap_end: + overlap_time += overlap_end - 
overlap_start + + return overlap_time + + +class BasicEvaluation: + def __init__(self, prof: profile): + self.profile = prof + self.metrics: Dict[EventKey, EventMetrics] = {} + self.compute_self_time() + self.event_keys = sorted( + (e for e in self.metrics.keys()), key=lambda x: x.event.start_time_ns + ) + self.events = [e.event for e in self.event_keys] + self.cuda_events: List[_KinetoEvent] = [] + self.queue_depth_list = self.compute_queue_depth() + self.compute_idle_time() + + def compute_self_time(self): + """ + Computes event's self time(total time - time in child ops). + """ + assert self.profile.kineto_results is not None + stack = deque(self.profile.kineto_results.experimental_event_tree()) + + # standard iterating dfs + while stack: + curr_event = stack.pop() + self_time = curr_event.duration_time_ns + for child_event in curr_event.children: + self_time -= child_event.duration_time_ns + stack.append(child_event) + assert ( + EventKey(curr_event) not in self.metrics + ), f"Duplicate id: {curr_event.id}, {curr_event.name}" + self.metrics[EventKey(curr_event)] = EventMetrics(self_time_ns=self_time) + self.metrics[ + EventKey(curr_event) + ].duration_time_ns = curr_event.duration_time_ns + + def compute_queue_depth(self): + """ + Computes queue_depth at each event. This will calculate the queue depth data for + All the events in the tree. + This will return a list of Interval of queue depth data of cuda launch and kernels. + """ + assert self.profile.kineto_results is not None + cuda_event_list = self.profile.kineto_results.events() + + def is_cuda_launch_kernel(e): + # TODO: find a better way to identify cudaLaunchKernel + return e.name == "cudaLaunchKernel" + + def is_cuda_kernel(e): + # TODO: find a better way to identify CUDA Kernel + return e.device_type() == DeviceType.CUDA and "mem" not in e.name.lower() + + cuda_launch_events = sorted( + (e for e in cuda_event_list if is_cuda_launch_kernel(e)), + key=lambda x: x.start_us(), + ) + cuda_kernel_events = sorted( + (e for e in cuda_event_list if is_cuda_kernel(e)), + key=lambda x: x.start_us(), + ) + + self.cuda_events = sorted( + cuda_launch_events + cuda_kernel_events, key=lambda x: x.start_us() + ) + + kernel_mapping: Dict[_KinetoEvent, int] = {} + last_mapped_kernel = 0 + for cuda_launch_event in cuda_launch_events: + index = index_of_first_match( + cuda_kernel_events, + lambda x: x.linked_correlation_id() + == cuda_launch_event.linked_correlation_id(), + start=last_mapped_kernel, + ) + kernel_mapping[cuda_launch_event] = index + last_mapped_kernel = index if index is not None else last_mapped_kernel + + current_kernel_index = 0 + spawned_kernel_index = -1 + + all_events = cuda_launch_events + cuda_kernel_events + self.events + + def new_old_event_comparator(event): + if hasattr(event, "start_us"): + return event.start_us() * 1000 + if hasattr(event, "start_time_ns"): + return event.start_time_ns + raise Exception("Unknown Event Type") + + queue_depth_list: List[Interval] = [] + all_events.sort(key=new_old_event_comparator) + for event in all_events: + # Find latest cuda kernel event + if hasattr(event, "start_us"): + start_time = event.start_us() * 1000 + end_time = (event.start_us() + event.duration_us()) * 1000 + # Find current spawned cuda kernel event + if event in kernel_mapping and kernel_mapping[event] is not None: + spawned_kernel_index = kernel_mapping[event] + elif hasattr(event, "start_time_ns"): + start_time = event.start_time_ns # type: ignore[attr-defined] + end_time = event.end_time_ns # type: 
ignore[attr-defined] + + while ( + current_kernel_index < len(cuda_kernel_events) + and (cuda_kernel_events[current_kernel_index].start_us()) * 1000 + <= start_time # type: ignore[possibly-undefined] + ): + current_kernel_index += 1 + current_queue_depth = spawned_kernel_index - current_kernel_index + 1 + current_queue_depth = max(current_queue_depth, 0) + + if hasattr(event, "start_us"): + queue_depth_list.append( + Interval(start_time, end_time, current_queue_depth) # type: ignore[possibly-undefined] + ) + elif hasattr(event, "start_time_ns"): + self.metrics[EventKey(event)].queue_depth = current_queue_depth + + return queue_depth_list + + def compute_idle_time(self): + """ + Computes idle time of the profile. + """ + # Based on queue_depth_list, we can calculate idle time for all the events + idle = False + idle_start = 0 + idle_intervals: List[Interval] = [] + if self.queue_depth_list and self.events: + idle_intervals += [ + Interval(self.events[0].start_time_ns, self.queue_depth_list[0].start), + Interval(self.queue_depth_list[-1].end, self.events[-1].end_time_ns), + ] + + for data_point in self.queue_depth_list: + if data_point.queue_depth == 0 and not idle: + idle_start = data_point.end + idle = True + if data_point.queue_depth > 0 and idle: + idle_intervals.append(Interval(idle_start, data_point.start)) + idle = False + + event_list = [e.event for e in self.metrics.keys()] + for event in event_list: + self.metrics[EventKey(event)].idle_time_ns = EventKey( + event + ).intervals_overlap(idle_intervals) + + def rank_events(self, length): + """ + Filter and Rank the events based on some heuristics: + 1) Events that are in the falling phase of the queue depth. + 2) Events that have a high idle_time, self_time difference. + + Parameters: + length: The number of events to return. 
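+
+        Returns:
+            A list of at most ``length`` ``EventKey`` objects, ordered by the
+            heuristic score described above.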
+ """ + + # Find the interval when qd is falling to 0 + import torch + + queue_depth_list = list(reversed(self.queue_depth_list)) + qd_values = [e.queue_depth for e in queue_depth_list] + + bottom_threashold = 0 + top_threashold = 4 + decrease_interval = [] + i = 0 + while i < len(qd_values): + if qd_values[i] > bottom_threashold: + i += 1 + continue + for j in range(i + 1, len(qd_values)): + # Find next zero and if the max value between them exceeds + # the threshold, then we have a falling interval + next_minimum_idx = index_of_first_match( + qd_values, lambda x: x <= bottom_threashold, start=j + ) + peak_idx = argmax(qd_values, start=j, end=next_minimum_idx) + + # if is a valid peak, we add to list and continue + if peak_idx is not None and qd_values[peak_idx] >= top_threashold: + decrease_interval.append( + Interval( + queue_depth_list[peak_idx].start, queue_depth_list[i].start + ) + ) + i = next_minimum_idx if next_minimum_idx is not None else i + break + i += 1 + # Filter out events that are not in the decrease interval + event_list = [ + event + for event in self.metrics.keys() + if event.intervals_overlap(decrease_interval) + ] + if event_list: + self_time = torch.tensor( + [self.metrics[event].self_time_ns for event in event_list], + dtype=torch.float32, + ) + idle_time = torch.tensor( + [self.metrics[event].fraction_idle_time for event in event_list], + dtype=torch.float32, + ) + normalized_gain = (idle_time - torch.mean(idle_time)) / torch.std(idle_time) + normalized_self = (self_time - torch.mean(self_time)) / torch.std(self_time) + heuristic_score_list = normalized_gain + 0.6 * normalized_self + + # Sort events by heuristic + event_list = [ + event + for _, event in sorted( + zip(heuristic_score_list, event_list), + key=lambda x: x[0], + reverse=True, + ) + ] + event_list = event_list[:length] + return event_list + + def get_optimizable_events(self, length: int = 1, print_enable: bool = True): + event_list = self.rank_events(length) + if not print_enable: + return event_list + output = "Optimizable events:\n" if event_list else "No events to optimize\n" + + output += "\n".join( + [ + f"""{'-'*80} +Event: {event} +Source code location: {source_code_location(event.event)} +Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}% +{'-'*80}""" + for event in event_list + ] + ) + if print_enable: + print(output) + return event_list + + +def index_of_first_match(seq, predicate, start=0, end=None): + if end is None or end >= len(seq): + end = len(seq) + for i in range(start, end): + if predicate(seq[i]): + return i + return None + + +def argmax(seq, key=lambda x: x, start=0, end=None): + seq = seq[start:end] + if len(seq) == 0: + return None + return seq.index(max(seq, key=key)) + start + + +def source_code_location(event): + while event is not None: + match = re.search(r"\.py\(.*\)", event.name) + if match is None: + event = event.parent + continue + return event.name + return "No source code location found" + + +# Provide an OSS workaround for cudagraphs + CUPTI issue +# https://github.com/pytorch/pytorch/issues/75504 +# TODO(dberard) - deprecate / remove workaround for CUDA >= 12, when +# we stop supporting older CUDA versions. 
+def _init_for_cuda_graphs(): + from torch.autograd.profiler import profile + + with profile(): + pass diff --git a/venv/lib/python3.10/site-packages/torch/profiler/itt.py b/venv/lib/python3.10/site-packages/torch/profiler/itt.py new file mode 100644 index 0000000000000000000000000000000000000000..4d072957d6fe4e611619495c98bb2ee1629e3219 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/profiler/itt.py @@ -0,0 +1,78 @@ +from contextlib import contextmanager + +try: + from torch._C import _itt +except ImportError: + + class _ITTStub: + @staticmethod + def _fail(*args, **kwargs): + raise RuntimeError( + "ITT functions not installed. Are you sure you have a ITT build?" + ) + + @staticmethod + def is_available(): + return False + + rangePush = _fail + rangePop = _fail + mark = _fail + + _itt = _ITTStub() # type: ignore[assignment] + + +__all__ = ["is_available", "range_push", "range_pop", "mark", "range"] + + +def is_available(): + """ + Check if ITT feature is available or not + """ + return _itt.is_available() + + +def range_push(msg): + """ + Pushes a range onto a stack of nested range span. Returns zero-based + depth of the range that is started. + + Arguments: + msg (str): ASCII message to associate with range + """ + return _itt.rangePush(msg) + + +def range_pop(): + """ + Pops a range off of a stack of nested range spans. Returns the + zero-based depth of the range that is ended. + """ + return _itt.rangePop() + + +def mark(msg): + """ + Describe an instantaneous event that occurred at some point. + + Arguments: + msg (str): ASCII message to associate with the event. + """ + return _itt.mark(msg) + + +@contextmanager +def range(msg, *args, **kwargs): + """ + Context manager / decorator that pushes an ITT range at the beginning + of its scope, and pops it at the end. If extra arguments are given, + they are passed as arguments to msg.format(). + + Args: + msg (str): message to associate with the range + """ + range_push(msg.format(*args, **kwargs)) + try: + yield + finally: + range_pop() diff --git a/venv/lib/python3.10/site-packages/torch/profiler/profiler.py b/venv/lib/python3.10/site-packages/torch/profiler/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b1b611b4ec29eea651b07fb33d2f61fa93e8eac6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/profiler/profiler.py @@ -0,0 +1,839 @@ +import gzip +import json +import os +import tempfile +from abc import ABC, abstractmethod +from enum import Enum +from functools import partial +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple +from warnings import warn + +from typing_extensions import Self + +import torch +import torch.autograd.profiler as prof +from torch._C import _get_privateuse1_backend_name +from torch._C._profiler import ( + _add_execution_trace_observer, + _disable_execution_trace_observer, + _enable_execution_trace_observer, + _ExperimentalConfig, + _remove_execution_trace_observer, +) +from torch.autograd import kineto_available, ProfilerActivity +from torch.profiler._memory_profiler import MemoryProfile, MemoryProfileTimeline + + +__all__ = [ + "supported_activities", + "ProfilerAction", + "schedule", + "tensorboard_trace_handler", + "profile", + "ExecutionTraceObserver", +] +PROFILER_STEP_NAME = "ProfilerStep" + + +def supported_activities(): + """ + Returns a set of supported profiler tracing activities. + + Note: profiler uses CUPTI library to trace on-device CUDA kernels. 
+ In case when CUDA is enabled but CUPTI is not available, passing + ``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA + profiling code (same as in the legacy ``torch.autograd.profiler``). + This, in turn, results in including CUDA time in the profiler table output, + but not in the JSON trace. + """ + return torch.autograd._supported_activities() + + +class _ITraceObserver(ABC): + """Abstract interface for a Trace observer. + This satisfies 3 methods: start, stop and cleanup""" + + @abstractmethod + def start(self): + pass + + @abstractmethod + def stop(self): + pass + + @abstractmethod + def cleanup(self): + pass + + +class _KinetoProfile: + """Low-level profiler wrap the autograd profile + + Args: + activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + record_shapes (bool): save information about operator's input shapes. + profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline`` + for more details). + with_stack (bool): record source information (file and line number) for the ops. + with_flops (bool): use formula to estimate the FLOPS of specific operators + (matrix multiplication and 2D convolution). + with_modules (bool): record module hierarchy (including function names) + corresponding to the callstack of the op. e.g. If module A's forward call's + module B's forward which contains an aten::add op, + then aten::add's module hierarchy is A.B + Note that this support exist, at the moment, only for TorchScript models + and not eager mode models. + experimental_config (_ExperimentalConfig) : A set of experimental options + used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed. + execution_trace_observer (ExecutionTraceObserver) : A PyTorch Execution Trace Observer object. + `PyTorch Execution Traces `__ offer a graph based + representation of AI/ML workloads and enable replay benchmarks, simulators, and emulators. + When this argument is included the observer start() and stop() will be called for the + same time window as PyTorch profiler. + + .. note:: + This API is experimental and subject to change in the future. + + Enabling shape and stack tracing results in additional overhead. + When record_shapes=True is specified, profiler will temporarily hold references to the tensors; + that may further prevent certain optimizations that depend on the reference count and introduce + extra tensor copies. 
+ """ + + def __init__( + self, + *, + activities: Optional[Iterable[ProfilerActivity]] = None, + record_shapes: bool = False, + profile_memory: bool = False, + with_stack: bool = False, + with_flops: bool = False, + with_modules: bool = False, + experimental_config: Optional[_ExperimentalConfig] = None, + execution_trace_observer: Optional[_ITraceObserver] = None, + ): + self.activities = set(activities) if activities else supported_activities() + self.record_shapes = record_shapes + self.with_flops = with_flops + self.profile_memory = profile_memory + self.with_stack = with_stack + self.with_modules = with_modules + self.experimental_config = experimental_config + self.execution_trace_observer = execution_trace_observer + self.profiler: Optional[prof.profile] = None + self.mem_tl: Optional[MemoryProfileTimeline] = None + self.use_device = None + privateuse1_backend = _get_privateuse1_backend_name() + if privateuse1_backend != "privateuseone": + self.use_device = privateuse1_backend + # user-defined metadata to be amended to the trace + self.preset_metadata: Dict[str, str] = dict() + + def start(self): + self.prepare_trace() + self.start_trace() + + def stop(self): + self.stop_trace() + + def prepare_trace(self): + self.profiler = prof.profile( + use_cuda=(ProfilerActivity.CUDA in self.activities), + use_cpu=(ProfilerActivity.CPU in self.activities), + use_mtia=(ProfilerActivity.MTIA in self.activities), + use_device=None, + record_shapes=self.record_shapes, + with_flops=self.with_flops, + profile_memory=self.profile_memory, + with_stack=self.with_stack, + with_modules=self.with_modules, + use_kineto=True, + experimental_config=self.experimental_config, + ) + self.profiler._prepare_trace() + + def start_trace(self): + if self.execution_trace_observer: + self.execution_trace_observer.start() + assert self.profiler is not None + self.profiler._start_trace() + + if self.profile_memory: + self.add_metadata_json("profile_memory", "1") + if self.with_stack: + self.add_metadata_json("with_stack", "1") + if self.record_shapes: + self.add_metadata_json("record_shapes", "1") + if self.with_modules: + self.add_metadata_json("with_modules", "1") + if self.with_flops: + self.add_metadata_json("with_flops", "1") + + if kineto_available(): + dist_info = self._get_distributed_info() + if dist_info: + self.add_metadata_json("distributedInfo", json.dumps(dist_info)) + + if hasattr(torch, "_inductor"): + import torch._inductor.config as inductor_config + + if inductor_config.triton.cudagraphs: + os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1" + self.add_metadata_json("DISABLE_CUPTI_LAZY_REINIT", "1") + # FIXME: CUDA Graph does not work well with CUPTI teardown. + # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11) + # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12) + # Workaround: turn off CUPTI teardown when using CUDA Graphs. + os.environ["TEARDOWN_CUPTI"] = "0" + + # Insert the preset user metadata to the trace + for k, v in self.preset_metadata.items(): + self.add_metadata_json(k, v) + + def stop_trace(self): + if self.execution_trace_observer: + self.execution_trace_observer.stop() + assert self.profiler is not None + self.profiler.__exit__(None, None, None) + + def export_chrome_trace(self, path: str): + """ + Exports the collected trace in Chrome JSON format. 
+ """ + assert self.profiler + if path.endswith(".gz"): + fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False) + fp.close() + retvalue = self.profiler.export_chrome_trace(fp.name) + with open(fp.name) as fin: + with gzip.open(path, "wt") as fout: + fout.writelines(fin) + os.remove(fp.name) + return retvalue + else: + return self.profiler.export_chrome_trace(path) + + def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): + """Save stack traces in a file in a format suitable for visualization. + + Args: + path (str): save stacks file to this location; + metric (str): metric to use: "self_cpu_time_total" or "self_cuda_time_total" + + .. note:: + Example of using FlameGraph tool: + + - git clone https://github.com/brendangregg/FlameGraph + - cd FlameGraph + - ./flamegraph.pl --title "CPU time" --countname "us." profiler.stacks > perf_viz.svg + """ + assert self.profiler + return self.profiler.export_stacks(path, metric) + + def key_averages( + self, group_by_input_shape: bool = False, group_by_stack_n: int = 0 + ): + """Averages events, grouping them by operator name and (optionally) input shapes and + stack. + + .. note:: + To use shape/stack functionality make sure to set record_shapes/with_stack + when creating profiler context manager. + """ + assert self.profiler + return self.profiler.key_averages(group_by_input_shape, group_by_stack_n) + + def events(self): + """ + Returns the list of unaggregated profiler events, + to be used in the trace callback or after the profiling is finished + """ + assert self.profiler + return self.profiler.function_events + + def add_metadata(self, key: str, value: str): + """ + Adds a user defined metadata with a string key and a string value + into the trace file + """ + wrapped_value = '"' + value.replace('"', '\\"') + '"' + torch.autograd._add_metadata_json(key, wrapped_value) + + def add_metadata_json(self, key: str, value: str): + """ + Adds a user defined metadata with a string key and a valid json value + into the trace file + """ + torch.autograd._add_metadata_json(key, value) + + def preset_metadata_json(self, key: str, value: str): + """ + Preset a user defined metadata when the profiler is not started + and added into the trace file later. + Metadata is in the format of a string key and a valid json value + """ + self.preset_metadata[key] = value + + def _get_distributed_info(self): + import torch.distributed as dist + + if not dist.is_available() or not dist.is_initialized(): + return None + + backend = dist.get_backend() + dist_info = { + "backend": backend, + "rank": dist.get_rank(), + "world_size": dist.get_world_size(), + "pg_count": dist.get_pg_count(), + "pg_config": dist.distributed_c10d._get_all_pg_configs(), + } + if backend == "nccl": + nccl_version = torch.cuda.nccl.version() + dist_info["nccl_version"] = ".".join(str(v) for v in nccl_version) + return dist_info + + def _memory_profile(self) -> MemoryProfile: + required = ("record_shapes", "profile_memory", "with_stack") + missing = [f"{i}=True" for i in required if not getattr(self, i)] + if missing: + raise ValueError(f"{', '.join(missing)} required for memory profiling.") + + assert self.profiler is not None and self.profiler.kineto_results is not None + return MemoryProfile(self.profiler.kineto_results) + + def export_memory_timeline(self, path: str, device: Optional[str] = None) -> None: + """Export memory event information from the profiler collected + tree for a given device, and export a timeline plot. 
There are 3 + exportable files using ``export_memory_timeline``, each controlled by the + ``path``'s suffix. + + - For an HTML compatible plot, use the suffix ``.html``, and a memory timeline + plot will be embedded as a PNG file in the HTML file. + + - For plot points consisting of ``[times, [sizes by category]]``, where + ``times`` are timestamps and ``sizes`` are memory usage for each category. + The memory timeline plot will be saved a JSON (``.json``) or gzipped JSON + (``.json.gz``) depending on the suffix. + + - For raw memory points, use the suffix ``.raw.json.gz``. Each raw memory + event will consist of ``(timestamp, action, numbytes, category)``, where + ``action`` is one of ``[PREEXISTING, CREATE, INCREMENT_VERSION, DESTROY]``, + and ``category`` is one of the enums from + ``torch.profiler._memory_profiler.Category``. + + Output: Memory timeline written as gzipped JSON, JSON, or HTML. + """ + # Default to device 0, if unset. Fallback on cpu. + if device is None and self.use_device and self.use_device != "cuda": + device = self.use_device + ":0" + + if device is None: + device = "cuda:0" if torch.cuda.is_available() else "cpu" + + # Construct the memory timeline plot data + self.mem_tl = MemoryProfileTimeline(self._memory_profile()) + + # Depending on the file suffix, save the data as json.gz or json. + # For html, we can embed the image into an HTML file. + if path.endswith(".html"): + self.mem_tl.export_memory_timeline_html(path, device) + elif path.endswith(".gz"): + fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False) + fp.close() + if path.endswith("raw.json.gz"): + self.mem_tl.export_memory_timeline_raw(fp.name, device) + else: + self.mem_tl.export_memory_timeline(fp.name, device) + with open(fp.name) as fin: + with gzip.open(path, "wt") as fout: + fout.writelines(fin) + os.remove(fp.name) + else: + self.mem_tl.export_memory_timeline(path, device) + + +class ProfilerAction(Enum): + """ + Profiler actions that can be taken at the specified intervals + """ + + NONE = 0 + WARMUP = 1 + RECORD = 2 + RECORD_AND_SAVE = 3 + + +def schedule( + *, wait: int, warmup: int, active: int, repeat: int = 0, skip_first: int = 0 +) -> Callable: + """ + Returns a callable that can be used as profiler ``schedule`` argument. The profiler will skip + the first ``skip_first`` steps, then wait for ``wait`` steps, then do the warmup for the next ``warmup`` steps, + then do the active recording for the next ``active`` steps and then repeat the cycle starting with ``wait`` steps. + The optional number of cycles is specified with the ``repeat`` parameter, the zero value means that + the cycles will continue until the profiling is finished. 
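+
+    For example, ``schedule(wait=1, warmup=1, active=2, repeat=1)`` waits on step 0,
+    warms up on step 1, records steps 2 and 3 (after which the trace is saved), and
+    performs no profiling on later steps.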
+ """ + + def schedule_fn(step: int) -> ProfilerAction: + assert step >= 0 + if step < skip_first: + return ProfilerAction.NONE + else: + step -= skip_first + num_steps = wait + warmup + active + if repeat > 0 and step / num_steps >= repeat: + return ProfilerAction.NONE + mod_step = step % num_steps + if mod_step < wait: + return ProfilerAction.NONE + elif mod_step < wait + warmup: + return ProfilerAction.WARMUP + else: + return ( + ProfilerAction.RECORD + if mod_step < num_steps - 1 + else ProfilerAction.RECORD_AND_SAVE + ) + + assert ( + wait >= 0 and warmup >= 0 and active > 0 and repeat >= 0 and skip_first >= 0 + ), "Invalid profiler schedule arguments" + if warmup == 0: + warn("Profiler won't be using warmup, this can skew profiler results") + return schedule_fn + + +def _default_schedule_fn(_: int) -> ProfilerAction: + """ + Default profiler behavior - immediately starts recording the events, + keeps doing it on every profiler step. + """ + return ProfilerAction.RECORD + + +def tensorboard_trace_handler( + dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False +): + """ + Outputs tracing files to directory of ``dir_name``, then that directory can be + directly delivered to tensorboard as logdir. + ``worker_name`` should be unique for each worker in distributed scenario, + it will be set to '[hostname]_[pid]' by default. + """ + import os + import socket + import time + + def handler_fn(prof) -> None: + nonlocal worker_name + if not os.path.isdir(dir_name): + try: + os.makedirs(dir_name, exist_ok=True) + except Exception as e: + raise RuntimeError("Can't create directory: " + dir_name) from e + if not worker_name: + worker_name = f"{socket.gethostname()}_{os.getpid()}" + # Use nanosecond here to avoid naming clash when exporting the trace + file_name = f"{worker_name}.{time.time_ns()}.pt.trace.json" + if use_gzip: + file_name = file_name + ".gz" + prof.export_chrome_trace(os.path.join(dir_name, file_name)) + + return handler_fn + + +class profile(_KinetoProfile): + """Profiler context manager. + + Args: + activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + schedule (Callable): callable that takes step (int) as a single parameter and returns + ``ProfilerAction`` value that specifies the profiler action to perform at each step. + on_trace_ready (Callable): callable that is called at each step when ``schedule`` + returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling. + record_shapes (bool): save information about operator's input shapes. + profile_memory (bool): track tensor memory allocation/deallocation. + with_stack (bool): record source information (file and line number) for the ops. + with_flops (bool): use formula to estimate the FLOPs (floating point operations) of specific operators + (matrix multiplication and 2D convolution). + with_modules (bool): record module hierarchy (including function names) + corresponding to the callstack of the op. e.g. If module A's forward call's + module B's forward which contains an aten::add op, + then aten::add's module hierarchy is A.B + Note that this support exist, at the moment, only for TorchScript models + and not eager mode models. + experimental_config (_ExperimentalConfig) : A set of experimental options + used for Kineto library features. Note, backward compatibility is not guaranteed. 
+ execution_trace_observer (ExecutionTraceObserver) : A PyTorch Execution Trace Observer object. + `PyTorch Execution Traces `__ offer a graph based + representation of AI/ML workloads and enable replay benchmarks, simulators, and emulators. + When this argument is included the observer start() and stop() will be called for the + same time window as PyTorch profiler. See the examples section below for a code sample. + use_cuda (bool): + .. deprecated:: 1.8.1 + use ``activities`` instead. + + .. note:: + Use :func:`~torch.profiler.schedule` to generate the callable schedule. + Non-default schedules are useful when profiling long training jobs + and allow the user to obtain multiple traces at the different iterations + of the training process. + The default schedule simply records all the events continuously for the + duration of the context manager. + + .. note:: + Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard: + + ``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)`` + + After profiling, result files can be found in the specified directory. Use the command: + + ``tensorboard --logdir dir_name`` + + to see the results in TensorBoard. + For more information, see + `PyTorch Profiler TensorBoard Plugin `__ + + .. note:: + Enabling shape and stack tracing results in additional overhead. + When record_shapes=True is specified, profiler will temporarily hold references to the tensors; + that may further prevent certain optimizations that depend on the reference count and introduce + extra tensor copies. + + + Examples: + + .. code-block:: python + + with torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ] + ) as p: + code_to_profile() + print(p.key_averages().table( + sort_by="self_cuda_time_total", row_limit=-1)) + + Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions: + + .. code-block:: python + + # Non-default profiler schedule allows user to turn profiler on and off + # on different iterations of the training loop; + # trace_handler is called every time a new trace becomes available + def trace_handler(prof): + print(prof.key_averages().table( + sort_by="self_cuda_time_total", row_limit=-1)) + # prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json") + + with torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + + # In this example with wait=1, warmup=1, active=2, repeat=1, + # profiler will skip the first step/iteration, + # start warming up on the second, record + # the third and the forth iterations, + # after which the trace will become available + # and on_trace_ready (when set) is called; + # the cycle repeats starting with the next step + + schedule=torch.profiler.schedule( + wait=1, + warmup=1, + active=2, + repeat=1), + on_trace_ready=trace_handler + # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log') + # used when outputting for tensorboard + ) as p: + for iter in range(N): + code_iteration_to_profile(iter) + # send a signal to the profiler that the next iteration has started + p.step() + + The following sample shows how to setup up an Execution Trace Observer (`execution_trace_observer`) + + .. code-block:: python + + with torch.profiler.profile( + ... 
+ execution_trace_observer=( + ExecutionTraceObserver().register_callback("./execution_trace.json") + ), + ) as p: + for iter in range(N): + code_iteration_to_profile(iter) + p.step() + + You can also refer to test_execution_trace_with_kineto() in tests/profiler/test_profiler.py. + Note: One can also pass any object satisfying the _ITraceObserver interface. + """ + + def __init__( + self, + *, + activities: Optional[Iterable[ProfilerActivity]] = None, + schedule: Optional[Callable[[int], ProfilerAction]] = None, + on_trace_ready: Optional[Callable[..., Any]] = None, + record_shapes: bool = False, + profile_memory: bool = False, + with_stack: bool = False, + with_flops: bool = False, + with_modules: bool = False, + experimental_config: Optional[_ExperimentalConfig] = None, + execution_trace_observer: Optional[_ITraceObserver] = None, + # deprecated: + use_cuda: Optional[bool] = None, + ): + activities_set = set(activities) if activities else supported_activities() + if use_cuda is not None: + warn("use_cuda is deprecated, use activities argument instead") + if use_cuda: + activities_set.add(ProfilerActivity.CUDA) + elif ProfilerActivity.CUDA in activities_set: + activities_set.remove(ProfilerActivity.CUDA) + assert len(activities_set) > 0, "No valid profiler activities found" + + super().__init__( + activities=activities, + record_shapes=record_shapes, + profile_memory=profile_memory, + with_stack=with_stack, + with_flops=with_flops, + with_modules=with_modules, + experimental_config=experimental_config, + execution_trace_observer=execution_trace_observer, + ) + + if schedule: + self.schedule = schedule + # add step markers into the trace and table view + self.record_steps = True + else: + self.schedule = _default_schedule_fn + self.record_steps = False + self.on_trace_ready = on_trace_ready + self.step_num = 0 + self.current_action = self.schedule(self.step_num) + self.step_rec_fn: Optional[prof.record_function] = None + + self.action_map: Dict[ + Tuple[ProfilerAction, Optional[ProfilerAction]], List[Any] + ] = { + # key is (prev_action, current_action), value is action list corresponding to the state pair. 
+ (ProfilerAction.NONE, ProfilerAction.NONE): [], + (ProfilerAction.NONE, ProfilerAction.WARMUP): [self.prepare_trace], + (ProfilerAction.NONE, ProfilerAction.RECORD): [ + self.prepare_trace, + self.start_trace, + ], + (ProfilerAction.NONE, ProfilerAction.RECORD_AND_SAVE): [ + self.prepare_trace, + self.start_trace, + ], + (ProfilerAction.WARMUP, ProfilerAction.NONE): [ + partial(warn, "Incorrect schedule: WARMUP followed by NONE"), + self.start_trace, + self.stop_trace, + ], + (ProfilerAction.WARMUP, ProfilerAction.WARMUP): [], + (ProfilerAction.WARMUP, ProfilerAction.RECORD): [self.start_trace], + (ProfilerAction.WARMUP, ProfilerAction.RECORD_AND_SAVE): [self.start_trace], + (ProfilerAction.RECORD, ProfilerAction.NONE): [ + partial(warn, "Incorrect schedule: RECORD followed by NONE"), + self.stop_trace, + ], + (ProfilerAction.RECORD, ProfilerAction.WARMUP): [ + partial(warn, "Incorrect schedule: RECORD followed by WARMUP"), + self.stop_trace, + ], + (ProfilerAction.RECORD, ProfilerAction.RECORD): [], + (ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE): [], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.NONE): [ + self.stop_trace, + self._trace_ready, + ], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.WARMUP): [ + self.stop_trace, + self._trace_ready, + self.prepare_trace, + ], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD): [ + self.stop_trace, + self._trace_ready, + self.prepare_trace, + self.start_trace, + ], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD_AND_SAVE): [ + self.stop_trace, + self._trace_ready, + self.prepare_trace, + self.start_trace, + ], + # used for exit action + (ProfilerAction.WARMUP, None): [self.start_trace, self.stop_trace], + (ProfilerAction.RECORD, None): [self.stop_trace, self._trace_ready], + (ProfilerAction.RECORD_AND_SAVE, None): [ + self.stop_trace, + self._trace_ready, + ], + } + # Start tracking increments to profiler step, this will be used + # by Kineto + prof.KinetoStepTracker.init_step_count(PROFILER_STEP_NAME) + + def __enter__(self): + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + prof.KinetoStepTracker.erase_step_count(PROFILER_STEP_NAME) + if self.execution_trace_observer: + self.execution_trace_observer.cleanup() + + def start(self): + self._transit_action(ProfilerAction.NONE, self.current_action) + if self.record_steps: + self.step_rec_fn = prof.record_function( + "ProfilerStep#" + str(self.step_num) + ) + self.step_rec_fn.__enter__() + + def stop(self): + if self.record_steps and self.step_rec_fn: + self.step_rec_fn.__exit__(None, None, None) + self._transit_action(self.current_action, None) + + def step(self): + """ + Signals the profiler that the next profiling step has started. 
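+
+        Typically called once per iteration of the training loop, as in the
+        ``schedule``/``on_trace_ready`` example in the class docstring.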
+ """ + if self.record_steps and self.step_rec_fn: + self.step_rec_fn.__exit__(None, None, None) + prev_action = self.current_action + self.step_num += 1 + self.current_action = self.schedule(self.step_num) + + self._transit_action(prev_action, self.current_action) + prof.KinetoStepTracker.increment_step(PROFILER_STEP_NAME) + + if self.record_steps: + self.step_rec_fn = prof.record_function( + "ProfilerStep#" + str(self.step_num) + ) + self.step_rec_fn.__enter__() + + def _trace_ready(self): + if self.on_trace_ready: + self.on_trace_ready(self) + + def _transit_action(self, prev_action, current_action): + action_list = self.action_map.get((prev_action, current_action)) + if action_list: + for action in action_list: + action() + + +class ExecutionTraceObserver(_ITraceObserver): + """Execution Trace Observer + + Each process can have a single ExecutionTraceObserver instance. The observer + can be added to record function callbacks via calling register_callback() + explicitly. Without calling unregister_callback(), repeated calls to + register_callback() will not add additional observers to record function + callbacks. Once an ExecutionTraceObserver is created, the start() and stop() + methods control when the event data is recorded. + + Deleting or calling unregister_callback() will remove the observer from the + record function callbacks, finalize the output file, and will stop + incurring any overheads. + """ + + def __init__(self): + """ + Initializes the default states. + """ + self._registered = False + self._execution_trace_running = False + + def __del__(self): + """ + Calls unregister_callback() to make sure to finalize outputs. + """ + self.unregister_callback() + + def register_callback(self, output_file_path: str) -> Self: + """ + Adds ET observer to record function callbacks. The data will be + written to output_file_path. + """ + if not self._registered: + self._output_file_path = output_file_path + self._registered = _add_execution_trace_observer(output_file_path) + return self + + def unregister_callback(self): + """ + Removes ET observer from record function callbacks. + """ + if self._registered: + self.stop() + _remove_execution_trace_observer() + self._registered = False + + @property + def is_registered(self): + """ + Returns True if the execution trace observer is registered, otherwise False. + """ + return self._registered + + def is_running(self): + """ + Returns True if the observer is running, otherwise False. + """ + return self._execution_trace_running + + def start(self): + """ + Starts to capture. + """ + if self._registered and not self._execution_trace_running: + _enable_execution_trace_observer() + self._execution_trace_running = True + + def stop(self): + """ + Stops to capture. + """ + if self._execution_trace_running: + _disable_execution_trace_observer() + self._execution_trace_running = False + + def cleanup(self): + """ + Calls unregister_callback() to make sure to finalize outputs. + """ + self.unregister_callback() + + def get_output_file_path(self) -> str: + """ + Returns the output file name. 
+ """ + if self.is_registered: + return self._output_file_path + else: + raise RuntimeError( + "A callback to the ET profiler needs to be registered " + "first before getting the output file path" + ) diff --git a/venv/lib/python3.10/site-packages/torch/profiler/python_tracer.py b/venv/lib/python3.10/site-packages/torch/profiler/python_tracer.py new file mode 100644 index 0000000000000000000000000000000000000000..b3e624911f95812a523d4dd927a74eec7fe5171b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/profiler/python_tracer.py @@ -0,0 +1,20 @@ +import os +import site +import sys +import typing + +import torch + + +def _prefix_regex() -> typing.List[str]: + raw_paths = ( + site.getsitepackages() + + sys.path + + [site.getuserbase()] + + [site.getusersitepackages()] + + [os.path.dirname(os.path.dirname(torch.__file__))] + ) + + path_prefixes = sorted({os.path.abspath(i) for i in raw_paths}, reverse=True) + assert all(isinstance(i, str) for i in path_prefixes) + return [i + os.sep for i in path_prefixes] diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__init__.py b/venv/lib/python3.10/site-packages/torch/quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fd83d88a3e3e72385726851b1fdd5fc09086a473 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/__init__.py @@ -0,0 +1,87 @@ +from .quantize import * # noqa: F403 +from .observer import * # noqa: F403 +from .qconfig import * # noqa: F403 +from .fake_quantize import * # noqa: F403 +from .fuse_modules import fuse_modules +from .stubs import * # noqa: F403 +from .quant_type import * # noqa: F403 +from .quantize_jit import * # noqa: F403 + +# from .quantize_fx import * +from .quantization_mappings import * # noqa: F403 +from .fuser_method_mappings import * # noqa: F403 + + +def default_eval_fn(model, calib_data): + r""" + Default evaluation function takes a torch.utils.data.Dataset or a list of + input Tensors and run the model on the dataset + """ + for data, target in calib_data: + model(data) + + +__all__ = [ + "QuantWrapper", + "QuantStub", + "DeQuantStub", + # Top level API for eager mode quantization + "quantize", + "quantize_dynamic", + "quantize_qat", + "prepare", + "convert", + "prepare_qat", + # Top level API for graph mode quantization on TorchScript + "quantize_jit", + "quantize_dynamic_jit", + "_prepare_ondevice_dynamic_jit", + "_convert_ondevice_dynamic_jit", + "_quantize_ondevice_dynamic_jit", + # Top level API for graph mode quantization on GraphModule(torch.fx) + # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx + # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx', + "QuantType", # quantization type + # custom module APIs + "get_default_static_quant_module_mappings", + "get_static_quant_module_class", + "get_default_dynamic_quant_module_mappings", + "get_default_qat_module_mappings", + "get_default_qconfig_propagation_list", + "get_default_compare_output_module_list", + "get_quantized_operator", + "get_fuser_method", + # Sub functions for `prepare` and `swap_module` + "propagate_qconfig_", + "add_quant_dequant", + "swap_module", + "default_eval_fn", + # Observers + "ObserverBase", + "WeightObserver", + "HistogramObserver", + "observer", + "default_observer", + "default_weight_observer", + "default_placeholder_observer", + "default_per_channel_weight_observer", + # FakeQuantize (for qat) + "default_fake_quant", + "default_weight_fake_quant", + "default_fixed_qparams_range_neg1to1_fake_quant", + 
"default_fixed_qparams_range_0to1_fake_quant", + "default_per_channel_weight_fake_quant", + "default_histogram_fake_quant", + # QConfig + "QConfig", + "default_qconfig", + "default_dynamic_qconfig", + "float16_dynamic_qconfig", + "float_qparams_weight_only_qconfig", + # QAT utilities + "default_qat_qconfig", + "prepare_qat", + "quantize_qat", + # module transformations + "fuse_modules", +] diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d17a02d3a831a38e99d0db269b6d13c13dae5ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83a86805301611fc8e9aa7c99024f46ea9456d9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df5e49d0d478e5693d474a5e5e25ea1fd218b179 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08bfb237e9eb2c6677560c6489ccfd0c77ee76c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe88c65a3d7a509dbab74f530d3ddb62dd12edf5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f37deae835fb8f6a2d2dc403d2e01e245c2edeca Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6aec63cc9dfcc14fab5abaa791201d67d6757e9e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7f04e83172419a923c2b1e2c8bfa29ffcab2bbd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..783a2174e1981bedd4a8a02a406971de4cdca57f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e77e776a66ff58d188e3e7a24995bf2cc6363ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7273056f75b1e8f0f04a037841bc756c23060339 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60ad1ff34f77c62c84cd8721443387cae6997443 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e89fef7c53770ed809e1696275209b4e827ddc33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5afa0c70e50a737f7f7e117743fe727800ec166b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a93a18575af6bdc4f049389a851e019ed4f66f2 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b30961e482fb341fac485ae11a56b65f0027565 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py b/venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..49ccc8e69523f7dbee2335b788a2cb3a7db618a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py @@ -0,0 +1,28 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/ns/_numeric_suite.py`, while adding an import statement +here. +""" + +from torch.ao.ns._numeric_suite import ( + _convert_tuple_to_list, + _dequantize_tensor_list, + _find_match, + _get_logger_dict_helper, + _is_identical_module_type, + compare_model_outputs, + compare_model_stub, + compare_weights, + get_logger_dict, + get_matching_activations, + Logger, + NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST, + OutputLogger, + prepare_model_outputs, + prepare_model_with_stubs, + Shadow, + ShadowLogger, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py b/venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..55cd7085740d0ce8de79491acbfc4888ebba21f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py @@ -0,0 +1,26 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/ns/_numeric_suite_fx.py`, while adding an import statement +here. +""" + +from torch.ao.ns._numeric_suite_fx import ( + _add_loggers_impl, + _add_loggers_one_model, + _add_shadow_loggers_impl, + _extract_logger_info_one_model, + _extract_weights_impl, + _extract_weights_one_model, + add_loggers, + add_shadow_loggers, + extend_logger_results_with_comparison, + extract_logger_info, + extract_shadow_logger_info, + extract_weights, + NSTracer, + OutputLogger, + RNNReturnType, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py b/venv/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..2b7670ea48026f22d040b7d1c73e9330ee9ece3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py @@ -0,0 +1,132 @@ +import torch + + +# Pack pairs of int4 values into int8, in row major order; first int4 +# value goes into lower order bits, and second int4 value into higher +# order bits of resulting int8 value. 
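As a quick, hypothetical illustration of the nibble layout described in the comment above (the values below are made up and not part of the patch): the first int4 of each adjacent pair lands in the low four bits of the packed int8 and the second in the high four bits, which is what the pack_int4_to_int8 / unpack_int8_to_int4 helpers defined immediately below compute.

    import torch

    # Each adjacent pair of int4 values collapses into one int8:
    # (1, 2) -> 0x21 == 33 and (3, 4) -> 0x43 == 67.
    w = torch.tensor([[1, 2, 3, 4]], dtype=torch.int8)
    packed = ((w[:, 1::2] & 0xF) << 4) | (w[:, 0::2] & 0xF)  # same expression as pack_int4_to_int8
    print(packed)    # tensor([[33, 67]], dtype=torch.int8)

    # Unpacking splits every int8 back into (low nibble, high nibble),
    # restoring the original row of int4 values.
    unpacked = torch.stack((packed & 0xF, (packed >> 4) & 0xF), dim=2).view(1, 4)
    print(unpacked)  # tensor([[1, 2, 3, 4]], dtype=torch.int8)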
+def pack_int4_to_int8(weight): + assert weight.dim() == 2 + assert weight.shape[1] % 2 == 0 + assert weight.dtype == torch.int8 + return ((weight[:, 1::2] & 0xF) << 4) | (weight[:, 0::2] & 0xF) + + +# Unpack quandruples of bits in int8 values into int4 values, in row +# major order; lower 4 bits go into first int4 value goes, and upper 4 +# bits go into second int4 value. +def unpack_int8_to_int4(weight): + assert weight.dim() == 2 + assert weight.dtype == torch.int8 + return torch.stack((weight & 0xF, (weight >> 4) & 0xF), dim=2).view( + weight.shape[0], 2 * weight.shape[1] + ) + + +# Transpose the weight matrix, and then reorder its elements according +# to underlying requirements of CUTLASS library, so that it could be +# used for CUTLASS-based mixed datatypes linear operation. +def quantized_weight_reorder_for_mixed_dtypes_linear_cutlass( + weight, dtypeq, transpose=False +): + assert weight.dim() == 2 + assert weight.dtype == torch.int8 + assert dtypeq == torch.int8 or dtypeq == torch.quint4x2 + assert weight.device.type == "cuda" + + device = weight.device + + # subbyte_transpose + if not transpose: + if dtypeq == torch.int8: + outp = weight.T + elif dtypeq == torch.quint4x2: + outp = pack_int4_to_int8(unpack_int8_to_int4(weight.view(torch.int8)).T) + else: + outp = weight + + ncols, nrows = outp.shape # type: ignore[possibly-undefined] + assert nrows % (32 if dtypeq == torch.quint4x2 else 64) == 0 + assert ncols % 64 == 0 + + # permute_B_rows_for_mixed_gemm + # (permute cols actually, as transpose is applied first here) + if dtypeq == torch.quint4x2: + cols_permuted = ( + torch.tensor( + [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], + device=device, + ) + + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand( + nrows // 16, 16 + ) + ).view(-1) + else: + cols_permuted = ( + torch.tensor( + [0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15], + device=device, + ) + + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand( + nrows // 16, 16 + ) + ).view(-1) + outp = outp.index_copy(1, cols_permuted, outp) + + # interleave_column_major_tensor + magic0 = 4 if dtypeq == torch.quint4x2 else 2 + magic1 = 32 // magic0 + + tmp0 = ( + (torch.arange(0, ncols // magic0, device=device) * (nrows // 4 * magic0)) + .view(-1, 1) + .repeat(1, nrows // 4 * magic0) + .view(-1) + ) + tmp1 = ( + (torch.arange(0, nrows // 4 // magic1, device=device) * (magic0 * magic1)) + .view(-1, 1) + .repeat(1, magic1) + .view(-1) + .repeat(ncols) + ) + tmp2 = ( + (torch.arange(0, magic0, device=device) * magic1) + .view(-1, 1) + .repeat(1, nrows // 4) + .view(-1) + .repeat(ncols // magic0) + ) + tmp3 = torch.arange(0, magic1, device=device).repeat(nrows // 4 * ncols // magic1) + + outp_offsets = tmp0 + tmp1 + tmp2 + tmp3 + + tmp = outp.view(-1).view(torch.int32) + outp = torch.zeros_like(tmp) + outp.scatter_(0, outp_offsets, tmp) + outp = outp.view(weight.dtype) + + # add_bias_and_interleave_quantized_tensor_inplace + tmp = outp.view(-1) + + outp = torch.empty_like(tmp) + if dtypeq == torch.int8: + tmp = (tmp.to(torch.int) + 128).to(tmp.dtype) + outp[0::4] = tmp[0::4] + outp[1::4] = tmp[2::4] + outp[2::4] = tmp[1::4] + outp[3::4] = tmp[3::4] + elif dtypeq == torch.quint4x2: + tmp0 = ((tmp & 0xF) + 8) & 0xF + tmp0 = (tmp0[1::2] << 4) | tmp0[0::2] + tmp1 = (((tmp >> 4) & 0xF) + 8) & 0xF + tmp1 = (tmp1[1::2] << 4) | tmp1[0::2] + outp[0::4] = tmp0[0::2] + outp[1::4] = tmp0[1::2] + outp[2::4] = tmp1[0::2] + outp[3::4] = tmp1[1::2] + + if dtypeq == torch.quint4x2: + nrows 
*= 2 + ncols //= 2 + + return outp.view(nrows, ncols).view(torch.uint8) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fake_quantize.py b/venv/lib/python3.10/site-packages/torch/quantization/fake_quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..69a5d730bfb68e89e24beb04ad13fd3fa5881ae9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fake_quantize.py @@ -0,0 +1,32 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/fake_quantize.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.fake_quantize import ( + _is_fake_quant_script_module, + _is_per_channel, + _is_per_tensor, + _is_symmetric_quant, + default_fake_quant, + default_fixed_qparams_range_0to1_fake_quant, + default_fixed_qparams_range_neg1to1_fake_quant, + default_fused_act_fake_quant, + default_fused_per_channel_wt_fake_quant, + default_fused_wt_fake_quant, + default_histogram_fake_quant, + default_per_channel_weight_fake_quant, + default_weight_fake_quant, + disable_fake_quant, + disable_observer, + enable_fake_quant, + enable_observer, + FakeQuantize, + FakeQuantizeBase, + FixedQParamsFakeQuantize, + FusedMovingAvgObsFakeQuantize, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fuse_modules.py b/venv/lib/python3.10/site-packages/torch/quantization/fuse_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..6b704fa8094e8b367e9eba47102863ba845415b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fuse_modules.py @@ -0,0 +1,22 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/fuse_modules.py`, while adding an import statement +here. +""" + +# TODO: These functions are not used outside the `fuse_modules.py` +# Keeping here for now, need to remove them later. +from torch.ao.quantization.fuse_modules import ( + _fuse_modules, + _get_module, + _set_module, + fuse_known_modules, + fuse_modules, + get_fuser_method, +) + +# for backward compatiblity +from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn, fuse_conv_bn_relu diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py b/venv/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb13ac96271fa7b926cc703918984760e6ede15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/fuser_method_mappings.py`, while adding an import statement +here. 
+""" +from torch.ao.quantization.fuser_method_mappings import ( + _DEFAULT_OP_LIST_TO_FUSER_METHOD, + fuse_conv_bn, + fuse_conv_bn_relu, + fuse_linear_bn, + get_fuser_method, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__init__.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c01cbd457374c27e40b07daca5ae1644a701767d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/__init__.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" + +from torch.ao.quantization.fx.convert import convert +from torch.ao.quantization.fx.fuse import fuse + +# omitting files that's unlikely to be used right now, for example +# the newly added lower_to_fbgemm etc. +from torch.ao.quantization.fx.prepare import prepare diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0aee1f0520b185b1be95494c4692e8fc5bc1bf0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de821a59014fa65954091d0e9cc8b33239e46271 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..209d1f979b012fb37fedeb578be9e958d2375c12 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fuse.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fuse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e73f51f37846815a95d9793cc207c72fc434e6aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fuse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fusion_patterns.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fusion_patterns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b36efef40754cc62f0eb478c28c24bcc2c5f646 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fusion_patterns.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/graph_module.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c777e226d88513b030a70eeb6061ab8f67fd7a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/graph_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/match_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/match_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b73462e162db571a915356a42d732fb0c489731c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/match_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf2015f9952aaa99deb83df23eb6ee425a621b5d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/prepare.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/prepare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..931214013a19550d38ad06f24cd986e14cced27b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/prepare.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_patterns.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_patterns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c18211ac17c8e2617e2f9e1f9e00d09dd905792a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_patterns.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..807915fc273b4fbbb5065aeda1cf6650d09b4d43 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/quantization_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..236fccb069fb421ed174ca325bb240709e9a6209 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/_equalize.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/_equalize.py new file mode 100644 index 0000000000000000000000000000000000000000..7acea4f84a2a0a82f134b6790e573f8f1cb677f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/_equalize.py @@ -0,0 +1,38 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for 
compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx._equalize import ( + _convert_equalization_ref, + _InputEqualizationObserver, + _WeightEqualizationObserver, + calculate_equalization_scale, + clear_weight_quant_obs_node, + convert_eq_obs, + CUSTOM_MODULE_SUPP_LIST, + custom_module_supports_equalization, + default_equalization_qconfig, + EqualizationQConfig, + fused_module_supports_equalization, + get_equalization_qconfig_dict, + get_layer_sqnr_dict, + get_op_node_and_weight_eq_obs, + input_equalization_observer, + is_equalization_observer, + maybe_get_next_equalization_scale, + maybe_get_next_input_eq_obs, + maybe_get_weight_eq_obs_node, + nn_module_supports_equalization, + node_supports_equalization, + remove_node, + reshape_scale, + scale_input_observer, + scale_weight_functional, + scale_weight_node, + update_obs_for_equalization, + weight_equalization_observer, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/convert.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..9d6ac350602bb7a97c773a3a09fec0780483379f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/convert.py @@ -0,0 +1,9 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx.convert import convert diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/fuse.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/fuse.py new file mode 100644 index 0000000000000000000000000000000000000000..67527080304fb31ddc54fe254533e2196f77a616 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/fuse.py @@ -0,0 +1,9 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx.fuse import fuse diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/fusion_patterns.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/fusion_patterns.py new file mode 100644 index 0000000000000000000000000000000000000000..e29337b3f861e5b54dc9f37d39d12ad975ad1315 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/fusion_patterns.py @@ -0,0 +1,9 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. 
+""" +from torch.ao.quantization.fx.fuse_handler import DefaultFuseHandler, FuseHandler diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/graph_module.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/graph_module.py new file mode 100644 index 0000000000000000000000000000000000000000..a71e980a57ba141bdc5bbe9b283d69582eb8fd82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/graph_module.py @@ -0,0 +1,17 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx.graph_module import ( + _is_observed_module, + _is_observed_standalone_module, + FusedGraphModule, + GraphModule, + ObservedGraphModule, + ObservedStandaloneGraphModule, + QuantizedGraphModule, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/match_utils.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/match_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8b49f7c645d8d1bc3a154d62a1295a90b155f986 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/match_utils.py @@ -0,0 +1,14 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx.match_utils import ( + _find_matches, + _is_match, + _MatchResult, + MatchAllNode, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/pattern_utils.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/pattern_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..26954833bb48eb5a807ac31cc558c5282cb63201 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/pattern_utils.py @@ -0,0 +1,34 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. 
+""" +from torch.ao.quantization.fx.pattern_utils import ( + _register_fusion_pattern, + _register_quant_pattern, + get_default_fusion_patterns, + get_default_output_activation_post_process_map, + get_default_quant_patterns, + QuantizeHandler, +) + +# QuantizeHandler.__module__ = _NAMESPACE +_register_fusion_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils" +get_default_fusion_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils" +_register_quant_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils" +get_default_quant_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils" +get_default_output_activation_post_process_map.__module__ = ( + "torch.ao.quantization.fx.pattern_utils" +) + +# __all__ = [ +# "QuantizeHandler", +# "_register_fusion_pattern", +# "get_default_fusion_patterns", +# "_register_quant_pattern", +# "get_default_quant_patterns", +# "get_default_output_activation_post_process_map", +# ] diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/prepare.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/prepare.py new file mode 100644 index 0000000000000000000000000000000000000000..ca65dcc04dd0021f0065892ca86e209a1c218473 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/prepare.py @@ -0,0 +1,9 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.fx.prepare import prepare diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/quantization_patterns.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/quantization_patterns.py new file mode 100644 index 0000000000000000000000000000000000000000..34ee88a4713c5d7016d8a50193555b6ec7c3dfe2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/quantization_patterns.py @@ -0,0 +1,47 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. 
+""" +from torch.ao.quantization.fx.quantize_handler import ( + BatchNormQuantizeHandler, + BinaryOpQuantizeHandler, + CatQuantizeHandler, + ConvReluQuantizeHandler, + CopyNodeQuantizeHandler, + CustomModuleQuantizeHandler, + DefaultNodeQuantizeHandler, + EmbeddingQuantizeHandler, + FixedQParamsOpQuantizeHandler, + GeneralTensorShapeOpQuantizeHandler, + LinearReLUQuantizeHandler, + QuantizeHandler, + RNNDynamicQuantizeHandler, + StandaloneModuleQuantizeHandler, +) + +QuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +BinaryOpQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +CatQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +ConvReluQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +LinearReLUQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +BatchNormQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +EmbeddingQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +RNNDynamicQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +DefaultNodeQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +FixedQParamsOpQuantizeHandler.__module__ = ( + "torch.ao.quantization.fx.quantization_patterns" +) +CopyNodeQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns" +CustomModuleQuantizeHandler.__module__ = ( + "torch.ao.quantization.fx.quantization_patterns" +) +GeneralTensorShapeOpQuantizeHandler.__module__ = ( + "torch.ao.quantization.fx.quantization_patterns" +) +StandaloneModuleQuantizeHandler.__module__ = ( + "torch.ao.quantization.fx.quantization_patterns" +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/quantization_types.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/quantization_types.py new file mode 100644 index 0000000000000000000000000000000000000000..a422cdd3142e04c8d16f495cc6cd65823451810b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/quantization_types.py @@ -0,0 +1,9 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. +""" +from torch.ao.quantization.utils import Pattern, QuantizerCls diff --git a/venv/lib/python3.10/site-packages/torch/quantization/fx/utils.py b/venv/lib/python3.10/site-packages/torch/quantization/fx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ef35559884b7c430f1d5c72b21f72979108469a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/fx/utils.py @@ -0,0 +1,20 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate files under `torch/ao/quantization/fx/`, while adding an import statement +here. 
+""" +from torch.ao.quantization.fx.utils import ( + all_node_args_have_no_tensors, + assert_and_get_unique_device, + create_getattr_from_value, + get_custom_module_class_keys, + get_linear_prepack_op_for_dtype, + get_new_attr_name_with_prefix, + get_non_observable_arg_indexes_and_types, + get_qconv_prepack_op, + graph_module_from_producer_nodes, + maybe_get_next_module, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/observer.py b/venv/lib/python3.10/site-packages/torch/quantization/observer.py new file mode 100644 index 0000000000000000000000000000000000000000..6e6c7c1917c83433fc19f016140b25d060284535 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/observer.py @@ -0,0 +1,36 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/observer.py`, while adding an import statement +here. +""" +from torch.ao.quantization.observer import ( + _is_activation_post_process, + _is_per_channel_script_obs_instance, + _ObserverBase, + _PartialWrapper, + _with_args, + _with_callable_args, + ABC, + default_debug_observer, + default_dynamic_quant_observer, + default_float_qparams_observer, + default_histogram_observer, + default_observer, + default_per_channel_weight_observer, + default_placeholder_observer, + default_weight_observer, + get_observer_state_dict, + HistogramObserver, + load_observer_state_dict, + MinMaxObserver, + MovingAverageMinMaxObserver, + MovingAveragePerChannelMinMaxObserver, + NoopObserver, + ObserverBase, + PerChannelMinMaxObserver, + PlaceholderObserver, + RecordingObserver, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/qconfig.py b/venv/lib/python3.10/site-packages/torch/quantization/qconfig.py new file mode 100644 index 0000000000000000000000000000000000000000..6bb7e14110cb9cdc4e9c2c418c6776ea6445f0d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/qconfig.py @@ -0,0 +1,30 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/qconfig.py`, while adding an import statement +here. +""" +from torch.ao.quantization.qconfig import ( + _add_module_to_qconfig_obs_ctr, + _assert_valid_qconfig, + default_activation_only_qconfig, + default_debug_qconfig, + default_dynamic_qconfig, + default_per_channel_qconfig, + default_qat_qconfig, + default_qat_qconfig_v2, + default_qconfig, + default_weight_only_qconfig, + float16_dynamic_qconfig, + float16_static_qconfig, + float_qparams_weight_only_qconfig, + get_default_qat_qconfig, + get_default_qconfig, + per_channel_dynamic_qconfig, + QConfig, + qconfig_equals, + QConfigAny, + QConfigDynamic, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/quant_type.py b/venv/lib/python3.10/site-packages/torch/quantization/quant_type.py new file mode 100644 index 0000000000000000000000000000000000000000..8555f03792661f39c85c8facf3f911786cc25d0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/quant_type.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. 
+If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quant_type.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.quant_type import _get_quant_type_to_str, QuantType diff --git a/venv/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py b/venv/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..8b44a980ce82fbfa5a81ad906499806cf99b876f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py @@ -0,0 +1,29 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantization_mappings.py`, while adding an import statement +here. +""" +from torch.ao.quantization.quantization_mappings import ( + _get_special_act_post_process, + _has_special_act_post_process, + _INCLUDE_QCONFIG_PROPAGATE_LIST, + DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, + DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS, + DEFAULT_MODULE_TO_ACT_POST_PROCESS, + DEFAULT_QAT_MODULE_MAPPINGS, + DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS, + DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, + get_default_compare_output_module_list, + get_default_dynamic_quant_module_mappings, + get_default_float_to_quantized_operator_mappings, + get_default_qat_module_mappings, + get_default_qconfig_propagation_list, + get_default_static_quant_module_mappings, + get_dynamic_quant_module_class, + get_quantized_operator, + get_static_quant_module_class, + no_observer_set, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/quantize.py b/venv/lib/python3.10/site-packages/torch/quantization/quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..600d3a46fed0346e3ae8909872cd5bf3c733860c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/quantize.py @@ -0,0 +1,30 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantize.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.quantize import ( + _add_observer_, + _convert, + _get_observer_dict, + _get_unique_devices_, + _is_activation_post_process, + _observer_forward_hook, + _propagate_qconfig_helper, + _register_activation_post_process_hook, + _remove_activation_post_process, + _remove_qconfig, + add_quant_dequant, + convert, + prepare, + prepare_qat, + propagate_qconfig_, + quantize, + quantize_dynamic, + quantize_qat, + swap_module, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/quantize_fx.py b/venv/lib/python3.10/site-packages/torch/quantization/quantize_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..649142c7a7eee9885d96b37f70e582f3ea9a9f8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/quantize_fx.py @@ -0,0 +1,26 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. 
+If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantize_fx.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.fx.graph_module import ObservedGraphModule +from torch.ao.quantization.quantize_fx import ( + _check_is_graph_module, + _convert_fx, + _convert_standalone_module_fx, + _fuse_fx, + _prepare_fx, + _prepare_standalone_module_fx, + _swap_ff_with_fxff, + convert_fx, + fuse_fx, + prepare_fx, + prepare_qat_fx, + QuantizationTracer, + Scope, + ScopeContextManager, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/quantize_jit.py b/venv/lib/python3.10/site-packages/torch/quantization/quantize_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..aa627dc7bb51ef7ea1fde7e2e5da283c9f6c8900 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/quantize_jit.py @@ -0,0 +1,26 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantize_jit.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.quantize_jit import ( + _check_forward_method, + _check_is_script_module, + _convert_jit, + _prepare_jit, + _prepare_ondevice_dynamic_jit, + _quantize_jit, + convert_dynamic_jit, + convert_jit, + fuse_conv_bn_jit, + prepare_dynamic_jit, + prepare_jit, + quantize_dynamic_jit, + quantize_jit, + script_qconfig, + script_qconfig_dict, +) diff --git a/venv/lib/python3.10/site-packages/torch/quantization/stubs.py b/venv/lib/python3.10/site-packages/torch/quantization/stubs.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fd5c63683dc572c35cabc202ee4ddb2b0053c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/stubs.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/stubs.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.stubs import DeQuantStub, QuantStub, QuantWrapper diff --git a/venv/lib/python3.10/site-packages/torch/quantization/utils.py b/venv/lib/python3.10/site-packages/torch/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d51d58f38d7462713f84ab62427852c1dd8e52c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/quantization/utils.py @@ -0,0 +1,29 @@ +# flake8: noqa: F401 +r""" +Utils shared by different modes of quantization (eager/graph) + +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/utils.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.utils import ( + activation_dtype, + activation_is_int8_quantized, + activation_is_statically_quantized, + calculate_qmin_qmax, + check_min_max_valid, + get_combined_dict, + get_qconfig_dtypes, + get_qparam_dict, + get_quant_type, + get_swapped_custom_module_class, + getattr_from_fqn, + is_per_channel, + is_per_tensor, + weight_dtype, + weight_is_quantized, + weight_is_statically_quantized, +)
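All of the torch/quantization/*.py modules added above are thin compatibility shims: each one only re-imports its public names from the corresponding torch.ao.quantization (or torch.ao.ns) module, so the legacy import path keeps resolving to the very same objects while the migration described in their docstrings completes. A minimal sketch of what that means for callers, assuming a torch build that ships both namespaces:

    import torch.ao.quantization as ao_q
    import torch.quantization as legacy_q  # legacy path, kept as a re-export layer

    # The shim modules re-export rather than redefine, so identity checks hold:
    # configs, observers, and entry points obtained through either path are
    # literally the same objects.
    assert legacy_q.QConfig is ao_q.QConfig
    assert legacy_q.default_qconfig is ao_q.default_qconfig
    assert legacy_q.ObserverBase is ao_q.ObserverBase
    assert legacy_q.quantize_dynamic is ao_q.quantize_dynamic

Per the docstrings repeated in each shim, new functionality is expected to land under torch/ao/quantization first, with only an import statement added on this side.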