diff --git a/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9be60abf5cf2a0cfc7d5ccf319cbbc0efa9e1dc9
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25612e6021c8a2f6823366cc30e9f3a49f28c9ededc00355e7e8eea1782cf823
+size 33555627
diff --git a/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bf3ca5e012d897e6a8e16085ed0dd672cde9c72a
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ea88e8425126684a61bc38a0a113a144c30767b09eb289f7344d2cedabb941b
+size 50332828
diff --git a/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..176cb659d1e5132d899dc9d2d84fbb85f6db2ce7
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1405102430b95a45777f24793899366dfa3252bb2789c5c3a8086a5cff0e1d40
+size 33555612
diff --git a/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c2ab69ff71aee14bc879dab9d73d1510ac8bded4
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71023c52f8eb8c1d3f7b088a0fabea788e5623d7c2c1d524be094f58f19f712f
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_VF.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_VF.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bd6fcbb682576cb7c82ca5c78ad46dd14f19a40
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_VF.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/__config__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/__config__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ada75b9a8efca9c20ea9734a92de1805980e9a7a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/__config__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/__future__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/__future__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bffbad628165a1cb41dab026a85dab7367661b1d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/__future__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index
0000000000000000000000000000000000000000..906cd215e6fae28ef22ac964280453bc45d2a260 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_appdirs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_appdirs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c602c133561fe17c2c62b714f5d37bfbbef8a70 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_appdirs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_classes.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_classes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47f0089abc0ffedd973002d39efb5db593750bf4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_classes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_compile.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_compile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7e6d91a85959066afce1ad3b96688f7c5bd35dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_compile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_custom_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_custom_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c977f82d5657572d3c8812b4d0bb54aa20c77293 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_custom_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_deploy.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_deploy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..044af993e011e3b32016fd4194d125995a2970e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_deploy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_guards.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_guards.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..375d22c5049336fca234d1f4eaca289548704736 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_guards.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_jit_internal.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_jit_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd87dc78b612b58935da782ac8b5690ea2818426 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_jit_internal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_linalg_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_linalg_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1e3a380f8ea966b7d95c7da2c0eb370add25825 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_linalg_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_lobpcg.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_lobpcg.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..0a4c6b7da55b2ce6111d81c8ed138587466ad85c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_lobpcg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_lowrank.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_lowrank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab8d4d3fa8eeabc21f4aa5a47c4372e65c96cdf9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_lowrank.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c54e7169bc50f11e5e12ee3086b2c6380f27128a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06d2e7822c4d51cf64732003e42ec93e73c82f66 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dc47b496fe32819311adf33bb4dd007f5ad9be2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_python_dispatcher.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_python_dispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e29e5e776492edc2c410369a51acf7a7ba33976 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_python_dispatcher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_sources.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_sources.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fe7048d177b453f0451dddf2b663e091a1ce3b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_sources.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_storage_docs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_storage_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaa9cc09849cbab7325c8e5a0f1ab20937e7e51e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_storage_docs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_streambase.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_streambase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8e7424af75c128da065682b780a9465ae75013a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_streambase.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfc2afe3d35bfab14f0bfee5ae04f91724a07721 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..298bbfea5ca22396513b9e66a91411f52c8c4f14 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor_str.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor_str.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..000d91fe31f940ab8cf3cd3f6d1cd527abc66497 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_tensor_str.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b98b134cc5833eead0b79b6c80315f3065de2427 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00f64a5b1f3caf88b6ecd84763777b49eff9cb7a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_utils_internal.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_utils_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fb51c4e0888af62c455b88c84517f9606f3e3fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_utils_internal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_vmap_internals.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_vmap_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aa1cb3ded6c91d8a8598ef8ab1003adf793668b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_vmap_internals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/_weights_only_unpickler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/_weights_only_unpickler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d53945937e729a2732402681072ef627fb2a6778 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/_weights_only_unpickler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/functional.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf67fe22194807d9e4d74615d601bdbf9f272458 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/functional.cpython-310.pyc differ 
diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/hub.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/hub.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89bed0161e315504d9b7efb560a2aced3a18ac20 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/hub.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/library.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/library.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99472c79387c879e7df0b59392722b7a6a4a2140 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/library.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c7f2cc1d11548b9f29fb76f893c681f87fb129b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/quasirandom.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/quasirandom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc3734dc59c18d85ee4a5cd192e3af4d9c6c76d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/quasirandom.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c98bd2d48260990899b7b9440d558bf15c29f46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/return_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/return_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc93afe8e7f977d0be7846a2f2a6e52dcff565fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/return_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/serialization.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc4479124263693b1f5a9cd515c12c927b5a58af Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/serialization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/storage.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b910ff9792fc1c6a8b85cd311a020d39d67410df Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/storage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/torch_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/torch_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff63a3c19862fb22ff225b350505e10c6b931912 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/torch_version.cpython-310.pyc differ 
diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2ac03f576735d905c5d1eae40c55495e50ccbbf
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/types.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc6a63e42eb0c9e12fffe0ad21dd4d0605e8fc92
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/__pycache__/version.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_prims/__init__.py b/venv/lib/python3.10/site-packages/torch/_prims/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..106ab5cc81a3635124c65d37f60696874aa26cfe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/_prims/__init__.py
@@ -0,0 +1,3031 @@
+import contextlib
+import itertools
+import operator
+import weakref
+from enum import Enum
+from functools import partial, reduce
+from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
+
+import torch
+
+import torch._prims_common as utils
+import torch.library
+from torch import sym_float, Tensor, TypedStorage
+from torch._C import _get_default_device
+from torch._prims.debug_prims import register_debug_prims
+from torch._prims.rng_prims import register_rng_prims
+from torch._prims_common import (
+    Dim,
+    DimsSequenceType,
+    DimsType,
+    IntLike,
+    Number,
+    NumberType,
+    RETURN_TYPE,
+    ShapeType,
+    StrideType,
+    TensorLike,
+    TensorLikeType,
+    type_to_dtype,
+)
+from torch._prims_common.wrappers import backwards_not_supported
+from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
+from torch.overrides import handle_torch_function, has_torch_function
+from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
+
+prim = torch.library.Library("prims", "DEF")
+prim_impl = torch.library.Library("prims", "IMPL", "CompositeExplicitAutograd")
+prim_backend_select_impl = torch.library.Library("prims", "IMPL", "BackendSelect")
+prim_autograd_impl = torch.library.Library("prims", "IMPL", "Autograd")
+prim_meta_impl = torch.library.Library("prims", "IMPL", "Meta")
+
+# Experimental module containing prototype "primitive" operations.
+ +__all__ = [ + # + # Common datastructures and helpers + # + "RETURN_TYPE", + # + # Elementwise unary prims + # + "abs", + "acos", + "acosh", + "asin", + "asinh", + "atan", + "atanh", + "cos", + "cosh", + "bessel_i0", + "bessel_i0e", + "bessel_i1", + "bessel_i1e", + "bessel_j0", + "bessel_j1", + "bitwise_not", + "cbrt", + "ceil", + "conj_physical", + "digamma", + "erf", + "erf_inv", + "erfc", + "erfcx", + "exp", + "expm1", + "exp2", + "fill", + "floor", + "imag", + "isfinite", + "lgamma", + "log", + "log1p", + "log2", + "log10", + "ndtri", + "neg", + "real", + "reciprocal", + "round", + "sign", + "signbit", + "sin", + "sinh", + "spherical_bessel_j0", + "sqrt", + "tan", + "tanh", + "trunc", + # + # Elementwise binary prims + # + "add", + "atan2", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + # 'complex', # needs custom meta + "div", + "eq", + "fmax", + "fmin", + "fmod", + "frexp", + "gcd", + "ge", + "gt", + "hypot", + "igamma", + "igammac", + "le", + "lt", + "maximum", + "minimum", + "mul", + "ne", + "nextafter", + "pow", + "remainder", + "rsqrt", + "shift_left", + "shift_right_arithmetic", + "shift_right_logical", # not implemented + "sub", + "zeta", + # + # View prims + # + "as_strided", + "broadcast_in_dim", + "collapse_view", + "conj", + "expand_dims", + "slice", + "slice_in_dim", # implemented using slice -- make this a ref? + "split_dim", + "squeeze", + "transpose", + "view_of", + "view_element_type", + # + # Functionalized view mutations + # + "as_strided_scatter", + # + # Shape prims + # + "collapse", + "cat", + "reshape", + "rev", + # + # Conditional prims + # + "where", + # + # Data conversion and movement prims + # + "clone", + "convert_element_type", + "device_put", + "item", + "maximum_value", + "minimum_value", + "copy_strided", + # + # Inplace prims + # + "copy_to", + "resize", + # "_set", # Commented out, see note below + # + # Reduction prims + # + "amax", + "amin", + "prod", + "sum", + "xor_sum", + "var", + # + # Tensor Creation Prims + # + "empty_strided", + "empty_permuted", + "scalar_tensor", + "iota", + # + # Linear algebra (linalg) Prims + # + "svd", + # + # Randomness Prims + # + "normal", + "_uniform_helper", + # + # FFT prims + # + "fft_r2c", + "fft_c2c", + "fft_c2r", +] + + +def TensorMeta( + tensorlike: Optional[Union[NumberType, torch.Tensor]] = None, + *, + shape: Optional[ShapeType] = None, + strides: Optional[StrideType] = None, + dtype: Optional[torch.dtype] = None, + device: Optional[Union[torch.device, str]] = None, +): + if isinstance(tensorlike, Number): + assert not shape and (shape is None or isinstance(shape, Sequence)) + assert not strides and (strides is None or isinstance(strides, Sequence)) + inferred_shape: Tuple[int, ...] = () + inferred_strides: Tuple[int, ...] 
= () + inferred_dtype = type_to_dtype(type(tensorlike)) + inferred_device = torch.device("cpu") + # TODO: This looks wrong, a number that is wrapped into a tensor + # needs to behave differently than a scalar tensor for type + # promotion purposes + elif tensorlike is not None: + assert isinstance(tensorlike, torch.Tensor) + inferred_shape = tuple(tensorlike.shape) + inferred_strides = tuple(tensorlike.stride()) + inferred_dtype = tensorlike.dtype + inferred_device = tensorlike.device + else: + # If no tensorlike "example" is given then all metadata + # must be provided explicitly + assert shape is not None + assert strides is not None + assert dtype is not None + assert device is not None + + shape = inferred_shape if shape is None else tuple(shape) # type: ignore[possibly-undefined] + strides = inferred_strides if strides is None else tuple(strides) # type: ignore[possibly-undefined] + dtype = inferred_dtype if dtype is None else dtype # type: ignore[possibly-undefined] + device = inferred_device if device is None else device # type: ignore[possibly-undefined] + + if isinstance(device, str): + device = torch.device(device) + + return torch.empty_strided(shape, strides, dtype=dtype, device=device) + + +def _make_prim( + *, + schema: str, + return_type: Union[RETURN_TYPE, Tuple[RETURN_TYPE, ...]], + meta: Callable, + impl_aten: Callable, + doc: str, + tags: Optional[Sequence[torch.Tag]] = None, +): + """ + Creates a primitive operation. + + """ + + prim.define(schema, tags=torch.Tag.pt2_compliant_tag) + + def _prim_impl(*args, **kwargs): + # always run the meta function because aten implementation will + # typically accept more inputs (e.g., it will do promotion and + # broadcasting) which we want to reject + meta(*args, **kwargs) + return impl_aten(*args, **kwargs) + + # Right now prims don't support autograd (we can and should add an + # argument that provides an implementation for backward here.) 
Because we + # don't have derivative formulas, we must setup a custom autograd function + # that raises an error if backwards is invoked + def _autograd_impl(*args, **kwargs): + return backwards_not_supported(_prim)(*args, **kwargs) + + def _backend_select_impl(*args, **kwargs): + if kwargs.get("device") and kwargs["device"].type == "meta": + return meta(*args, **kwargs) + if any(isinstance(x, torch.device) and x.type == "meta" for x in args): + return meta(*args, **kwargs) + else: + return _prim_impl(*args, **kwargs) + + name = schema.split("(")[0] + prim_impl.impl(name, _prim_impl) + prim_autograd_impl.impl(name, _autograd_impl) + prim_meta_impl.impl(name, meta) + + _prim_packet = getattr(torch._ops.ops.prims, name) + _prim = _prim_packet.default + if tags: + _prim._tags = tags + + from torch._subclasses.fake_tensor import contains_tensor_types + + if not any(contains_tensor_types(a.type) for a in _prim._schema.arguments) or str( + _prim + ) in [ + # See https://github.com/pytorch/pytorch/issues/103532 + "prims.device_put.default" + ]: + prim_backend_select_impl.impl(name, _backend_select_impl) + + for p in (_prim_packet, _prim): + p.__doc__ = doc + p.return_type = return_type # type: ignore[attr-defined] + + p.schema = schema + p.prim_impl = _prim_impl + p.prim_meta_impl = meta + p.impl_aten = impl_aten + + return _prim + + +class ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND(Enum): + DEFAULT = (0,) + INT_TO_FLOAT = (2,) + ALWAYS_BOOL = (3,) + COMPLEX_TO_FLOAT = (4,) + + +# TODO: implement dtype validation here, too, or on the corresponding refs +def _prim_elementwise_meta( + *args, + type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, + args_with_fixed_dtypes: Optional[Tuple[TensorLikeType, ...]] = None, +) -> FakeTensor: + """ + Meta function for elementwise operations that produce outputs in the same dtype + as their inputs. + + Stride logic is currently incorrect. + """ + + assert len(args) > 0 + + utils.check_same_dtype(*args) + + args_ = list(args) + if args_with_fixed_dtypes is not None: + args_ = list(args_with_fixed_dtypes) + args_ + + utils.check_same_device(*args_, allow_cpu_scalar_tensors=True) + utils.check_same_shape(*args_, allow_cpu_scalar_tensors=True) + + l2p_perm = utils.compute_elementwise_output_logical_to_physical_perm(*args_) + shape = utils.extract_shape(*args_, allow_cpu_scalar_tensors=True) + + # Acquires the dtype + dtype = None + scalar_type = None + for arg in args: + if isinstance(arg, TensorLike): + if not utils.is_cpu_scalar_tensor(arg): + dtype = arg.dtype + break + else: + dtype = arg.dtype + elif isinstance(arg, Number): + scalar_type = type(arg) + + if dtype is None and scalar_type is not None: + dtype = utils.type_to_dtype(scalar_type) + + # Acquires the device (if it exists) or number + device = None + number = None + for arg in args_: + if isinstance(arg, TensorLike): + if utils.is_cpu_scalar_tensor(arg): + if device is None: + device = arg.device + # keep going, in case there is a cuda tensor later + else: + device = arg.device + break + + elif isinstance(arg, Number): + if number is None: + number = arg + + # NOTE: type promotion behavior here is mostly hidden from tests because + # references will typically handle the type promotion properly even if this doesn't + # (but getting it wrong will cause too many casts to be inserted in traces!) 
+ if device is not None: + assert dtype is not None + if type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT: + dtype = dtype + elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL: + dtype = torch.bool + elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.INT_TO_FLOAT: + if utils.is_integer_dtype(dtype) or utils.is_boolean_dtype(dtype): + dtype = torch.get_default_dtype() + elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT: + if utils.is_complex_dtype(dtype): + dtype = utils.corresponding_real_dtype(dtype) + else: + dtype = dtype + + assert shape is not None + return torch.empty_permuted(shape, l2p_perm, device=device, dtype=dtype) # type: ignore[return-value] + + # Number case + # TODO: fix number type promotion (bool, complex->float) + + # For now for symint/float, just implementing the common / simple cases of (int,float,symint,symfloat) + seen_float = False + if isinstance(number, (torch.SymInt, torch.SymFloat)): + for a in args: + assert isinstance(a, (int, float, torch.SymInt, torch.SymFloat)), "NYI" + seen_float = seen_float or isinstance(a, (float, torch.SymFloat)) + if seen_float: + number = sym_float(number) + + return TensorMeta(number) # type: ignore[arg-type] + + +def _complex_only_elementwise_meta(*args, **kwargs): + torch._check( + utils.is_complex_dtype(args[0].dtype), lambda: "Only complex dtype is supported" + ) + return _prim_elementwise_meta(*args, **kwargs) + + +def _make_elementwise_unary_prim( + name: str, *, type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, **kwargs +): + """ + Creates an elementwise unary prim. + """ + + return _make_prim( + schema=f"{name}(Tensor self) -> Tensor", + meta=partial(_prim_elementwise_meta, type_promotion=type_promotion), + return_type=RETURN_TYPE.NEW, + **kwargs, + ) + + +def _make_elementwise_binary_prim( + name: str, *, type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, **kwargs +): + """ + Creates an elementwise binary prim. 
+ """ + + return _make_prim( + schema=f"{name}(Tensor self, Tensor other) -> Tensor", + meta=partial(_prim_elementwise_meta, type_promotion=type_promotion), + return_type=RETURN_TYPE.NEW, + **kwargs, + ) + + +def _not_impl(*args, **kwargs): + raise NotImplementedError + + +# +# Elementwise unary operations +# + + +abs = _make_elementwise_unary_prim( + "abs", + impl_aten=torch.abs, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) + +acos = _make_elementwise_unary_prim( + "acos", + impl_aten=torch.acos, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +acosh = _make_elementwise_unary_prim( + "acosh", + impl_aten=torch.acosh, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +asin = _make_elementwise_unary_prim( + "asin", + impl_aten=torch.asin, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +asinh = _make_elementwise_unary_prim( + "asinh", + impl_aten=torch.asinh, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +atan = _make_elementwise_unary_prim( + "atan", + impl_aten=torch.atan, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +atanh = _make_elementwise_unary_prim( + "atanh", + impl_aten=torch.atanh, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +cos = _make_elementwise_unary_prim( + "cos", + impl_aten=torch.cos, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +cosh = _make_elementwise_unary_prim( + "cosh", + impl_aten=torch.cosh, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bessel_j0 = _make_elementwise_unary_prim( + "bessel_j0", + impl_aten=torch.special.bessel_j0, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bessel_j1 = _make_elementwise_unary_prim( + "bessel_j1", + impl_aten=torch.special.bessel_j1, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bessel_i0 = _make_elementwise_unary_prim( + "bessel_i0", + impl_aten=torch.i0, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bessel_i0e = _make_elementwise_unary_prim( + "bessel_i0e", + impl_aten=torch.special.i0e, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bessel_i1 = _make_elementwise_unary_prim( + "bessel_i1", + impl_aten=torch.special.i1, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bessel_i1e = _make_elementwise_unary_prim( + "bessel_i1e", + impl_aten=torch.special.i1e, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bitwise_not = _make_elementwise_unary_prim( + "bitwise_not", + impl_aten=torch.bitwise_not, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +def _cbrt_aten(a: torch.Tensor) -> Tensor: + torch._check( + not a.is_complex(), + lambda: "cbrt: Complex inputs not supported. Consider calling torch.pow(a, 1.0/3.0)", + ) + # Returns the real cubic root of the number. + # Note that if a < 0, pow(a, (1. / 3.)) returns th complex number + # exp(1/3 * log(a)) = exp(1/3 * (log(abs(a)) + pi*i)) = cbrt(abs(a)) * e^{pi/3*i} + # which is a complex number. 
+ # For more info see the section Note in + # https://en.cppreference.com/w/cpp/numeric/math/cbrt + return torch.copysign(torch.pow(a.abs(), 1 / 3), a) + + +cbrt = _make_elementwise_unary_prim( + "cbrt", + impl_aten=_cbrt_aten, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +ceil = _make_elementwise_unary_prim( + "ceil", + impl_aten=torch.ceil, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +def _conj_physical_meta(input: TensorLikeType) -> TensorLikeType: + if not input.dtype.is_complex: + raise RuntimeError("prims.conj_physical is only defined for complex dtypes") + + strides = utils.compute_elementwise_output_strides(input) + return TensorMeta(input, strides=strides) + + +conj_physical = _make_prim( + schema="conj_physical(Tensor self) -> Tensor", + meta=_conj_physical_meta, + impl_aten=torch._conj_physical, + doc="Returns the physical conjugation of a complex tensor", + return_type=RETURN_TYPE.NEW, +) + + +def _clone_meta( + input: TensorLikeType, *, memory_format: torch.memory_format = torch.preserve_format +) -> TensorLikeType: + if memory_format != torch.preserve_format: + return torch.empty( + input.shape, + dtype=input.dtype, + layout=input.layout, + device=input.device, + memory_format=memory_format, + ) + + # memory_format == torch.preserve_format + strides = utils.compute_elementwise_output_strides(input) + return torch.empty_strided( + input.shape, + strides, + dtype=input.dtype, + layout=input.layout, + device=input.device, + ) + + +clone = _make_prim( + schema="clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", + meta=_clone_meta, + impl_aten=torch.clone, + doc="Returns the copy of a tensor", + return_type=RETURN_TYPE.NEW, +) + +digamma = _make_elementwise_unary_prim( + "digamma", + impl_aten=torch.digamma, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +erf = _make_elementwise_unary_prim( + "erf", + impl_aten=torch.erf, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +erf_inv = _make_elementwise_unary_prim( + "erf_inv", + impl_aten=torch.special.erfinv, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +erfc = _make_elementwise_unary_prim( + "erfc", + impl_aten=torch.special.erfc, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +erfcx = _make_elementwise_unary_prim( + "erfcx", + impl_aten=torch.special.erfcx, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +exp = _make_elementwise_unary_prim( + "exp", + impl_aten=torch.exp, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +expm1 = _make_elementwise_unary_prim( + "expm1", + impl_aten=torch.special.expm1, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +exp2 = _make_elementwise_unary_prim( + "exp2", + impl_aten=torch.special.exp2, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +def _fill_meta(a: TensorLikeType, value: NumberType) -> TensorLikeType: + return _prim_elementwise_meta( + a, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT + ) + + +# NOTE: fill uses _make_prim directly because it has a value parameter +fill = _make_prim( + schema="fill(Tensor self, Scalar value) -> Tensor", + return_type=RETURN_TYPE.NEW, + meta=_fill_meta, + impl_aten=torch.fill, + doc="", +) + +floor = _make_elementwise_unary_prim( + "floor", + impl_aten=torch.floor, + doc="", + 
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +imag = _make_prim( + schema="imag(Tensor self) -> Tensor", + meta=partial( + _complex_only_elementwise_meta, + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, + ), + return_type=RETURN_TYPE.VIEW, + impl_aten=torch.imag, + doc="", +) + +isfinite = _make_elementwise_unary_prim( + "isfinite", + impl_aten=torch.isfinite, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) + +lgamma = _make_elementwise_unary_prim( + "lgamma", + impl_aten=torch.lgamma, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +log = _make_elementwise_unary_prim( + "log", + impl_aten=torch.log, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +log1p = _make_elementwise_unary_prim( + "log1p", + impl_aten=torch.log1p, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +log2 = _make_elementwise_unary_prim( + "log2", + impl_aten=torch.log2, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +log10 = _make_elementwise_unary_prim( + "log10", + impl_aten=torch.log10, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +real = _make_prim( + schema="real(Tensor self) -> Tensor", + meta=partial( + _complex_only_elementwise_meta, + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, + ), + return_type=RETURN_TYPE.VIEW, + impl_aten=torch.real, + doc="", +) + +reciprocal = _make_elementwise_unary_prim( + "reciprocal", + impl_aten=torch.reciprocal, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +ndtri = _make_elementwise_unary_prim( + "ndtri", + impl_aten=torch.special.ndtri, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +neg = _make_elementwise_unary_prim( + "neg", + impl_aten=torch.neg, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +round = _make_elementwise_unary_prim( + "round", + impl_aten=torch.round, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +rsqrt = _make_elementwise_unary_prim( + "rsqrt", + impl_aten=torch.rsqrt, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +sign = _make_elementwise_unary_prim( + "sign", + impl_aten=torch.sign, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +signbit = _make_elementwise_unary_prim( + "signbit", + impl_aten=torch.signbit, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +sin = _make_elementwise_unary_prim( + "sin", + impl_aten=torch.sin, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +sinh = _make_elementwise_unary_prim( + "sinh", + impl_aten=torch.sinh, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +spherical_bessel_j0 = _make_elementwise_unary_prim( + "spherical_bessel_j0", + impl_aten=torch.special.spherical_bessel_j0, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +sqrt = _make_elementwise_unary_prim( + "sqrt", + impl_aten=torch.sqrt, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +tan = _make_elementwise_unary_prim( + "tan", + impl_aten=torch.tan, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +tanh = _make_elementwise_unary_prim( + "tanh", + impl_aten=torch.tanh, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +trunc = 
_make_elementwise_unary_prim( + "trunc", + impl_aten=torch.trunc, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +# +# Elementwise binary operations +# + +add = _make_elementwise_binary_prim( + name="add", + impl_aten=torch.add, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +atan2 = _make_elementwise_binary_prim( + name="atan2", + impl_aten=torch.atan2, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bitwise_and = _make_elementwise_binary_prim( + "bitwise_and", + impl_aten=torch.bitwise_and, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bitwise_or = _make_elementwise_binary_prim( + "bitwise_or", + impl_aten=torch.bitwise_or, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +bitwise_xor = _make_elementwise_binary_prim( + "bitwise_xor", + impl_aten=torch.bitwise_xor, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +# TODO: complex needs a special meta to account for its float -> complex behavior +# complex = _make_elementwise_binary_prim( +# impl_aten=torch.complex, +# doc="", +# ) + + +# div prim performs truncation division on integer inputs +# and true division for floating and complex inputs +def _div_aten(a, b): + is_integral = isinstance(a, (bool, int, torch.SymInt)) or ( + isinstance(a, torch.Tensor) and utils.is_integer_dtype(a.dtype) + ) + + if is_integral: + return torch.div(a, b, rounding_mode="trunc") + else: + return torch.true_divide(a, b) + + +div = _make_elementwise_binary_prim( + "div", + impl_aten=_div_aten, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +eq = _make_elementwise_binary_prim( + "eq", + impl_aten=torch.eq, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) + +fmax = _make_elementwise_binary_prim( + "fmax", + impl_aten=torch.fmax, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +fmin = _make_elementwise_binary_prim( + "fmin", + impl_aten=torch.fmin, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +fmod = _make_elementwise_binary_prim( + "fmod", + impl_aten=torch.fmod, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +gcd = _make_elementwise_binary_prim( + "gcd", + impl_aten=torch.gcd, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +ge = _make_elementwise_binary_prim( + "ge", + impl_aten=torch.ge, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) + +gt = _make_elementwise_binary_prim( + "gt", + impl_aten=torch.gt, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) + +hypot = _make_elementwise_binary_prim( + "hypot", + impl_aten=torch.hypot, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +igamma = _make_elementwise_binary_prim( + "igamma", + impl_aten=torch.special.gammainc, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +igammac = _make_elementwise_binary_prim( + "igammac", + impl_aten=torch.special.gammaincc, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +le = _make_elementwise_binary_prim( + "le", + impl_aten=torch.le, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) + +lt = _make_elementwise_binary_prim( + "lt", + impl_aten=torch.lt, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) + + 
+# Note: the following impls are because torch.maximum and torch.minimum do not support scalar inputs +def _maximum_aten( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +) -> TensorLikeType: + if isinstance(a, TensorLike) and isinstance(b, Number): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(b, TensorLike) and isinstance(a, Number): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + + return torch.maximum(a, b) # type: ignore[arg-type] + + +maximum = _make_elementwise_binary_prim( + "maximum", + impl_aten=_maximum_aten, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +def _minimum_aten( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +) -> TensorLikeType: + if isinstance(a, TensorLike) and isinstance(b, Number): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(b, TensorLike) and isinstance(a, Number): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + + return torch.minimum(a, b) # type: ignore[arg-type] + + +minimum = _make_elementwise_binary_prim( + "minimum", + impl_aten=_minimum_aten, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +mul = _make_elementwise_binary_prim( + "mul", + impl_aten=torch.mul, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +ne = _make_elementwise_binary_prim( + "ne", + impl_aten=torch.ne, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) + +nextafter = _make_elementwise_binary_prim( + "nextafter", + impl_aten=torch.nextafter, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +pow = _make_elementwise_binary_prim( + "pow", + impl_aten=torch.pow, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +remainder = _make_elementwise_binary_prim( + "remainder", + impl_aten=torch.remainder, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +shift_left = _make_elementwise_binary_prim( + "shift_left", + impl_aten=torch.bitwise_left_shift, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +shift_right_arithmetic = _make_elementwise_binary_prim( + "shift_right_arithmetic", + impl_aten=torch.bitwise_right_shift, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +shift_right_logical = _not_impl + +sub = _make_elementwise_binary_prim( + "sub", + impl_aten=torch.sub, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + +zeta = _make_elementwise_binary_prim( + "zeta", + impl_aten=torch.special.zeta, + doc="", + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, +) + + +# +# View operations +def _as_strided_meta( + a: TensorLikeType, size: ShapeType, stride: StrideType, storage_offset: int +) -> TensorLikeType: + assert len(size) == len(stride) + assert storage_offset >= 0 + utils.validate_strides(stride) + utils.validate_shape(size) + + if reduce(operator.mul, size) == 0: + # NOTE: This special case is to avoid having to acquire the storage below + # as_strided to shapes with no elements are trivially valid, so it's OK + pass + elif isinstance(a, torch.Tensor): + utils.check_in_bounds_for_storage( + a._typed_storage(), size, stride, storage_offset + ) + + return torch.as_strided(a, size, stride, storage_offset) + + +def _as_strided_aten( + a: Tensor, size: ShapeType, stride: StrideType, storage_offset: int +) -> Tensor: + return torch.as_strided(a, size, stride, 
storage_offset) + + +_as_strided_doc = """ + Creates a view of the tensor with the given shape (size), strides (stride) and + storage offset (storage_offset). +""" + +as_strided = _make_prim( + schema="as_strided(Tensor(a!) a, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor(a!)", + meta=_as_strided_meta, + impl_aten=_as_strided_aten, + return_type=RETURN_TYPE.VIEW, + doc=_as_strided_doc, +) + + +def _broadcast_in_dim_meta( + a: TensorLikeType, shape: ShapeType, broadcast_dimensions: Sequence[int] +): + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # Type checks + assert isinstance(a, TensorLike) + assert isinstance(shape, Sequence) + assert isinstance(broadcast_dimensions, Sequence) + + # every dimension must be accounted for + assert a.ndim == len(broadcast_dimensions) + + # broadcast shape must have weakly more dimensions + assert len(shape) >= a.ndim + + # broadcast_dimensions must be an ascending sequence + # (no relative reordering of dims) of integers and + # each dimension must be within the new shape + def _greater_than_reduce(acc, x): + assert isinstance(x, Dim) + assert x > acc + assert x < len(shape) + + return x + + reduce(_greater_than_reduce, broadcast_dimensions, -1) + + # shape must be broadcastable to + for idx, new_idx in enumerate(broadcast_dimensions): + if not guard_size_oblivious(a.shape[idx] == 1): + torch._check( + a.shape[idx] == shape[new_idx], + lambda: f"{a.shape[idx]} must be broadcastable to {shape[new_idx]}", + ) + + new_strides = [] + original_idx = 0 + for idx in range(len(shape)): + if idx in broadcast_dimensions: + # Assigns a stride of zero to dimensions + # which were actually broadcast + if guard_size_oblivious(a.shape[original_idx] != shape[idx]): + new_strides.append(0) + else: + new_strides.append(a.stride()[original_idx]) + original_idx = original_idx + 1 + else: + if guard_size_oblivious(shape[idx] != 1): + new_strides.append(0) + elif original_idx == a.ndim: + new_strides.append(1) + else: + new_strides.append(a.stride()[original_idx] * a.size()[original_idx]) + + return a.as_strided(shape, new_strides, a.storage_offset()) + + +def _broadcast_in_dim_aten(a, shape, broadcast_dimensions): + s = list(shape) + for broadcast_dimension in broadcast_dimensions: + s[broadcast_dimension] = -1 + + v = a + for idx, x in enumerate(s): + if x != -1: + v = v.unsqueeze(idx) + + return v.expand(shape) + + +_broadcast_in_dim_doc = """ + Creates a view of a with the specified shape. + + Allows adding dimensions of any length and broadcasting + dimensions of length one in a to any length. + + The location of the broadcast dimensions must be specified + using the broadcast_dimensions argument. Changing the + relative order of dimensions is not supported. 
+ """ + +broadcast_in_dim = _make_prim( + schema="broadcast_in_dim(Tensor(a) a, SymInt[] shape, int[] broadcast_dimensions) -> Tensor(a)", + meta=_broadcast_in_dim_meta, + impl_aten=_broadcast_in_dim_aten, + return_type=RETURN_TYPE.VIEW, + doc=_broadcast_in_dim_doc, +) + + +def _validate_collapse_args(a: Tensor, start: int, end: int) -> None: + # Special-case for zero dimensional tensors + ndim = max(1, a.dim()) + utils.validate_idx(ndim, start) + utils.validate_idx(ndim, end) + + # Verifies end is strictly greater than start + # (Collapse requires a non-empty interval) + torch._check_value( + end >= start, + lambda: f"Attempting to collapse but end, {end}, is less than start, {start}!", + ) + + +def _collapsed_shape(shape: ShapeType, start: int, end: int) -> Tuple[int, ...]: + """ + Returns the shape of a with dims in [start, end) merged into a single dimension. + """ + # Special-case for zero dimensional tensors + shape = (1,) if len(shape) == 0 else tuple(shape) + + dim_length = 1 + for s in shape[start : end + 1]: + dim_length = dim_length * s + + return shape[0:start] + (dim_length,) + shape[end + 1 :] + + +def _collapse_view_helper( + a: TensorLikeType, start: int, end: int +) -> Tuple[Optional[ShapeType], Optional[StrideType]]: + assert isinstance(a, TensorLike) + + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + _validate_collapse_args(a, start, end) + + # Special-case for zero dimensional tensors + if a.ndim == 0: + shape = (1,) + strides = (1,) + else: + shape = a.shape # type: ignore[assignment] + strides = a.stride() # type: ignore[assignment] + + if a.ndim == 0 or (end == start): + return shape, strides + + length = shape[end] + stride = strides[end] + for idx in range(end - 1, start - 1, -1): + if guard_size_oblivious(shape[idx] == 0) or guard_size_oblivious( + shape[idx + 1] == 0 + ): + length = 0 + stride = 0 + break + + if guard_size_oblivious(shape[idx] == 1): + continue + + length = length * shape[idx] + stride = min(stride, strides[idx]) + + if ( + guard_size_oblivious(a.numel() > 0) + and guard_size_oblivious(shape[idx + 1] != 1) + and not guard_size_oblivious( + strides[idx] == strides[idx + 1] * shape[idx + 1] + ) + ): + return None, None + + new_shape = shape[:start] + (length,) + shape[end + 1 :] + new_strides = strides[:start] + (stride,) + strides[end + 1 :] + + # NOTE: when the input has no elements it's restrided as if it were contiguous + if guard_size_oblivious(a.numel() == 0): + new_strides = utils.make_contiguous_strides_for(new_shape) + + return new_shape, new_strides + + +def _collapse_view_meta(a: TensorLikeType, start: int, end: int) -> TensorLikeType: + new_shape, new_strides = _collapse_view_helper(a, start, end) + + if new_shape is None: + msg = "Attempting to view a collapsed tensor, but no such view exists!" + raise ValueError(msg) + + assert new_strides is not None + return a.as_strided(new_shape, new_strides, a.storage_offset()) + + +def _collapse_view_aten(a: Tensor, start: int, end: int) -> Tensor: + new_shape = _collapsed_shape(a.shape, start, end) + return a.view(new_shape) + + +_collapse_view_doc = """ + Creates a view of a with the dimensions between + start (inclusive) and end (exclusive) merged into a + single dimension. + + If it's not possible to take such a view then an error + is thrown. See collapse instead. + + The dimensions can be merged if and only if + they are all "nested" with each other. 
That is, they all + have the property that + + stride[i] = stride[i+1] * shape[i+1] + + for all i in [start, end - 1). + """ + +collapse_view = _make_prim( + schema="collapse_view(Tensor(a) a, int start, int end) -> Tensor(a)", + meta=_collapse_view_meta, + impl_aten=_collapse_view_aten, + return_type=RETURN_TYPE.VIEW, + doc=_collapse_view_doc, +) + + +def _conj_meta(a: TensorLikeType) -> TensorLikeType: + if not a.dtype.is_complex: + raise RuntimeError("Expected complex dtype in prims.conj") + out = a.as_strided(a.shape, a.stride(), a.storage_offset()) + torch._C._set_conj(out, not a.is_conj()) + return out + + +_conj_doc = """ +Returns a conjugated view of the original tensor +""" + +conj = _make_prim( + schema="conj(Tensor(a) a) -> Tensor(a)", + meta=_conj_meta, + impl_aten=torch.conj, + return_type=RETURN_TYPE.VIEW, + doc=_conj_doc, +) + + +def expand_dims( + a: TensorLikeType, dimensions: DimsSequenceType, ndim=None +) -> TensorLikeType: + """ + Creates a view of a with a.ndim + len(dimensions) dimensions, with new + dimensions of length one at the dimensions specified by dimensions. + """ + if ndim is not None: + # TODO: this is only here to support the unsqueeze ref + dims = sorted(utils.canonicalize_dims(ndim, dimensions)) # type: ignore[arg-type] + else: + dims = sorted(utils.canonicalize_dims(a.ndim, dimensions)) # type: ignore[arg-type] + if len(set(dims)) != len(dims): + msg = f"Received duplicate dimensions to expand in {str(dimensions)}" + raise ValueError(msg) + + new_shape = list(a.shape) + for idx in dims: + new_shape.insert(idx, 1) + + broadcast_dimensions = [ + idx for idx in range(len(new_shape)) if idx not in dimensions + ] + return broadcast_in_dim(a, new_shape, broadcast_dimensions) + + +# Note: saves the Python slice object because we're about to clobber its name with the slice prim +pyslice: Type[slice] = slice # type: ignore[has-type] + + +def _slice_meta( + a: TensorLikeType, + start_indices: DimsSequenceType, + limit_indices: DimsSequenceType, + strides: Optional[StrideType] = None, +) -> TensorLikeType: + _strides = strides if strides is not None else [1] * len(start_indices) + + if a.ndim != len(start_indices): + msg = f"Attempting to slice tensor of rank {a.ndim} with start_indices of length {len(start_indices)}!" + raise ValueError(msg) + + if a.ndim != len(limit_indices): + msg = f"Attempting to slice tensor of rank {a.ndim} with limit_indices of length {len(limit_indices)}!" + raise ValueError(msg) + + if a.ndim != len(_strides): + msg = f"Attempting to slice tensor of rank {a.ndim} with strides of length {len(limit_indices)}!" + raise ValueError(msg) + + for x, y in zip(start_indices, a.shape): + if x < 0: + msg = f"Attempting to slice a tensor with a negative start index of {x}!" + raise ValueError(msg) + if x > y: + msg = ( + f"Attempting to slice a tensor but a start index in {start_indices} is greater than" + f" the length of its corresponding dimension in shape {a.shape}" + ) + raise ValueError(msg) + + for x, y, z in zip(limit_indices, a.shape, start_indices): + if x < 0: + msg = f"Attempting to slice a tensor with a negative stop index of {x}!" 
+ raise ValueError(msg) + if x > y: + msg = ( + f"Attempting to slice a tensor but a stop index in {limit_indices} is greater than the length of " + f" its corresponding dimension in shape {a.shape}" + ) + raise ValueError(msg) + if x < z: + msg = ( + f"Attempting to slice a tensor but a stop index {x} is less than " + f" its corresponding start index {z}" + ) + raise ValueError(msg) + + for x in _strides: + if x <= 0: + msg = f"Attempting to slice a tensor with a non-positive step of {x}!" + raise ValueError(msg) + + new_shape = [] + for x, y, z in zip(start_indices, limit_indices, _strides): + new_shape.append(1 + (y - x - 1) // z) + + new_strides = [] + for x, y in zip(a.stride(), _strides): + new_strides.append(x * y) + + return a.as_strided(new_shape, new_strides, a.storage_offset()) + + +def _slice_aten( + a: Tensor, + start_indices: DimsSequenceType, + limit_indices: DimsSequenceType, + strides: Optional[StrideType] = None, +) -> Tensor: + _strides = strides if strides is not None else [1] * len(start_indices) + + slices = [] + for start, stop, step in zip(start_indices, limit_indices, _strides): + slices.append(pyslice(start, stop, step)) + + return operator.getitem(a, slices) # type: ignore[call-overload] + + +_slice_doc = """ + Creates a view of a "bounding box" within the tensor. + + The bounding box is specified independently in each of the tensor's dimensions. + start_indices and limit_indices describe the box's boundaries for their corresponding + dimensions. If strides is specified then they specify the step size between elements + in their corresponding dimension. + + This operation is analogous to slicing in NumPy, but does not permit slices where + the stop indices are less than the start indices. + """ + +slice = _make_prim( + schema="slice(Tensor(a) a, SymInt[] start_indices, SymInt[] limit_indices, SymInt[]? strides=None) -> Tensor(a)", + meta=_slice_meta, + impl_aten=_slice_aten, + return_type=RETURN_TYPE.VIEW, + doc=_slice_doc, +) + + +def _slice_in_dim_meta( + a: TensorLikeType, + start_index: int, + limit_index: int, + stride: int = 1, + axis: int = 0, +) -> TensorLikeType: + if axis < 0: + msg = f"slice_in_dim: received a negative axis {axis}" + raise ValueError(msg) + if axis >= a.ndim: + msg = f"slice_in_dim: axis {axis} is greater than or equal to the rank {a.ndim} of the tensor" + raise ValueError(msg) + + if start_index < 0: + msg = f"slice_in_dim: received a negative start_index {start_index}" + raise ValueError(msg) + + if start_index > a.shape[axis]: + msg = f"slice_in_dim: start_index {start_index} is greater than the length of dimension {axis}" + raise ValueError(msg) + + if limit_index > a.shape[axis]: + msg = f"slice_in_dim: limit_index {limit_index} is greater than the length of dimension {axis}" + raise ValueError(msg) + + if limit_index < start_index: + msg = f"slice_in_dim: received a limit_index {limit_index} less than the start_index {start_index}" + raise ValueError(msg) + + if stride < 1: + msg = f"slice_in_dim: received a non-positive stride of {stride}!" 
+ raise ValueError(msg) + + start_indices = [0] * a.ndim + limit_indices = list(a.shape) + strides = [1] * a.ndim + + start_indices[axis] = start_index + limit_indices[axis] = limit_index + strides[axis] = stride + + return _slice_meta(a, start_indices, limit_indices, strides) + + +def _slice_in_dim_aten( + a: Tensor, + start_index: int, + limit_index: int, + stride: int = 1, + axis: int = 0, +) -> Tensor: + start_indices = [0] * a.ndim + limit_indices = list(a.shape) + strides = [1] * a.ndim + + start_indices[axis] = start_index + limit_indices[axis] = limit_index + strides[axis] = stride + + return slice(a, start_indices, limit_indices, strides) + + +_slice_in_dim_doc = """ + Convenience wrapper for slicing just one dimension using slice. + """ + +# TODO: make stride SymInt +slice_in_dim = _make_prim( + schema="slice_in_dim(Tensor(a) a, SymInt start_index, SymInt limit_index, int stride=1, int axis=0) -> Tensor(a)", + meta=_slice_in_dim_meta, + impl_aten=_slice_in_dim_aten, + return_type=RETURN_TYPE.VIEW, + doc=_slice_in_dim_doc, +) + + +def _split_dim_meta(a: TensorLikeType, dim: int, outer_length: int) -> TensorLikeType: + assert isinstance(a, TensorLike) + utils.validate_idx(a.ndim, dim) + utils.validate_dim_length(outer_length) + + # Verifies the dim can be split with the specified lhs_length + inner_length = a.shape[dim] // outer_length + + if (a.shape[dim] % outer_length) != 0: + msg = "Attempting to split dimension of length {}, but outer length of {} divides it with a remainder!".format( + a.shape[dim], outer_length + ) + raise ValueError(msg) + + new_shape: List[int] = [] + new_strides: List[int] = [] + for idx in range(a.ndim): + if idx == dim: + new_shape.extend((outer_length, inner_length)) + new_strides.extend((a.stride()[idx] * inner_length, a.stride()[idx])) + else: + new_shape.append(a.shape[idx]) + new_strides.append(a.stride()[idx]) + + return a.as_strided(new_shape, new_strides, a.storage_offset()) + + +def _split_dim_aten(a: Tensor, dim: int, outer_length: int) -> Tensor: + inner_length = a.shape[dim] // outer_length + new_shape = a.shape[0:dim] + (outer_length, inner_length) + a.shape[dim + 1 :] + + return a.view(new_shape) + + +_split_dim_doc = """ + Creates a view of a with the given dimension (of length l) split + into two dimensions, with the outer of the two having + length outer_length and the inner of the two having computed + length inner_length such outer_length * inner_length = l. + """ + +# TODO: consider renaming split_dim_view +split_dim = _make_prim( + schema="split_dim(Tensor(a) a, int dim, SymInt outer_length) -> Tensor(a)", + meta=_split_dim_meta, + impl_aten=_split_dim_aten, + return_type=RETURN_TYPE.VIEW, + doc=_split_dim_doc, +) + + +# Note: allows dimensions to be specified redundantly +def _squeeze_meta(a: TensorLikeType, dimensions: Sequence) -> TensorLikeType: + assert isinstance(a, TensorLike) + + for idx in dimensions: + utils.validate_idx(a.ndim, idx) + assert a.shape[idx] == 1 + + new_shape = [] + new_strides = [] + for idx in range(len(a.shape)): + if idx in dimensions: + continue + + new_shape.append(a.shape[idx]) + new_strides.append(a.stride()[idx]) + + return a.as_strided(new_shape, new_strides, a.storage_offset()) + + +_squeeze_doc = """ + Creates a view of the tensor with the specified dimensions removed. + + The removed dimensions must each have length one. 
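+ + For example, removing dimensions (0, 2) from a tensor with shape (1, 3, 1, 4) produces a view with shape (3, 4).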
+ """ + +squeeze = _make_prim( + schema="squeeze(Tensor(a) a, int[] dimensions) -> Tensor(a)", + meta=_squeeze_meta, + impl_aten=torch.squeeze, + return_type=RETURN_TYPE.VIEW, + doc=_squeeze_doc, +) + + +def _transpose_meta(a: TensorLikeType, permutation: DimsSequenceType) -> TensorLikeType: + if a.ndim != len(permutation): + msg = "Attempting to permute a tensor of rank {}, but received a permutation of length {}!".format( + a.ndim, len(permutation) + ) + raise ValueError(msg) + + if not utils.is_valid_permutation(a.ndim, permutation): + msg = f"Received an invalid permutation, {permutation}!" + raise ValueError(msg) + + new_shape = [0] * a.ndim + new_strides = [0] * a.ndim + for idx, dim in enumerate(permutation): + new_shape[idx] = a.shape[dim] + new_strides[idx] = a.stride()[dim] + + return a.as_strided(tuple(new_shape), tuple(new_strides), a.storage_offset()) + + +def _transpose_aten(a: Tensor, permutation: DimsSequenceType) -> Tensor: + return torch.permute(a, permutation) + + +_transpose_doc = """ + Creates a view of the tensor with its dimensions permuted. + + The length of the permutation must be the rank of the tensor, + and each element of the permutation specifies the new order + for the corresponding dimension. + """ + +transpose = _make_prim( + schema="transpose(Tensor(a) a, int[] permutation) -> Tensor(a)", + meta=_transpose_meta, + impl_aten=_transpose_aten, + return_type=RETURN_TYPE.VIEW, + doc=_transpose_doc, +) + + +def _view_of_meta(a: TensorLikeType) -> TensorLikeType: + return a.as_strided(a.shape, a.stride(), a.storage_offset()) + + +def _view_of_aten(a: Tensor) -> Tensor: + return a.view(a.shape) + + +_view_of_doc = """ + Creates a view of the tensor. + """ + +view_of = _make_prim( + schema="view_of(Tensor(a) a) -> Tensor", + meta=_view_of_meta, + impl_aten=_view_of_aten, + return_type=RETURN_TYPE.VIEW, + doc=_view_of_doc, +) + + +def _view_element_type_meta(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType: + return a.view(dtype) + + +def _view_element_type_aten(a: Tensor, dtype: torch.dtype) -> Tensor: + return a.view(dtype) + + +_view_element_type_doc = """ + Creates a view of the tensor with a different dtype. + """ + +view_element_type = _make_prim( + schema="view_of_dtype(Tensor(a) a, ScalarType dtype) -> Tensor", + meta=_view_element_type_meta, + impl_aten=_view_element_type_aten, + return_type=RETURN_TYPE.VIEW, + doc=_view_element_type_doc, +) + +# +# Functionalized view mutations +# + + +def _as_strided_scatter_meta( + input: TensorLikeType, + src: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: int, +) -> TensorLikeType: + utils.validate_shape(size) + utils.validate_strides(stride) + + required_size = utils.compute_required_storage_length(size, stride, storage_offset) + torch._check( + input.numel() >= required_size, + lambda: ( + f"as_strided_scatter: sizes {size}, strides {stride}, storage offset {storage_offset} " + f" and itemsize {input.element_size()} requiring a storage size of " + f"{required_size * input.element_size()} are out of bounds " + f"for storage of size {input.numel() * input.element_size()}" + ), + ) + torch._check( + utils.is_same_shape(src.shape, size), + lambda: f"expected src to have a size equal to the slice of self. src size = {src.shape}, slice size = {size}", + ) + + return utils.clone_preserve_strides(input) + + +_as_strided_scatter_doc = """ + Creates a new tensor equivalent to ``out = input.clone()`` after mutation by + ``out.as_strided(size, stride, storage_offset).copy_(src)``. 
+""" + +as_strided_scatter = _make_prim( + schema="as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor", + meta=_as_strided_scatter_meta, + impl_aten=torch.as_strided_scatter, + return_type=RETURN_TYPE.NEW, + doc=_as_strided_scatter_doc, +) + + +# +# Shape operations +# + + +def _collapse_meta(a: Tensor, start: int, end: int) -> Tensor: + # Special-case for zero dimensional tensors + _validate_collapse_args(a, start, end) + new_shape = _collapsed_shape(a.shape, start, end) + return a.new_empty(new_shape) + + +def _collapse_aten(a: Tensor, start: int, end: int) -> Tensor: + new_shape = _collapsed_shape(a.shape, start, end) + out = a.new_empty(new_shape) + with torch.no_grad(): + out.view_as(a).copy_(a) + return out + + +_collapse_doc = """ +Collapse a span of neighboring dimensions into one. + +See collapse_view for the corresponding view operation. +""" +collapse = _make_prim( + schema="collapse(Tensor a, int start, int end) -> Tensor", + meta=_collapse_meta, + impl_aten=_collapse_aten, + return_type=RETURN_TYPE.NEW, + doc=_collapse_doc, +) + + +# TODO: review stride logic +# NB: unlike torch.cat, this is more strict about empty tensors and dim is +# never negative +def _cat_meta(tensors: Sequence[TensorLikeType], dim: int) -> TensorLikeType: + # Verifies same shape (except in the concat dimension) + assert dim >= 0 + shape = tensors[0].shape + concat_length = 0 + for tensor_idx, tensor in enumerate(tensors): + assert len(shape) == len(tensor.shape) + for idx, (common_length, length) in enumerate(zip(shape, tensor.shape)): + if idx == dim: + concat_length = concat_length + length + else: + torch._check( + length == common_length, + lambda: f"Sizes of tensors must match except in dimension {dim}. " + f"Expected {common_length} but got {length} for tensor number " + f"{tensor_idx} in the list", + ) + + new_shape = list(tensors[0].shape).copy() + new_shape[dim] = concat_length + return TensorMeta( + tensors[0], + shape=new_shape, + strides=utils.make_contiguous_strides_for(new_shape), + ) + + +def _cat_aten(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: int) -> Tensor: + return torch.cat(tensors, dim) + + +_cat_doc = """ + Concatenates tensors along the specified dimension. + + The tensors' shapes must have the same rank and same length for other dimensions. + """ + +cat = _make_prim( + schema="cat(Tensor[] tensors, int dim) -> Tensor", + meta=_cat_meta, + impl_aten=_cat_aten, + return_type=RETURN_TYPE.NEW, + doc=_cat_doc, +) + + +def _reshape_meta(a: TensorLikeType, shape: ShapeType): + assert isinstance(a, TensorLike) + utils.validate_shape(shape) + + # Validates the tensor and the requested shape have the + # same number of elements + numel = reduce(operator.mul, shape) + if numel != a.numel(): + msg = f"Attempting to reshape a tensor with {a.numel()} elements to a shape with {numel} elements!" + raise ValueError(msg) + + return TensorMeta(a, shape=shape, strides=utils.make_contiguous_strides_for(shape)) + + +def _reshape_aten(a: Tensor, shape: ShapeType) -> Tensor: + return a.reshape(shape).contiguous().clone() + + +_reshape_doc = """ + Creates a contiguous tensor with the specified shape + containing a copy of the data in a. 
+ """ +reshape = _make_prim( + schema="reshape(Tensor a, SymInt[] shape) -> Tensor", + meta=_reshape_meta, + impl_aten=_reshape_aten, + return_type=RETURN_TYPE.NEW, + doc=_reshape_doc, +) + + +def _rev_meta(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType: + utils.validate_dimension_indices(a.ndim, dims) + return torch.empty_like(a, memory_format=torch.preserve_format) + + +_rev_doc = """ + Reverses the order of elements along the given dimensions. + """ + +rev = _make_prim( + schema="rev(Tensor a, int[] dims) -> Tensor", + meta=_rev_meta, + impl_aten=torch.flip, + return_type=RETURN_TYPE.NEW, + doc=_rev_doc, +) + +# +# Conditional prims +# + + +def _where_meta( + pred: TensorLikeType, a: TensorLikeType, b: TensorLikeType +) -> TensorLikeType: + return _prim_elementwise_meta( + a, + b, + type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT, + args_with_fixed_dtypes=(pred,), + ) + + +_where_doc = """ + Selects elements from a and b according to pred. + + Where pred is true the result contains the element from a, and + where pred is false the result contains the element from b. + """ + +where = _make_prim( + schema="where(Tensor pred, Tensor a, Tensor b) -> Tensor", + meta=_where_meta, + impl_aten=torch.where, + return_type=RETURN_TYPE.NEW, + doc=_where_doc, +) + + +# +# Type conversions +# +def _convert_element_type_meta(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType: + # Type checks + assert isinstance(a, TensorLike) + assert isinstance(dtype, torch.dtype) + + # dtype conversion preserves dense strides + if torch._prims_common.is_non_overlapping_and_dense(a): + strides = a.stride() + else: + strides = utils.compute_elementwise_output_strides(a) + + return TensorMeta(a, strides=strides, dtype=dtype) + + +def _convert_element_type_aten(a: Tensor, dtype: torch.dtype) -> Tensor: + # Propagates requires grad when possible + if not utils.is_grad_dtype(dtype): + requires_grad = False + else: + # TODO: update meta objects so this can be acquired directly + try: + requires_grad = a.requires_grad + except Exception as e: + requires_grad = False + + result = torch.empty_like( + a, device=a.device, dtype=dtype, requires_grad=requires_grad + ) + with torch.no_grad(): + return copy_to(result, a) + + +_convert_element_type_doc = """ + Creates a copy of a tensor with the given dtype. + """ + +convert_element_type = _make_prim( + schema="convert_element_type(Tensor a, ScalarType dtype) -> Tensor", + meta=_convert_element_type_meta, + impl_aten=_convert_element_type_aten, + return_type=RETURN_TYPE.NEW, + doc=_convert_element_type_doc, + tags=(torch.Tag.pointwise,), +) + + +def _device_put_meta( + a: TensorLikeType, device: Union[str, torch.device] +) -> TensorLikeType: + assert isinstance(a, TensorLike) + assert isinstance(device, (str, torch.device)) + + return TensorMeta(a, device=utils.canonicalize_device(device)) + + +def _device_put_aten(a: Tensor, device: Union[str, torch.device]) -> Tensor: + return a.to(device) + + +_device_put_doc = """ + Creates a copy of a tensor on the given device. 
+ """ + +device_put = _make_prim( + schema="device_put(Tensor a, Device device) -> Tensor", + meta=_device_put_meta, + impl_aten=_device_put_aten, + return_type=RETURN_TYPE.NEW, + doc=_device_put_doc, +) + + +# NOTE: need to model meta scalars +# See https://github.com/pytorch/pytorch/issues/78070 +def _item_meta(a: TensorLikeType) -> FakeTensor: + number_type = utils.dtype_to_type(a.dtype) + return TensorMeta(number_type(-1)) + + +_item_doc = """ + Converts a tensor with one element to a Python number. +""" + +# TODO: create a new return type for scalars? +# FIXME: currently returns integers for boolean tensors +# https://github.com/pytorch/pytorch/issues/78071 +item = _make_prim( + schema="item(Tensor a) -> Scalar", + meta=_item_meta, + impl_aten=torch.Tensor.item, + return_type=RETURN_TYPE.NEW, + doc=_item_doc, +) + + +# NOTE: need to model meta scalars +# See https://github.com/pytorch/pytorch/issues/78070 +def _maximum_value_meta(dtype: torch.dtype) -> FakeTensor: + number_type = utils.dtype_to_type(dtype) + return TensorMeta(number_type(-1)) + + +def _maximum_value_aten(dtype: torch.dtype): + if dtype == torch.bool: + return True + elif dtype.is_complex or dtype.is_floating_point: + return torch.finfo(dtype).max + else: + return torch.iinfo(dtype).max + + +_maximum_value_doc = """ + Return the maximum finite value for a dtype. +""" + +# TODO: create a new return type for scalars? +# FIXME: currently returns integers for boolean tensors +# https://github.com/pytorch/pytorch/issues/78071 +maximum_value = _make_prim( + schema="maximum_value(ScalarType dtype) -> Scalar", + meta=_maximum_value_meta, + impl_aten=_maximum_value_aten, + return_type=RETURN_TYPE.NEW, + doc=_maximum_value_doc, +) + + +# NOTE: need to model meta scalars +# See https://github.com/pytorch/pytorch/issues/78070 +def _minimum_value_meta(dtype: torch.dtype) -> FakeTensor: + number_type = utils.dtype_to_type(dtype) + return TensorMeta(number_type(-1)) + + +def _minimum_value_aten(dtype: torch.dtype): + if dtype == torch.bool: + return False + elif dtype.is_complex or dtype.is_floating_point: + return torch.finfo(dtype).min + else: + return torch.iinfo(dtype).min + + +_minimum_value_doc = """ + Return the minimum finite value for a dtype. +""" + +# TODO: create a new return type for scalars? +# FIXME: currently returns integers for boolean tensors +# https://github.com/pytorch/pytorch/issues/78071 +minimum_value = _make_prim( + schema="minimum_value(ScalarType dtype) -> Scalar", + meta=_minimum_value_meta, + impl_aten=_minimum_value_aten, + return_type=RETURN_TYPE.NEW, + doc=_minimum_value_doc, +) + +# +# Inplace operators +# + + +def _copy_to_meta(a: TensorLikeType, b: TensorLikeType): + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + + # Validates the cast is safe + # TODO: move this as an option on the reference + # a_typ = utils.dtype_to_type(a.dtype) + # b_typ = utils.dtype_to_type(b.dtype) + # if a_typ is not utils.get_higher_type(a_typ, b_typ): + # raise RuntimeError(str(b.dtype), " can't be cast safely to ", str(a.dtype), "!") + + # Validates the tensors have the same number of elements + if a.numel() != b.numel(): + msg = f"Attempting to copy {b.numel()} elements to a tensor with {a.numel()} elements!" + raise RuntimeError(msg) + + return a + + +def _copy_to_aten(a: Tensor, b: Tensor) -> Tensor: + return a.copy_(b) + + +_copy_to_doc = """ + Copies the data in b to a and returns the modified a. 
+ """ + +# TODO: Remove safe casting and implement on reference instead +copy_to = _make_prim( + schema="copy_to(Tensor(a!) a, Tensor b) -> Tensor(a!)", + meta=_copy_to_meta, + impl_aten=_copy_to_aten, + return_type=RETURN_TYPE.INPLACE, + doc=_copy_to_doc, +) + + +def _copy_strided_meta(a: TensorLikeType, stride: ShapeType): + assert isinstance(a, TensorLike) + return torch.empty_strided( + a.shape, + stride, + dtype=a.dtype, + layout=a.layout, + device=a.device, + requires_grad=a.requires_grad, + ) + + +def _copy_strided_aten(a: Tensor, stride: ShapeType) -> Tensor: + out = torch.empty_strided( + a.size(), + stride=stride, + dtype=a.dtype, + layout=a.layout, + device=a.device, + requires_grad=a.requires_grad, + ) + out.copy_(a) + return out + + +_copy_strided_doc = """ + Copies the data in a to a new tensor, the new tensor has same shape with a size, but has different stride. + """ + + +copy_strided = _make_prim( + schema="copy_strided(Tensor a, SymInt[] stride) -> Tensor", + meta=_copy_strided_meta, + impl_aten=_copy_strided_aten, + return_type=RETURN_TYPE.NEW, + doc=_copy_strided_doc, +) + + +def _resize_meta(a: TensorLikeType, shape: ShapeType): + return a.resize_(shape) + + +def _resize_aten(a: Tensor, shape: ShapeType) -> Tensor: + return a.resize_(shape) + + +_resize_doc = """ + Gives a tensor with no elements a new shape, returning the modified tensor. + + The tensor's strides are contiguous and its values are unitialized. + """ + +# TODO: review support arbitrary resizes +resize = _make_prim( + schema="resize(Tensor(a!) a, SymInt[] shape) -> Tensor(a!)", + meta=_resize_meta, + impl_aten=_resize_aten, + return_type=RETURN_TYPE.INPLACE, + doc=_resize_doc, +) + + +def _reduction_meta(inp, dims, *, output_dtype=None): + """ + Meta function for single output reduction operations + Stride logic is incorrect + """ + assert isinstance(inp, TensorLike) + if output_dtype is None: + output_dtype = inp.dtype + output_shape = utils.compute_reduction_output_shape(inp.shape, dims) + return TensorMeta( + shape=output_shape, + strides=utils.make_contiguous_strides_for(output_shape), + dtype=output_dtype, + device=inp.device, + ) + + +def _var_reduction_meta(inp, dims, *, correction): + if utils.is_complex_dtype(inp.dtype): + output_dtype = utils.corresponding_real_dtype(inp.dtype) + else: + output_dtype = inp.dtype + return _reduction_meta(inp, dims, output_dtype=output_dtype) + + +_sum_doc = """ + Computes the sum of elements in the input tensor over the list of dimensions + specified in the dim argument + """ +_xor_sum_doc = """ + Computes the xor sum of elements in the input tensor over the list of dimensions + specified in the dim argument + """ +_prod_doc = """ + Computes the product of elements in the input tensor over the list of dimensions + specified in the dim argument + """ +_amax_doc = """ + Computes the maximum value of elements in the input tensor over the list of dimensions + specified in the dim argument + """ +_amin_doc = """ + Computes the minimum value of elements in the input tensor over the list of dimensions + specified in the dim argument + """ +_var_doc = """ + Computes the biased variance of x over the list of dimensions specified in the dim argument + """ + + +def _make_reduction_prim(name: str, impl_aten, doc): + """Creates a reduction prim.""" + return _make_prim( + schema=f"{name}(Tensor inp, int[]? dims, *, ScalarType? 
output_dtype=None) -> Tensor", + meta=_reduction_meta, + impl_aten=impl_aten, + return_type=RETURN_TYPE.NEW, + doc=doc, + ) + + +def _make_var_reduction_prim(name: str, impl_aten, doc): + """Creates a reduction prim.""" + return _make_prim( + schema=f"{name}(Tensor inp, int[]? dims, *, float correction, ScalarType? output_dtype=None) -> Tensor", + meta=_var_reduction_meta, + impl_aten=impl_aten, + return_type=RETURN_TYPE.NEW, + doc=doc, + ) + + +sum = _make_reduction_prim( + name="sum", + impl_aten=torch.sum, + doc=_sum_doc, +) + + +def _xor_sum_aten( + inp: TensorLikeType, + dims: Optional[DimsSequenceType], + *, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + raise NotImplementedError("xor_sum only implemented with inductor") + + +xor_sum = _make_reduction_prim( + name="xor_sum", + impl_aten=_xor_sum_aten, + doc=_xor_sum_doc, +) + + +def _prod_aten( + inp: TensorLikeType, + dims: Optional[DimsSequenceType], + *, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + if dims is not None: + for d in sorted(dims, reverse=True): + assert d >= 0 + inp = torch.prod(inp, d, dtype=dtype) + return inp + else: + return torch.prod(inp, dims, dtype=dtype) + + +prod = _make_reduction_prim( + name="prod", + impl_aten=_prod_aten, + doc=_prod_doc, +) + +var = _make_var_reduction_prim( + name="var", + impl_aten=torch.var, + doc=_var_doc, +) + +amax = _make_reduction_prim( + name="amax", + impl_aten=torch.amax, + doc=_amax_doc, +) + +amin = _make_reduction_prim( + name="amin", + impl_aten=torch.amin, + doc=_amin_doc, +) + + +_iota_doc = """ + Constructs a 1-D tensor t where ``t[i] == start + i * step``. +""" + + +# TODO: layout, pin_memory, memory_format +# TODO: model requires_grad on TensorMeta +def _iota_meta( + length: int, + *, + start: int, + step: int, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> TensorLikeType: + torch._check( + utils.is_integer_dtype(dtype), + lambda: "prims.iota only supports integer dtypes", + ) + torch._check(step != 0, lambda: "step must be nonzero") + return torch.empty( + length, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +def _iota_aten( + length: int, + *, + start: int, + step: int, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> TensorLikeType: + end = start + length * step + return torch.arange( + start, end, step, dtype=dtype, device=device, requires_grad=requires_grad + ) + + +iota = _make_prim( + schema="iota(SymInt length, *, SymInt start, SymInt step, ScalarType dtype, Device device, bool requires_grad) -> Tensor", # noqa: B950 + return_type=RETURN_TYPE.NEW, + meta=_iota_meta, + impl_aten=_iota_aten, + doc=_iota_doc, +) + + +# TODO: layout, pin_memory, memory_format +# TODO: model requires_grad on TensorMeta +def _empty_meta( + shape: ShapeType, *, dtype: torch.dtype, device: torch.device, requires_grad: bool +) -> TensorLikeType: + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device) + + +def _empty_aten( + shape: ShapeType, *, dtype: torch.dtype, device: torch.device, requires_grad: bool +) -> Tensor: + return torch.empty(shape, dtype=dtype, device=device, requires_grad=requires_grad) + + +_empty_doc = """ + Creates a tensor with uninitialized values and the specified shape, dtype, and device. 
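+ + The allocated memory is not zeroed, so the tensor's contents must be written before they are read.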
+""" + +empty = _make_prim( + schema="empty(SymInt[] shape, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor", + meta=_empty_meta, + impl_aten=_empty_aten, + return_type=RETURN_TYPE.NEW, + doc=_empty_doc, +) + + +def _empty_strided_meta( + shape: ShapeType, + strides: StrideType, + *, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> TensorLikeType: + return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device) + + +_empty_strided_doc = """ + Creates a tensor with uninitialized values. +""" + +# TODO: add layout, pin_memory +empty_strided = _make_prim( + schema="empty_strided(SymInt[] shape, SymInt[] strides, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor", + return_type=RETURN_TYPE.NEW, + meta=_empty_strided_meta, + impl_aten=torch.empty_strided, + doc=_empty_strided_doc, +) + + +def _empty_permuted_meta( + shape: ShapeType, + physical_layout: DimsSequenceType, + *, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> TensorLikeType: + p_strides = utils.make_contiguous_strides_for([shape[l] for l in physical_layout]) + dim = len(shape) + torch._check( + len(physical_layout) == dim, + lambda: ( + "Number of dimensions in the tensor input does not match the " + f"length of the physical layout; i.e. len(size) = {dim} " + f"is not equal to len(physical_layout) = {len(physical_layout)}" + ), + ) + strides = [0] * len(shape) + seen_dims = set() + for p, l in enumerate(physical_layout): + torch._check( + 0 <= l < dim, + lambda: ( + f"Dimension out of range (expected to be between 0 and {dim - 1}, but got " + f"{l} at index {p}). NB: negative dims " + "not currently supported; file an issue if you want it." + ), + ) + torch._check(l not in seen_dims, lambda: "Duplicate dim not allowed") + strides[l] = p_strides[p] + seen_dims.add(l) + return TensorMeta( + shape=shape, + strides=strides, + dtype=dtype, + device=device, + ) + + +_empty_permuted_doc = """ + Creates a tensor with uninitialized values according to some physical layout, + that is guaranteed to be non-overlapping and dense. +""" + +# TODO: add layout, pin_memory +empty_permuted = _make_prim( + schema="empty_permuted(SymInt[] shape, int[] physical_layout, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor", # noqa: B950 + return_type=RETURN_TYPE.NEW, + meta=_empty_permuted_meta, + impl_aten=torch.empty_permuted, + doc=_empty_permuted_doc, +) + + +def _full_meta( + shape: ShapeType, + fill_value: NumberType, + *, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> TensorLikeType: + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device) + + +def _full_aten( + shape: ShapeType, + fill_value: NumberType, + *, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> Tensor: + # Note that Mypy thinks torch.full can't accept a complex fill_value + return torch.full( + shape, fill_value, dtype=dtype, device=device, requires_grad=requires_grad # type: ignore[arg-type] + ) + + +_full_doc = """ + Creates a tensor filled with the given fill value, and with the specified shape, dtype, and device. 
+""" + +# TODO: add layout +full = _make_prim( + schema="full(SymInt[] shape, Scalar fill_value, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor", + meta=_full_meta, + impl_aten=_full_aten, + return_type=RETURN_TYPE.NEW, + doc=_full_doc, +) + + +def _full_like_meta( + a: TensorLikeType, + fill_value: NumberType, + *, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> TensorLikeType: + strides = utils.compute_elementwise_output_strides(a) + if a.numel() == 0: + strides = a.stride() + + return TensorMeta(a, strides=strides, dtype=dtype, device=device) + + +def _full_like_aten( + a: Tensor, + fill_value: NumberType, + *, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, +) -> Tensor: + # Note that Mypy thinks torch.full can't accept a complex fill_value + return torch.full_like( + a, fill_value, dtype=dtype, device=device, requires_grad=requires_grad # type: ignore[arg-type] + ) + + +_full_like_doc = """ + Creates a tensor filled with the given fill value, and the same shape, dtype, and device as the + given tensor by default. The dtype and device settings can be overridden + by specifying them explicitly. +""" + +full_like = _make_prim( + schema="full_like(Tensor a, Scalar fill_value, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor", + meta=_full_like_meta, + impl_aten=_full_like_aten, + return_type=RETURN_TYPE.NEW, + doc=_full_like_doc, +) + + +def _scalar_tensor_meta( + scalar: NumberType, + *, + dtype: torch.dtype, + device: torch.device, +) -> TensorLikeType: + shape: ShapeType = [] + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta(scalar, shape=shape, strides=strides, dtype=dtype, device=device) + + +def _scalar_tensor_aten( + scalar: NumberType, + *, + dtype: torch.dtype, + device: torch.device, +) -> Tensor: + if isinstance(scalar, complex) and ( + dtype is None or not utils.is_complex_dtype(dtype) + ): + raise TypeError("Complex scalar requires complex tensor dtype.") + # Note that Mypy thinks torch.scalar can't accept a complex scalar + return torch.scalar_tensor(scalar, dtype=dtype, device=device) # type: ignore[arg-type] + + +_scalar_tensor_doc = """ + Wraps a Number into a Tensor with the specified dtype and device. +""" + +# TODO: add layout and pin_memory support +scalar_tensor = _make_prim( + schema="scalar_tensor(Scalar s, *, ScalarType? dtype=None, Device? 
device=None) -> Tensor", + meta=_scalar_tensor_meta, + impl_aten=_scalar_tensor_aten, + return_type=RETURN_TYPE.NEW, + doc=_scalar_tensor_doc, +) + + +# +# Linear algebra (linalg) prims +# + + +def _svd_meta( + A: TensorLikeType, *, full_matrices: bool +) -> Tuple[TensorLikeType, TensorLikeType, TensorLikeType]: + utils.check_is_matrix(A, "linalg.svd") + utils.check_fp_or_complex(A.dtype, "linalg.svd", allow_low_precision_dtypes=False) + + A_shape = A.shape + batch = A_shape[:-2] + m, n = A_shape[-2:] + k = min(m, n) + + shape_U = batch + (m, m if full_matrices else k) + strides_U = utils.make_contiguous_strides_for(shape_U, row_major=False) + U = TensorMeta(shape=shape_U, strides=strides_U, dtype=A.dtype, device=A.device) + + shape_S = batch + (k,) + strides_S = utils.make_contiguous_strides_for(shape_S) + S = TensorMeta( + shape=shape_S, + strides=strides_S, + dtype=utils.corresponding_real_dtype(A.dtype) if A.is_complex() else A.dtype, + device=A.device, + ) + + shape_Vh = batch + (n if full_matrices else k, n) + # The CPU backend returns V, but the cuSolver backend returns V^H + # TODO The MAGMA backend returns V, so this is wrong if used with the MAGMA backend + is_cuda = A.device.type == "cuda" + strides_Vh = utils.make_contiguous_strides_for(shape_Vh, row_major=is_cuda) + Vh = TensorMeta(shape=shape_Vh, strides=strides_Vh, dtype=A.dtype, device=A.device) + # Also makes sure this is CUDA or HIP: + # https://pytorch.org/docs/stable/notes/hip.html#checking-for-hip + if A.numel() != 0 and Vh.is_complex() and torch.cuda.is_available(): + Vh = Vh.conj() + return U, S, Vh + + +def _svd_aten( + A: TensorLikeType, *, full_matrices: bool +) -> Tuple[Tensor, Tensor, Tensor]: + return torch.linalg.svd(A, full_matrices=full_matrices) + + +_svd_doc = """ + Returns the SVD of a matrix or batch of matrices. + + The `full_matrices` flag controls whether the full or reduced SVD decomposition is returned. +""" + +svd = _make_prim( + schema="svd(Tensor A, *, bool full_matrices) -> (Tensor U, Tensor S, Tensor Vh)", + meta=_svd_meta, + impl_aten=_svd_aten, + return_type=(RETURN_TYPE.NEW, RETURN_TYPE.NEW, RETURN_TYPE.NEW), + doc=_svd_doc, +) + + +# +# Randomness Prims +# + + +def _normal_meta( + shape: ShapeType, + *, + mean: Union[float, complex], + std: float, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, + generator: Optional[torch.Generator] = None, +) -> TensorLikeType: + torch._check( + std >= 0.0, + lambda: f"expected non-negative standard deviation, but got std={std}", + ) + + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: f"expected a floating-point or complex dtype, but got dtype={dtype}", + ) + + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device) + + +def _normal_aten( + shape: ShapeType, + *, + mean: Union[float, complex], + std: float, + dtype: torch.dtype, + device: torch.device, + requires_grad: bool, + generator: Optional[torch.Generator] = None, +) -> Tensor: + a = torch.empty(shape, dtype=dtype, device=device, requires_grad=requires_grad) + with torch.no_grad(): + # NOTE: normal_ is incorrectly annotated to expect mean to be a float + a.normal_(mean, std, generator=generator) # type: ignore[arg-type] + return a + + +_normal_doc = """ + Constructs a tensor filled with values drawn from a normal distribution with the specified mean + and standard deviation. + + Only supports floating-point types. 
+""" + +normal = _make_prim( + schema=( + "normal(SymInt[] shape, *, Scalar mean, Scalar std, ScalarType dtype, Device device, bool requires_grad, Generator? generator=None) -> Tensor" # noqa: B950 + ), + return_type=RETURN_TYPE.NEW, + meta=_normal_meta, + impl_aten=_normal_aten, + doc=_normal_doc, +) + + +def _uniform_meta( + shape: ShapeType, + *, + low: float, + high: float, + dtype: torch.dtype, + device: torch.device, + generator: Optional[torch.Generator] = None, +) -> TensorLikeType: + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device) + + +def _uniform_aten( + shape: ShapeType, + *, + low: float, + high: float, + dtype: torch.dtype, + device: torch.device, + generator: Optional[torch.Generator] = None, +) -> Tensor: + a = torch.empty(shape, dtype=dtype, device=device) + a.uniform_(low, high, generator=generator) + return a + + +_uniform_doc = """ + Constructs a tensor filled with values drawn uniformly from low to high. +""" + +# TODO: we should more seriously review randomness modeling and prims +_uniform_helper = _make_prim( + schema=( + "uniform(SymInt[] shape, *, Scalar low, Scalar high, ScalarType dtype, Device device, Generator? generator=None) -> Tensor" + ), + return_type=RETURN_TYPE.NEW, + meta=_uniform_meta, + impl_aten=_uniform_aten, + doc=_uniform_doc, +) + +# +# FFT prims +# + + +def _fft_r2c_meta( + input: TensorLike, + *, + dim: DimsSequenceType, + onesided: bool, +) -> TensorLikeType: + dim = utils.canonicalize_dims(input.ndim, dim) + utils.validate_no_repeating_dims(dim) + + shape = list(input.shape) + if onesided: + last_dim = dim[-1] + shape[last_dim] = shape[last_dim] // 2 + 1 + + dtype = utils.corresponding_complex_dtype(input.dtype) + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=input.device) + + +def _fft_r2c_aten( + input: TensorLike, + *, + dim: DimsSequenceType, + onesided: bool, +) -> TensorLikeType: + normalization = 0 # No normalization + return torch._fft_r2c(input, dim, normalization, onesided) + + +_fft_r2c_doc = """ + Performs a real to complex Fast Fourier Transform +""" + + +fft_r2c = _make_prim( + schema="fft_r2c(Tensor self, *, int[] dim, bool onesided) -> Tensor", + meta=_fft_r2c_meta, + impl_aten=_fft_r2c_aten, + return_type=RETURN_TYPE.NEW, + doc=_fft_r2c_doc, +) + + +def _fft_c2c_meta( + input: TensorLike, + *, + dim: DimsSequenceType, + forward: bool, +) -> TensorLikeType: + dim = utils.canonicalize_dims(input.ndim, dim) + utils.validate_no_repeating_dims(dim) + + shape = input.shape + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta( + shape=shape, strides=strides, dtype=input.dtype, device=input.device + ) + + +def _fft_c2c_aten( + input: TensorLike, + *, + dim: DimsSequenceType, + forward: bool, +) -> TensorLikeType: + normalization = 0 # No normalization + return torch._fft_c2c(input, dim, normalization, forward) + + +_fft_c2c_doc = """ + Performs either a Fast Fourier Transform, or its inverse +""" + + +fft_c2c = _make_prim( + schema="fft_c2c(Tensor self, *, int[] dim, bool forward) -> Tensor", + meta=_fft_c2c_meta, + impl_aten=_fft_c2c_aten, + return_type=RETURN_TYPE.NEW, + doc=_fft_c2c_doc, +) + + +def _fft_c2r_meta( + input: TensorLike, + *, + dim: DimsSequenceType, + last_dim_size: int, +) -> TensorLikeType: + dim = utils.canonicalize_dims(input.ndim, dim) + utils.validate_no_repeating_dims(dim) + + shape = list(input.shape) + shape[dim[-1]] = last_dim_size + 
dtype = utils.corresponding_real_dtype(input.dtype) + strides = utils.make_contiguous_strides_for(shape) + return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=input.device) + + +def _fft_c2r_aten( + input: TensorLike, + *, + dim: DimsSequenceType, + last_dim_size: int, +) -> TensorLikeType: + normalization = 0 # No normalization + return torch._fft_c2r(input, dim, normalization, last_dim_size) + + +_fft_c2r_doc = """ + Performs a complex to real Inverse Fast Fourier Transform +""" + + +fft_c2r = _make_prim( + schema="fft_c2r(Tensor self, *, int[] dim, SymInt last_dim_size) -> Tensor", + meta=_fft_c2r_meta, + impl_aten=_fft_c2r_aten, + return_type=RETURN_TYPE.NEW, + doc=_fft_c2r_doc, +) + + +def _frexp_meta(self: TensorLikeType) -> Tuple[TensorLikeType, TensorLikeType]: + torch._check( + self.dtype.is_floating_point, + lambda: "torch.frexp() only supports floating-point dtypes", + ) + return torch.empty_like(self), torch.empty_like(self, dtype=torch.int32) + + +frexp = _make_prim( + schema="frexp(Tensor self) -> (Tensor mantissa, Tensor exponent)", + meta=_frexp_meta, + return_type=(RETURN_TYPE.NEW, RETURN_TYPE.NEW), + impl_aten=torch.frexp, + doc="", +) + +register_rng_prims() +register_debug_prims() diff --git a/venv/lib/python3.10/site-packages/torch/_prims/context.py b/venv/lib/python3.10/site-packages/torch/_prims/context.py new file mode 100644 index 0000000000000000000000000000000000000000..2c7a030b35093689d9171000054d8a3362d52d25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_prims/context.py @@ -0,0 +1,144 @@ +import functools +from contextlib import nullcontext +from typing import Any, Callable, Dict, Optional, Sequence + +import torch + +import torch._decomp +import torch._prims + +import torch._refs +import torch._refs.nn +import torch._refs.nn.functional +import torch._refs.special +import torch.overrides + +from torch._prims_common import torch_function_passthrough + + +@functools.lru_cache(None) +def torch_to_refs_map(): + """ + Mapping of torch API functions to torch._refs functions. + E.g. torch_to_refs_map()[torch.add] == torch._refs.add + """ + modules = [ + (torch, torch._refs), + (torch.nn, torch._refs.nn), + (torch.nn.functional, torch._refs.nn.functional), + (torch.special, torch._refs.special), + (torch.fft, torch._refs.fft), + (torch.linalg, torch._refs.linalg), + ] + r: Dict[Any, Any] = { + torch.Tensor.__invert__: torch._refs.bitwise_not, + torch.Tensor.__xor__: torch._refs.bitwise_xor, + torch.Tensor.__and__: torch._refs.bitwise_and, + torch.Tensor.__or__: torch._refs.bitwise_or, + torch.Tensor.__eq__: torch._refs.eq, + torch.Tensor.__rsub__: torch._refs.rsub, + torch.Tensor.__rtruediv__: torch._refs.rtruediv, + torch.Tensor.__floordiv__: torch._refs.floor_divide, + torch.Tensor.__rfloordiv__: torch._refs.rfloordiv, + torch.Tensor.__pow__: torch._refs.pow, + torch.Tensor.__rpow__: torch._refs.rpow, + torch.Tensor.new_empty: torch._refs.new_empty, + torch.Tensor.new_full: torch._refs.new_full, + torch.Tensor.new_zeros: torch._refs.new_zeros, + torch.Tensor.new_ones: torch._refs.new_ones, + torch.Tensor.fill_: torch._refs.fill_, + torch.Tensor.zero_: torch._refs.zero_, + torch.Tensor.to: torch._refs.to, + torch.Tensor.sum_to_size: torch._refs.sum_to_size, + # TODO: Should these methods be mapped some other way? 
+ torch.Tensor.copy_: torch._prims.copy_to, + torch.Tensor.resize: torch._prims.resize, + } + for mod_torch, mod_refs in modules: + for s in mod_refs.__all__: # type: ignore[attr-defined] + r[mod_torch.__dict__.get(s)] = mod_refs.__dict__.get(s) + + # Support remapping torch.Tensor.foo to _refs.foo + for s in dir(torch.Tensor): + if s in torch._refs.__all__: + r[getattr(torch.Tensor, s)] = torch._refs.__dict__.get(s) + + # Support conversions + for s in torch._refs._conversions.__all__: + tensor_attr = getattr(torch.Tensor, s, None) or getattr(torch, s) + r[tensor_attr] = torch._refs._conversions.__dict__.get(s) + + return r + + +@functools.lru_cache(None) +def all_prims(): + """ + Set of all prim functions, e.g., torch._prims.add in all_prims() + """ + return {torch._prims.__dict__.get(s) for s in torch._prims.__all__} + + +class TorchRefsMode(torch.overrides.TorchFunctionMode): + """ + Switches the interpretation of torch.* functions and Tensor methods to + use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.) + + >>> # xdoctest: +SKIP + >>> with TorchRefsMode(): + ... torch.add(x, y) # calls torch._refs.add(x, y) + + By default, this context manager will fall back on the torch.* if the + ref does not exist; set strict=True to error if this occurs. + If the ref exists we still would like to fall back on the torch.* sometimes, + this behavior can be customized by passing a function to should_fallback_fn. + """ + + def __init__( + self, + strict=False, + should_fallback_fn=lambda *_: False, + prims_mode_cls=nullcontext, + ): + self.strict = strict + self.should_fallback_fn = should_fallback_fn + self.prims_mode_cls = prims_mode_cls + + def __torch_function__( + self, + orig_func: Callable, + types: Sequence, + args: Sequence[Any] = (), + kwargs: Optional[Dict] = None, + ): + if kwargs is None: + kwargs = {} + # For primitive operations, run them as is without interception + # Unless we are in prims_mode, in which case we want to use nvprims + if orig_func in torch_function_passthrough or orig_func in all_prims(): + with self.prims_mode_cls(): + return orig_func(*args, **kwargs) + mapping = torch_to_refs_map() + func = mapping.get(orig_func, None) + + # For torch.ops.aten.*, use registered decompositions from torch._decomp + # torch._decomp.decomposition_table provides a mapping from + # torch.ops.aten.* to torch._refs or torch._decomp.decompositions + # implementations. 
+ # There're other ways to implement this functionality, + # see https://github.com/pytorch/pytorch/pull/82657#discussion_r939776417 + if func is None and isinstance(orig_func, torch._ops.OpOverload): + func = torch._decomp.decomposition_table.get(orig_func, None) + + if func is not None: + # If the ref exists query whether we should use it or not + if self.should_fallback_fn(self, orig_func, func, args, kwargs): + return orig_func(*args, **kwargs) + # torch calls inside func should be interpreted as refs calls + with self: + return func(*args, **kwargs) + if self.strict: + raise RuntimeError( + f"no _refs support for {torch.overrides.resolve_name(orig_func)}" + ) + return orig_func(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/_prims/debug_prims.py b/venv/lib/python3.10/site-packages/torch/_prims/debug_prims.py new file mode 100644 index 0000000000000000000000000000000000000000..d4d7a0c9999cbbd2b99cf0bdd859c2c532428bdf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_prims/debug_prims.py @@ -0,0 +1,59 @@ +import contextlib +from typing import Optional, Sequence + +import torch +from torch._custom_op.impl import custom_op +from torch.utils._content_store import ContentStoreReader + +LOAD_TENSOR_READER: Optional[ContentStoreReader] = None + + +@contextlib.contextmanager +def load_tensor_reader(loc): + global LOAD_TENSOR_READER + assert LOAD_TENSOR_READER is None + # load_tensor is an "op", and we will play merry hell on + # Inductor's memory planning if we return a tensor that + # aliases another tensor that we previously returned from + # an operator. So unlike standard ContentStoreReader use, + # we disable the cache so that you always get fresh storages + # (no aliasing for you!) + LOAD_TENSOR_READER = ContentStoreReader(loc, cache=False) + try: + yield + finally: + LOAD_TENSOR_READER = None + + +def register_debug_prims(): + @custom_op("debugprims::load_tensor") + def load_tensor( # type: ignore[empty-body] + name: str, + size: Sequence[int], + stride: Sequence[int], + *, + dtype: torch.dtype, + device: torch.device, + ) -> torch.Tensor: + ... + + @load_tensor.impl_factory() + def load_tensor_factory(name, size, stride, dtype, device): + if LOAD_TENSOR_READER is None: + from torch._dynamo.testing import rand_strided + + return rand_strided(size, stride, dtype, device) + else: + from torch._dynamo.utils import clone_input + + # device argument here takes care of coercion + r = LOAD_TENSOR_READER.read_tensor(name, device=device) + assert list(r.size()) == size, f"{r.size()} != {size}" + assert list(r.stride()) == stride, f"{r.stride()} != {stride}" + assert r.device == device, f"{r.device} != {device}" + + # Unlike the other properties, we will do coercions for dtype + # mismatch + if r.dtype != dtype: + r = clone_input(r, dtype=dtype) + return r diff --git a/venv/lib/python3.10/site-packages/torch/_prims/executor.py b/venv/lib/python3.10/site-packages/torch/_prims/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..bb2fafce872603d73e66ff3fb1b4fd3e1e639622 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_prims/executor.py @@ -0,0 +1,60 @@ +from typing import Callable, Optional + +from torch._prims.context import TorchRefsMode + +from torch.fx import GraphModule +from torch.fx.experimental.proxy_tensor import make_fx, wrapper_and_args_for_make_fx + + +def execute( + gm: GraphModule, + *args, + executor: str = "aten", + executor_parameters: Optional[dict] = None, +): + """ + Prototype ATen executor. 
+ + Just executes the context's graph. + """ + + if executor == "aten": + return gm.forward(*args) + + msg = f"Received unexpected value for 'executor': {executor}. Allowed values are: aten." + raise ValueError(msg) + + +def make_traced(fn: Callable): + """ + Returns a function that, when called, will + trace its torch operations to prims and then + execute those prims on the requested trace executor + (possibly lowering them to that trace executor first). + + Only supports the torch operations defined in _torch_to_reference_map + in context.py and operations with positional args. All args must + be tensors. + In the near future all these restrictions will be lifted. + + Example usage: + + def foo(a, b): + return torch.add(a, b) + + traced_foo = make_traced(foo) + + a = torch.randn((1, 2, 3, 4, 5), device='cuda') + b = torch.randn((1, 2, 3, 4, 5), device='cuda') + result = traced_foo(a, b, executor='aten') + """ + + def _traced(*args, executor="aten", **kwargs): + # TODO: caching + wrapped, all_args = wrapper_and_args_for_make_fx(fn, args, kwargs) + + with TorchRefsMode(): + gm = make_fx(wrapped)(all_args) + return execute(gm, all_args, executor=executor) + + return _traced diff --git a/venv/lib/python3.10/site-packages/torch/amp/__init__.py b/venv/lib/python3.10/site-packages/torch/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e0be696975584bbc0460e513eea0dd0d73736ea8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/amp/__init__.py @@ -0,0 +1,2 @@ +from .autocast_mode import _enter_autocast, _exit_autocast, autocast +from .grad_scaler import GradScaler diff --git a/venv/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d26d0b2a238bd238b68e08281f482eb7ec8ac4b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77922fca0ea267d9e8e9357e39c259e57f4cbb15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/amp/__pycache__/grad_scaler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/amp/__pycache__/grad_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c19cbefe61be7b4a664a2f822d2d82f1db966925 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/amp/__pycache__/grad_scaler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/amp/autocast_mode.py b/venv/lib/python3.10/site-packages/torch/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..30c6aefcf1bdaf24943b408694c59971a8033ca6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/amp/autocast_mode.py @@ -0,0 +1,436 @@ +import functools +import warnings + +from typing import Any, Optional + +import torch +from torch.types import _dtype + +__all__ = ["autocast_decorator", "autocast"] + + +def autocast_decorator(autocast_instance, func): + @functools.wraps(func) + def decorate_autocast(*args, **kwargs): + with autocast_instance: + return func(*args, 
**kwargs) + + decorate_autocast.__script_unsupported = "@autocast() decorator is not supported in script mode" # type: ignore[attr-defined] + return decorate_autocast + + +class autocast: + r""" + Instances of :class:`autocast` serve as context managers or decorators that + allow regions of your script to run in mixed precision. + + In these regions, ops run in an op-specific dtype chosen by autocast + to improve performance while maintaining accuracy. + See the :ref:`Autocast Op Reference` for details. + + When entering an autocast-enabled region, Tensors may be any type. + You should not call ``half()`` or ``bfloat16()`` on your model(s) or inputs when using autocasting. + + :class:`autocast` should wrap only the forward pass(es) of your network, including the loss + computation(s). Backward passes under autocast are not recommended. + Backward ops run in the same type that autocast used for corresponding forward ops. + + Example for CUDA Devices:: + + # Creates model and optimizer in default precision + model = Net().cuda() + optimizer = optim.SGD(model.parameters(), ...) + + for input, target in data: + optimizer.zero_grad() + + # Enables autocasting for the forward pass (model + loss) + with torch.autocast(device_type="cuda"): + output = model(input) + loss = loss_fn(output, target) + + # Exits the context manager before backward() + loss.backward() + optimizer.step() + + See the :ref:`CUDA Automatic Mixed Precision examples` for usage (along with gradient scaling) + in more complex scenarios (e.g., gradient penalty, multiple models/losses, custom autograd functions). + + :class:`autocast` can also be used as a decorator, e.g., on the ``forward`` method of your model:: + + class AutocastModel(nn.Module): + ... + @torch.autocast(device_type="cuda") + def forward(self, input): + ... + + Floating-point Tensors produced in an autocast-enabled region may be ``float16``. + After returning to an autocast-disabled region, using them with floating-point + Tensors of different dtypes may cause type mismatch errors. If so, cast the Tensor(s) + produced in the autocast region back to ``float32`` (or other dtype if desired). + If a Tensor from the autocast region is already ``float32``, the cast is a no-op, + and incurs no additional overhead. + CUDA Example:: + + # Creates some tensors in default dtype (here assumed to be float32) + a_float32 = torch.rand((8, 8), device="cuda") + b_float32 = torch.rand((8, 8), device="cuda") + c_float32 = torch.rand((8, 8), device="cuda") + d_float32 = torch.rand((8, 8), device="cuda") + + with torch.autocast(device_type="cuda"): + # torch.mm is on autocast's list of ops that should run in float16. + # Inputs are float32, but the op runs in float16 and produces float16 output. + # No manual casts are required. + e_float16 = torch.mm(a_float32, b_float32) + # Also handles mixed input types + f_float16 = torch.mm(d_float32, e_float16) + + # After exiting autocast, calls f_float16.float() to use with d_float32 + g_float32 = torch.mm(d_float32, f_float16.float()) + + CPU Training Example:: + + # Creates model and optimizer in default precision + model = Net() + optimizer = optim.SGD(model.parameters(), ...) + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + + # Runs the forward pass with autocasting. 
+ with torch.autocast(device_type="cpu", dtype=torch.bfloat16): + output = model(input) + loss = loss_fn(output, target) + + loss.backward() + optimizer.step() + + + CPU Inference Example:: + + # Creates model in default precision + model = Net().eval() + + with torch.autocast(device_type="cpu", dtype=torch.bfloat16): + for input in data: + # Runs the forward pass with autocasting. + output = model(input) + + CPU Inference Example with Jit Trace:: + + class TestModel(nn.Module): + def __init__(self, input_size, num_classes): + super().__init__() + self.fc1 = nn.Linear(input_size, num_classes) + def forward(self, x): + return self.fc1(x) + + input_size = 2 + num_classes = 2 + model = TestModel(input_size, num_classes).eval() + + # For now, we suggest to disable the Jit Autocast Pass, + # As the issue: https://github.com/pytorch/pytorch/issues/75956 + torch._C._jit_set_autocast_mode(False) + + with torch.cpu.amp.autocast(cache_enabled=False): + model = torch.jit.trace(model, torch.randn(1, input_size)) + model = torch.jit.freeze(model) + # Models Run + for _ in range(3): + model(torch.randn(1, input_size)) + + Type mismatch errors *in* an autocast-enabled region are a bug; if this is what you observe, + please file an issue. + + ``autocast(enabled=False)`` subregions can be nested in autocast-enabled regions. + Locally disabling autocast can be useful, for example, if you want to force a subregion + to run in a particular ``dtype``. Disabling autocast gives you explicit control over + the execution type. In the subregion, inputs from the surrounding region + should be cast to ``dtype`` before use:: + + # Creates some tensors in default dtype (here assumed to be float32) + a_float32 = torch.rand((8, 8), device="cuda") + b_float32 = torch.rand((8, 8), device="cuda") + c_float32 = torch.rand((8, 8), device="cuda") + d_float32 = torch.rand((8, 8), device="cuda") + + with torch.autocast(device_type="cuda"): + e_float16 = torch.mm(a_float32, b_float32) + with torch.autocast(device_type="cuda", enabled=False): + # Calls e_float16.float() to ensure float32 execution + # (necessary because e_float16 was created in an autocasted region) + f_float32 = torch.mm(c_float32, e_float16.float()) + + # No manual casts are required when re-entering the autocast-enabled region. + # torch.mm again runs in float16 and produces float16 output, regardless of input types. + g_float16 = torch.mm(d_float32, f_float32) + + The autocast state is thread-local. If you want it enabled in a new thread, the context manager or decorator + must be invoked in that thread. This affects :class:`torch.nn.DataParallel` and + :class:`torch.nn.parallel.DistributedDataParallel` when used with more than one GPU per process + (see :ref:`Working with Multiple GPUs`). + + Args: + device_type(str, required): Device type to use. Possible values are: 'cuda', 'cpu', 'xpu' and 'hpu'. + The type is the same as the `type` attribute of a :class:`torch.device`. + Thus, you may obtain the device type of a tensor using `Tensor.device.type`. + enabled(bool, optional): Whether autocasting should be enabled in the region. + Default: ``True`` + dtype(torch_dtype, optional): Whether to use torch.float16 or torch.bfloat16. + cache_enabled(bool, optional): Whether the weight cache inside autocast should be enabled. 
+ Default: ``True`` + """ + + def __init__( + self, + device_type: str, + dtype: Optional[_dtype] = None, + enabled: bool = True, + cache_enabled: Optional[bool] = None, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = device_type + self.fast_dtype = dtype + # TODO: support get_autocast_gpu/cpu_dtype + assert dtype is not None + return + self.device = device_type + self.custom_backend_name = torch._C._get_privateuse1_backend_name() + if self.device == "cuda": + self.fast_dtype = torch.get_autocast_gpu_dtype() + elif self.device == "cpu": + self.fast_dtype = torch.get_autocast_cpu_dtype() + elif self.device == "xpu": + self.fast_dtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined] + elif self.device == "ipu": + self.fast_dtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined] + elif self.device == "hpu": + self.fast_dtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined] + elif self.device == "xla": + self.fast_dtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined] + elif self.device == self.custom_backend_name: + necessary_funcs = [ + "is_autocast_enabled", + "set_autocast_enabled", + "get_autocast_dtype", + "set_autocast_dtype", + "get_amp_supported_dtype", + ] + message = f"Tried to use AMP with the `{self.custom_backend_name}` backend, but the backend has not " + message += "registered a module or the module miss some necessary funcs. The backend should register " + message += "a module by `torch._register_device_module`, and the module must have these funcs: \n" + message += "`is_autocast_enabled() -> bool`, `set_autocast_enabled(bool) -> None`, " + message += "`get_autocast_dtype() -> torch.dtype`, `set_autocast_dtype(torch.dtype) " + message += ( + "-> None` and `get_amp_supported_dtype() -> List[torch.dtype]`. \n" + ) + + assert hasattr(torch, self.custom_backend_name), message + self.custom_device_mod = getattr(torch, self.custom_backend_name) + for func in necessary_funcs: + assert hasattr(self.custom_device_mod, func), ( + message + f"But the func `{func}` is missing. \n" + ) + + self.fast_dtype = self.custom_device_mod.get_autocast_dtype() + else: + raise RuntimeError( + f"User specified an unsupported autocast device_type '{self.device}'" + ) + self._cache_enabled = torch.is_autocast_cache_enabled() + if ( + enabled + and torch.cuda.amp.common.amp_definitely_not_available() + and self.device == "cuda" + ): + warnings.warn( + "User provided device_type of 'cuda', but CUDA is not available. Disabling" + ) + enabled = False + if dtype is not None: + self.fast_dtype = dtype + if cache_enabled is not None: + self._cache_enabled = cache_enabled + + if self.device == "cpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype and enabled: + error_message = "In CPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "CPU Autocast only supports dtype of " + error_message += ( + ", ".join(str(dtype) for dtype in supported_dtype) + " currently." + ) + warnings.warn(error_message) + enabled = False + elif self.device == "xpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype: + error_message = "In XPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "XPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." 
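A minimal sketch of the backend-module contract spelled out in the custom-backend error message above, assuming a hypothetical `_MyAmpBackendModule`; a real `privateuse1` backend needs far more than these AMP hooks, so the registration call is shown commented out:

    import torch

    class _MyAmpBackendModule:
        # Minimal AMP hooks torch.autocast expects from a custom backend module
        # (hypothetical names and defaults; not an official PyTorch module).
        _autocast_enabled = False
        _autocast_dtype = torch.float16

        @staticmethod
        def is_autocast_enabled() -> bool:
            return _MyAmpBackendModule._autocast_enabled

        @staticmethod
        def set_autocast_enabled(enabled: bool) -> None:
            _MyAmpBackendModule._autocast_enabled = enabled

        @staticmethod
        def get_autocast_dtype() -> torch.dtype:
            return _MyAmpBackendModule._autocast_dtype

        @staticmethod
        def set_autocast_dtype(dtype: torch.dtype) -> None:
            _MyAmpBackendModule._autocast_dtype = dtype

        @staticmethod
        def get_amp_supported_dtype():
            return [torch.float16, torch.bfloat16]

    # A real backend would register its module under its backend name, e.g.:
    # torch._register_device_module("privateuseone", _MyAmpBackendModule)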
+ warnings.warn(error_message) + enabled = False + elif self.device == "ipu": + supported_dtypes = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtypes: + error_message = "In IPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "IPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." + warnings.warn(error_message) + enabled = False + elif self.device == "hpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype: + error_message = "In HPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "HPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." + warnings.warn(error_message) + enabled = False + elif self.device == self.custom_backend_name: + supported_dtype = self.custom_device_mod.get_amp_supported_dtype() + if self.fast_dtype not in supported_dtype: + error_message = f"In {self.custom_backend_name} autocast, but the target dtype is not supported. " + error_message += f"Disabling autocast.\n {self.custom_backend_name} Autocast only supports dtypes of " + error_message += ( + ", ".join(str(dtype) for dtype in supported_dtype) + " currently." + ) + warnings.warn(error_message) + enabled = False + elif self.device == "cuda": + if ( + enabled + and self.fast_dtype == torch.bfloat16 + and not torch.cuda.is_bf16_supported() + ): + raise RuntimeError( + "Current CUDA Device does not support bfloat16. Please switch dtype to float16." + ) + elif self.device == "xla": + supported_dtype = [torch.float16, torch.bfloat16] + if self.fast_dtype not in supported_dtype: + error_message = "In XLA autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += ( + "XLA Autocast only supports dtype of torch.bfloat16 currently." 
+ ) + warnings.warn(error_message) + enabled = False + self._enabled = enabled + + def __enter__(self): + if torch._jit_internal.is_scripting(): + assert self.fast_dtype is not None + return self + + self.prev_cache_enabled = torch.is_autocast_cache_enabled() + if self.device == "cpu": + self.prev = torch.is_autocast_cpu_enabled() + self.prev_fastdtype = torch.get_autocast_cpu_dtype() + torch.set_autocast_cpu_enabled(self._enabled) + torch.set_autocast_cpu_dtype(self.fast_dtype) # type: ignore[arg-type] + torch.autocast_increment_nesting() + elif self.device == "xpu": + self.prev = torch.xpu.is_autocast_xpu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_enabled(self._enabled) # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "ipu": + self.prev = torch.is_autocast_ipu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined] + torch.set_autocast_ipu_enabled(self._enabled) # type: ignore[attr-defined] + torch.set_autocast_ipu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "hpu": + self.prev = torch.hpu.is_autocast_hpu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_enabled(self._enabled) # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "xla": + self.prev = torch.is_autocast_xla_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined] + torch.set_autocast_xla_enabled(self._enabled) # type: ignore[attr-defined] + torch.set_autocast_xla_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == self.custom_backend_name: + self.prev = self.custom_device_mod.is_autocast_enabled() + self.prev_fastdtype = self.custom_device_mod.get_autocast_dtype() + self.custom_device_mod.set_autocast_enabled(self._enabled) + self.custom_device_mod.set_autocast_dtype(self.fast_dtype) + torch.autocast_increment_nesting() + else: + self.prev = torch.is_autocast_enabled() + self.prev_fastdtype = torch.get_autocast_gpu_dtype() + torch.set_autocast_gpu_dtype(self.fast_dtype) # type: ignore[arg-type] + torch.set_autocast_enabled(self._enabled) + torch.autocast_increment_nesting() + torch.set_autocast_cache_enabled(self._cache_enabled) + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + + # Drop the cache when we exit to a nesting level that's outside any instance of autocast. 
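    # For example (assuming a CUDA-capable machine), the state saved in __enter__ and
    # restored below is what makes nested regions behave as expected:
    #
    #     with torch.autocast(device_type="cuda", dtype=torch.float16):
    #         with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
    #             ...  # inner region autocasts eligible ops to bfloat16
    #         ...      # on exit, the outer region's float16 dtype and enabled flag are restored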
+ if self.device == "cpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_cpu_enabled(self.prev) + torch.set_autocast_cpu_dtype(self.prev_fastdtype) + elif self.device == "xpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.xpu.set_autocast_xpu_enabled(self.prev) # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "ipu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_ipu_enabled(self.prev) # type: ignore[attr-defined] + torch.set_autocast_ipu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "hpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.hpu.set_autocast_hpu_enabled(self.prev) # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "xla": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_xla_enabled(self.prev) # type: ignore[attr-defined] + torch.set_autocast_xla_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == self.custom_backend_name: + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + self.custom_device_mod.set_autocast_enabled(self.prev) + self.custom_device_mod.set_autocast_dtype(self.prev_fastdtype) + else: + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_enabled(self.prev) + torch.set_autocast_gpu_dtype(self.prev_fastdtype) + torch.set_autocast_cache_enabled(self.prev_cache_enabled) + return False + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return autocast_decorator(self, func) + + +# These functions aren't meant for public usage. +# They are what we trace into a graph during pre_dispatch tracing +# when we encounter an autocast context manager. +def _enter_autocast(*vals): + # For pre-dispatch tracing, if a TorchFunction mode is active, we'll want to trace this into a graph. + if torch._C._is_torch_function_mode_enabled(): + return torch.overrides.handle_torch_function( + torch.amp._enter_autocast, [], *vals + ) + mode = torch.amp.autocast(*vals) + mode.__enter__() + return mode + + +def _exit_autocast(mode): + if torch._C._is_torch_function_mode_enabled(): + return torch.overrides.handle_torch_function(torch.amp._exit_autocast, [], mode) + mode.__exit__(None, None, None) diff --git a/venv/lib/python3.10/site-packages/torch/amp/grad_scaler.py b/venv/lib/python3.10/site-packages/torch/amp/grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..140ec67162f83146d737beae6bb6be560a705a31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/amp/grad_scaler.py @@ -0,0 +1,681 @@ +from __future__ import annotations + +import inspect +import warnings +from collections import abc, defaultdict +from enum import Enum +from typing import Any, cast, Dict, Iterable, List, Optional, overload, Tuple, Union + +import torch + + +__all__ = ["OptState", "GradScaler"] + + +class _MultiDeviceReplicator: + """Lazily serves copies of a tensor to requested devices. + + Copies are cached per-device. 
+ """ + + def __init__(self, master_tensor: torch.Tensor) -> None: + self.master = master_tensor + self._per_device_tensors: Dict[torch.device, torch.Tensor] = {} + + def get(self, device: torch.device) -> torch.Tensor: + retval = self._per_device_tensors.get(device, None) + if retval is None: + retval = self.master.to(device=device, non_blocking=True, copy=True) + self._per_device_tensors[device] = retval + return retval + + +# Defines default_factory for GradScaler's _per_optimizer_states defaultdict, +# as well as associated "enum" values. Prefers defining these at top level because +# - Lambdas can't be pickled, so we don't want to supply a lambda as the factory. +# - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler +# causes a circular reference, which we'd rather avoid. +class OptState(Enum): + READY = 0 + UNSCALED = 1 + STEPPED = 2 + + +def _refresh_per_optimizer_state() -> Dict[str, Any]: + return {"stage": OptState.READY, "found_inf_per_device": {}} + + +class GradScaler: + """An instance ``scaler`` of :class:`GradScaler`. + + Helps perform the steps of gradient scaling + conveniently. + + * ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor. + * ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``. + * ``scaler.update()`` updates ``scaler``'s scale factor. + + Example:: + + # Creates a GradScaler once at the beginning of training. + scaler = GradScaler() + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + output = model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + scaler.scale(loss).backward() + + # scaler.step() first unscales gradients of the optimizer's params. + # If gradients don't contain infs/NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + See the :ref:`Automatic Mixed Precision examples` for usage + (along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty, + and multiple losses/optimizers. + + ``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow, + a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if + the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used + without incurring inf or NaN gradient values. + ``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every + ``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`). + + * If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params + themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``. + + * If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual. + If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by + ``growth_factor``. + + The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its + value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these + iterations. 
After that, step skipping should occur rarely (once every few hundred or thousand iterations). + + Args: + device (str, optional, default="cuda"): Device type to use. Possible values are: 'cuda' and 'cpu'. + The type is the same as the `type` attribute of a :class:`torch.device`. + Thus, you may obtain the device type of a tensor using `Tensor.device.type`. + init_scale (float, optional, default=2.**16): Initial scale factor. + growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during + :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations. + backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during + :meth:`update` if inf/NaN gradients occur in an iteration. + growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients + that must occur for the scale to be multiplied by ``growth_factor``. + enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply + invokes the underlying ``optimizer.step()``, and other methods become no-ops. + Default: ``True`` + """ + + def __init__( + self, + device: str = "cuda", + init_scale: float = 2.0**16, + growth_factor: float = 2.0, + backoff_factor: float = 0.5, + growth_interval: int = 2000, + enabled: bool = True, + ) -> None: + self._device = device + self._enabled = enabled + if self._device == "cuda": + if enabled and torch.cuda.amp.common.amp_definitely_not_available(): + warnings.warn( + "torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling." + ) + self._enabled = False + + if self._enabled: + assert growth_factor > 1.0, "The growth factor must be > 1.0." + assert backoff_factor < 1.0, "The backoff factor must be < 1.0." + + self._init_scale = init_scale + # self._scale will be lazily initialized during the first call to scale() + self._scale: Optional[torch.Tensor] = None + self._growth_factor = growth_factor + self._backoff_factor = backoff_factor + self._growth_interval = growth_interval + self._init_growth_tracker = 0 + # self._growth_tracker will be lazily initialized during the first call to scale() + self._growth_tracker: Optional[torch.Tensor] = None + self._per_optimizer_states: Dict[int, Dict[str, Any]] = defaultdict( + _refresh_per_optimizer_state + ) + + def _check_scale_growth_tracker( + self, funcname: str + ) -> Tuple[torch.Tensor, torch.Tensor]: + fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration." + assert self._scale is not None, ( + f"Attempted {funcname} but _scale is None. " + fix + ) + assert self._growth_tracker is not None, ( + f"Attempted {funcname} but _growth_tracker is None. " + fix + ) + return (self._scale, self._growth_tracker) + + def _lazy_init_scale_growth_tracker(self, dev: torch.device) -> None: + assert self._growth_tracker is None, "_growth_tracker initialized before _scale" + self._scale = torch.full((), self._init_scale, dtype=torch.float32, device=dev) + self._growth_tracker = torch.full( + (), self._init_growth_tracker, dtype=torch.int32, device=dev + ) + + @overload + def scale(self, outputs: torch.Tensor) -> torch.Tensor: + ... + + @overload + def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]: + ... + + @overload + def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]: + ... + + @overload + def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]: + ... 
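A rough pure-Python sketch of the growth/backoff behaviour those constructor arguments control (illustrative only; the real update is the fused ``torch._amp_update_scale_`` call inside :meth:`update`, and ``history_of_inf_checks`` is a hypothetical list of per-iteration inf/NaN flags):

    scale, growth_tracker = 2.0**16, 0
    growth_factor, backoff_factor, growth_interval = 2.0, 0.5, 2000
    for found_inf in history_of_inf_checks:  # one bool per iteration (hypothetical)
        if found_inf:
            scale *= backoff_factor  # step was skipped: back off and reset the streak
            growth_tracker = 0
        else:
            growth_tracker += 1  # successful, unskipped step
            if growth_tracker == growth_interval:
                scale *= growth_factor  # long clean streak: grow the scale
                growth_tracker = 0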
+ + def scale( + self, + outputs: Union[torch.Tensor, Iterable[torch.Tensor]], + ) -> Union[torch.Tensor, Iterable[torch.Tensor]]: + """ + Multiplies ('scales') a tensor or list of tensors by the scale factor. + + Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned + unmodified. + + Args: + outputs (Tensor or iterable of Tensors): Outputs to scale. + """ + if not self._enabled: + return outputs + + # Short-circuit for the common case. + if isinstance(outputs, torch.Tensor): + if self._scale is None: + self._lazy_init_scale_growth_tracker(outputs.device) + assert self._scale is not None + return outputs * self._scale.to(device=outputs.device, non_blocking=True) + + # Invoke the more complex machinery only if we're treating multiple outputs. + stash: List[ + _MultiDeviceReplicator + ] = [] # holds a reference that can be overwritten by apply_scale + + def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]): + if isinstance(val, torch.Tensor): + if len(stash) == 0: + if self._scale is None: + self._lazy_init_scale_growth_tracker(val.device) + assert self._scale is not None + stash.append(_MultiDeviceReplicator(self._scale)) + return val * stash[0].get(val.device) + if isinstance(val, abc.Iterable): + iterable = map(apply_scale, val) + if isinstance(val, (list, tuple)): + return type(val)(iterable) + return iterable + raise ValueError("outputs must be a Tensor or an iterable of Tensors") + + return apply_scale(outputs) + + def _unscale_grads_( + self, + optimizer: torch.optim.Optimizer, + inv_scale: torch.Tensor, + found_inf: torch.Tensor, + allow_fp16: bool, + ) -> Dict[torch.device, torch.Tensor]: + per_device_inv_scale = _MultiDeviceReplicator(inv_scale) + per_device_found_inf = _MultiDeviceReplicator(found_inf) + + # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. + # There could be hundreds of grads, so we'd like to iterate through them just once. + # However, we don't know their devices or dtypes in advance. + + # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict + # Google says mypy struggles with defaultdicts type annotations. + per_device_and_dtype_grads: Dict[ + torch.device, Dict[torch.dtype, List[torch.Tensor]] + ] = defaultdict(lambda: defaultdict(list)) + with torch.no_grad(): + for group in optimizer.param_groups: + for param in group["params"]: + assert isinstance(param, torch.Tensor) + if param.grad is None: + continue + if (not allow_fp16) and param.grad.dtype == torch.float16: + raise ValueError("Attempting to unscale FP16 gradients.") + if param.grad.is_sparse: + # is_coalesced() == False means the sparse grad has values with duplicate indices. + # coalesce() deduplicates indices and adds all values that have the same index. + # For scaled fp16 values, there's a good chance coalescing will cause overflow, + # so we should check the coalesced _values(). + if param.grad.dtype is torch.float16: + param.grad = param.grad.coalesce() + to_unscale = param.grad._values() + else: + to_unscale = param.grad + + # TODO: is there a way to split by device and dtype without appending in the inner loop? 
+ per_device_and_dtype_grads[to_unscale.device][ + to_unscale.dtype + ].append(to_unscale) + + for device, per_dtype_grads in per_device_and_dtype_grads.items(): + for grads in per_dtype_grads.values(): + torch._amp_foreach_non_finite_check_and_unscale_( + grads, + per_device_found_inf.get(device), + per_device_inv_scale.get(device), + ) + + return per_device_found_inf._per_device_tensors + + def unscale_(self, optimizer: torch.optim.Optimizer) -> None: + """ + Divides ("unscales") the optimizer's gradient tensors by the scale factor. + + :meth:`unscale_` is optional, serving cases where you need to + :ref:`modify or inspect gradients` + between the backward pass(es) and :meth:`step`. + If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. + + Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: + + ... + scaler.scale(loss).backward() + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) + scaler.step(optimizer) + scaler.update() + + Args: + optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. + + .. note:: + :meth:`unscale_` does not incur a CPU-GPU sync. + + .. warning:: + :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, + and only after all gradients for that optimizer's assigned parameters have been accumulated. + Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. + + .. warning:: + :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute. + """ + if not self._enabled: + return + + self._check_scale_growth_tracker("unscale_") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.UNSCALED: + raise RuntimeError( + "unscale_() has already been called on this optimizer since the last update()." + ) + elif optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError("unscale_() is being called after step().") + + # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. + assert self._scale is not None + inv_scale = self._scale.double().reciprocal().float() + found_inf = torch.full((), 0.0, dtype=torch.float32, device=self._scale.device) + + optimizer_state["found_inf_per_device"] = self._unscale_grads_( + optimizer, inv_scale, found_inf, False + ) + optimizer_state["stage"] = OptState.UNSCALED + + def _maybe_opt_step( + self, + optimizer: torch.optim.Optimizer, + optimizer_state: Dict[str, Any], + *args: Any, + **kwargs: Any, + ) -> Optional[float]: + retval: Optional[float] = None + if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()): + retval = optimizer.step(*args, **kwargs) + return retval + + def step( + self, optimizer: torch.optim.Optimizer, *args: Any, **kwargs: Any + ) -> Optional[float]: + """Invoke ``unscale_(optimizer)`` followed by parameter update, if gradients are not infs/NaN. + + :meth:`step` carries out the following two operations: + + 1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer`` + earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs. + 2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled + gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params. 
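        For example, when combining the scaler with gradient accumulation (a hedged sketch in the
        spirit of the AMP examples; ``accumulation_steps``, ``model``, ``optimizer``, ``loss_fn``
        and ``data`` are assumed to exist), :meth:`step` and :meth:`update` run only on the
        iterations that actually step::

            accumulation_steps = 4  # assumed
            for i, (input, target) in enumerate(data):
                with torch.autocast(device_type="cuda"):
                    output = model(input)
                    loss = loss_fn(output, target) / accumulation_steps
                scaler.scale(loss).backward()
                if (i + 1) % accumulation_steps == 0:
                    # scaler.unscale_(optimizer)  # optional, e.g. before gradient clipping
                    scaler.step(optimizer)
                    scaler.update()
                    optimizer.zero_grad()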
+ + ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``. + + Returns the return value of ``optimizer.step(*args, **kwargs)``. + + Args: + optimizer (torch.optim.Optimizer): Optimizer that applies the gradients. + args: Any arguments. + kwargs: Any keyword arguments. + + .. warning:: + Closure use is not currently supported. + """ + if not self._enabled: + return optimizer.step(*args, **kwargs) + + if "closure" in kwargs: + raise RuntimeError( + "Closure use is not currently supported if GradScaler is enabled." + ) + + self._check_scale_growth_tracker("step") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError( + "step() has already been called since the last update()." + ) + + retval: Optional[float] = None + + if getattr(optimizer, "_step_supports_amp_scaling", False): + # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly. + # The contract with custom optimizers is that their step() should accept an additional, + # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information: + # it can query its own state, invoke unscale_ on itself, etc + # The contract above is being deprecated to avoid introducing `grad_scaler: GradScaler` argument + # to `Optimizer.step`. The new behavior is going to add two Tensor attributes of `grad_scale` + # and `found_inf` to the passed optimizer so that the optimizer can utilize those + # to skip the parameter updates or unscale gradients before updating parameters in + # the fused kernel, e.g. `FusedAdamMathFunctor`. + # In this behavior, `GradScaler._check_inf_per_device` is called if `OptState.READY`, + # while the method is expected to be called by users side, i.e. their optimizers. + kwargs_ = kwargs + has_grad_scaler_kwarg = ( + "grad_scaler" in inspect.signature(optimizer.step).parameters + ) + if has_grad_scaler_kwarg: + warnings.warn( + "GradScaler is going to stop passing itself as a keyword argument to the passed " + "optimizer. In the near future GradScaler registers `grad_scale: Tensor` and " + "`found_inf: Tensor` to the passed optimizer and let the optimizer use them directly.", + FutureWarning, + ) + kwargs_.update({"grad_scaler": self}) + else: + if optimizer_state["stage"] is OptState.READY: + self._check_inf_per_device(optimizer) + scaler = self._get_scale_async() + assert scaler is not None + found_inf = cast( + torch.Tensor, + sum( + [ + t.to(scaler.device, non_blocking=True) + for t in optimizer_state["found_inf_per_device"].values() + ] + ), + ) + optimizer.grad_scale = ( # type: ignore[attr-defined] + None if optimizer_state["stage"] == OptState.UNSCALED else scaler + ) + optimizer.found_inf = found_inf # type: ignore[attr-defined] + retval = optimizer.step(*args, **kwargs_) + optimizer_state["stage"] = OptState.STEPPED + if not has_grad_scaler_kwarg: + del optimizer.grad_scale # type: ignore[attr-defined] + del optimizer.found_inf # type: ignore[attr-defined] + return retval + + if optimizer_state["stage"] is OptState.READY: + self.unscale_(optimizer) + + assert ( + len(optimizer_state["found_inf_per_device"]) > 0 + ), "No inf checks were recorded for this optimizer." + + retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs) + + optimizer_state["stage"] = OptState.STEPPED + + return retval + + def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None: + """Update the scale factor. 
+ + If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` + to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively, + the scale is multiplied by ``growth_factor`` to increase it. + + Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not + used directly, it's used to fill GradScaler's internal scale tensor. So if + ``new_scale`` was a tensor, later in-place changes to that tensor will not further + affect the scale GradScaler uses internally.) + + Args: + new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor. + + .. warning:: + :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has + been invoked for all optimizers used this iteration. + + .. warning:: + For performance reasons, we do not check the scale factor value to avoid synchronizations, + so the scale factor is not guaranteed to be above 1. If the scale falls below 1 and/or + you are seeing NaNs in your gradients or loss, something is likely wrong. For example, + bf16-pretrained models are often incompatible with AMP/fp16 due to differing dynamic ranges. + """ + if not self._enabled: + return + + _scale, _growth_tracker = self._check_scale_growth_tracker("update") + + if new_scale is not None: + assert self._scale is not None + # Accept a new user-defined scale. + if isinstance(new_scale, float): + self._scale.fill_(new_scale) + else: + reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor or \ + torch.FloatTensor with requires_grad=False." + assert new_scale.device.type == self._device, reason + assert new_scale.numel() == 1, reason + assert new_scale.requires_grad is False, reason + self._scale.copy_(new_scale) + else: + # Consume shared inf/nan data collected from optimizers to update the scale. + # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. + found_infs = [ + found_inf.to(device=_scale.device, non_blocking=True) + for state in self._per_optimizer_states.values() + for found_inf in state["found_inf_per_device"].values() + ] + + assert len(found_infs) > 0, "No inf checks were recorded prior to update." + + found_inf_combined = found_infs[0] + if len(found_infs) > 1: + for i in range(1, len(found_infs)): + found_inf_combined += found_infs[i] + + torch._amp_update_scale_( + _scale, + _growth_tracker, + found_inf_combined, + self._growth_factor, + self._backoff_factor, + self._growth_interval, + ) + + # To prepare for next iteration, clear the data collected from optimizers this iteration. + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) + + def _get_scale_async(self) -> Optional[torch.Tensor]: + return self._scale + + def get_scale(self) -> float: + """Return a Python float containing the current scale, or 1.0 if scaling is disabled. + + .. warning:: + :meth:`get_scale` incurs a CPU-GPU sync. + """ + if self._enabled: + return ( + self._init_scale + if (scale := self._get_scale_async()) is None + else cast(float, scale.item()) + ) + return 1.0 + + def get_growth_factor(self) -> float: + r"""Return a Python float containing the scale growth factor.""" + return self._growth_factor + + def set_growth_factor(self, new_factor: float) -> None: + r"""Set a new scale growth factor. + + Args: + new_scale (float): Value to use as the new scale growth factor. 
+ """ + self._growth_factor = new_factor + + def get_backoff_factor(self) -> float: + r"""Return a Python float containing the scale backoff factor.""" + return self._backoff_factor + + def set_backoff_factor(self, new_factor: float) -> None: + r"""Set a new scale backoff factor. + + Args: + new_scale (float): Value to use as the new scale backoff factor. + """ + self._backoff_factor = new_factor + + def get_growth_interval(self) -> int: + r"""Return a Python int containing the growth interval.""" + return self._growth_interval + + def set_growth_interval(self, new_interval: int) -> None: + r"""Set a new growth interval. + + Args: + new_interval (int): Value to use as the new growth interval. + """ + self._growth_interval = new_interval + + def _get_growth_tracker(self) -> int: + if self._enabled: + return ( + self._init_growth_tracker + if self._growth_tracker is None + else cast(int, self._growth_tracker.item()) + ) + return 0 + + def is_enabled(self) -> bool: + r"""Return a bool indicating whether this instance is enabled.""" + return self._enabled + + def state_dict(self) -> Dict[str, Any]: + r"""Return the state of the scaler as a :class:`dict`. + + It contains five entries: + + * ``"scale"`` - a Python float containing the current scale + * ``"growth_factor"`` - a Python float containing the current growth factor + * ``"backoff_factor"`` - a Python float containing the current backoff factor + * ``"growth_interval"`` - a Python int containing the current growth interval + * ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps. + + If this instance is not enabled, returns an empty dict. + + .. note:: + If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict` + should be called after :meth:`update`. + """ + if self._enabled: + return { + "scale": self.get_scale(), + "growth_factor": self._growth_factor, + "backoff_factor": self._backoff_factor, + "growth_interval": self._growth_interval, + "_growth_tracker": self._get_growth_tracker(), + } + return {} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + r"""Load the scaler state. + + If this instance is disabled, :meth:`load_state_dict` is a no-op. + + Args: + state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`. + """ + if not self._enabled: + return + + if len(state_dict) == 0: + raise RuntimeError( + "The source state dict is empty, possibly because it was saved " + "from a disabled instance of GradScaler." + ) + + self._init_scale = cast(float, state_dict["scale"]) + if self._scale is not None: + self._scale.fill_(state_dict["scale"]) + self._growth_factor = cast(float, state_dict["growth_factor"]) + self._backoff_factor = cast(float, state_dict["backoff_factor"]) + self._growth_interval = cast(int, state_dict["growth_interval"]) + self._init_growth_tracker = cast(int, state_dict["_growth_tracker"]) + if self._growth_tracker is not None: + self._growth_tracker.fill_(state_dict["_growth_tracker"]) + + def __getstate__(self) -> Dict[str, Any]: + state = self.__dict__.copy() + if self._enabled: + assert len(self._per_optimizer_states) == 0, ( + "A GradScaler instance may only be pickled at the beginning " + "of an iteration, or at the end after scaler.update()." + ) + # Pickling _scale and _growth_tracker Tensors directly triggers + # "warnings.warn("pickle support for Storage will be removed in 1.5..." + # so instead, we set the unpickled instance up to reinitialize them lazily. 
+ state["_init_scale"] = self.get_scale() + state["_init_growth_tracker"] = self._get_growth_tracker() + state["_scale"] = None + state["_growth_tracker"] = None + return state + + def __setstate__(self, state: Dict[str, Any]) -> None: + self.__dict__.update(state) + + def _check_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]: + _scale, _ = self._check_scale_growth_tracker("_check_inf_per_device") + + dummy_inv_scale = torch.full((), 1.0, dtype=torch.float32, device=_scale.device) + found_inf = torch.full((), 0.0, dtype=torch.float32, device=_scale.device) + + self._per_optimizer_states[id(optimizer)][ + "found_inf_per_device" + ] = self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True) + + return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] + + def _found_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]: + return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] diff --git a/venv/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7165989295b4f2d912e4a34f17cc3443aed26ba0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py b/venv/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2e25592bdae6ac4fa8c8b50e67fcfd597e93ddc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..078a87ee1cd7e16a0f00fad574bd98ca49a0f10f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py b/venv/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..f393929bb7c2bbca028d43d8bf1ef27af7ccb23f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py @@ -0,0 +1,146 @@ +import hashlib +import json +from typing import Dict, Tuple + +import coremltools as ct # type: ignore[import] +from coremltools.converters.mil.input_types import TensorType # type: ignore[import] +from coremltools.converters.mil.mil import types # type: ignore[import] +from coremltools.models.neural_network import quantization_utils # type: ignore[import] + +import torch + +CT_METADATA_VERSION = "com.github.apple.coremltools.version" +CT_METADATA_SOURCE = "com.github.apple.coremltools.source" + + +class ScalarType: + Float = 0 + Double = 1 + Int = 2 + Long = 3 + Undefined = 4 + + +# Supported Tensor types in coremltools: 
+# https://github.com/apple/coremltools/blob/main/coremltools/converters/mil/frontend/torch/converter.py#L28 +torch_to_mil_types = { + ScalarType.Float: types.fp32, + ScalarType.Double: types.fp64, + ScalarType.Int: types.int32, + ScalarType.Long: types.int64, +} + + +class CoreMLComputeUnit: + CPU = "cpuOnly" + CPUAndGPU = "cpuAndGPU" + ALL = "all" + + +class CoreMLQuantizationMode: + LINEAR = "linear" + LINEAR_SYMMETRIC = "linear_symmetric" + NONE = "none" + + +def TensorSpec(shape, dtype=ScalarType.Float): + return (shape, dtype) + + +def CompileSpec( + inputs, + outputs, + backend=CoreMLComputeUnit.CPU, + allow_low_precision=True, + quantization_mode=CoreMLQuantizationMode.NONE, + mlmodel_export_path=None, +): + return ( + inputs, + outputs, + backend, + allow_low_precision, + quantization_mode, + mlmodel_export_path, + ) + + +def _check_enumerated_shape(shape): + for s in shape: + if not isinstance(s, (list, tuple)): + return False + return True + + +def _convert_to_mil_type(shape, dtype, name: str): + mil_shape = shape + if _check_enumerated_shape(shape): + mil_shape = ct.EnumeratedShapes(shape) + ml_type = TensorType(shape=mil_shape, dtype=torch_to_mil_types[dtype]) + ml_type.name = name + return ml_type + + +def preprocess(script_module: torch._C.ScriptObject, compile_spec: Dict[str, Tuple]): + spec = compile_spec["forward"] + ( + input_specs, + output_specs, + backend, + allow_low_precision, + quantization_mode, + mlmodel_export_path, + ) = spec + mil_inputs = [] + inputs = [] + for index, input in enumerate(input_specs): + shape, dtype = input + name = "input_" + str(index) + inputs.append([name, str(dtype), str(shape)]) + ml_type = _convert_to_mil_type(shape, dtype, name) + mil_inputs.append(ml_type) + model = torch.jit.RecursiveScriptModule._construct(script_module, lambda x: None) + mlmodel = ct.convert(model, inputs=mil_inputs) + + if quantization_mode != CoreMLQuantizationMode.NONE: + quant_model_spec = quantization_utils.quantize_weights( + mlmodel, nbits=8, quantization_mode=quantization_mode + ) + mlmodel = ct.models.MLModel(quant_model_spec) + + spec = mlmodel.get_spec() + assert len(spec.description.output) == len(output_specs) # type: ignore[attr-defined] + outputs = [] + for index, output in enumerate(output_specs): + shape, dtype = output + name = spec.description.output[index].name # type: ignore[attr-defined] + outputs.append([name, str(dtype), str(shape)]) + mlmodel = ct.models.model.MLModel(spec) + print(mlmodel) + + if mlmodel_export_path is not None: + print(f"Saving CoreML .mlmodel file to {mlmodel_export_path}") + mlmodel.save(mlmodel_export_path) + + config = { + "spec_ver": str(spec.specificationVersion), # type: ignore[attr-defined] + "backend": backend, + "allow_low_precision": str(allow_low_precision), + } + metadata = { + "coremltool_ver": mlmodel.user_defined_metadata[CT_METADATA_VERSION], + "torch_ver": mlmodel.user_defined_metadata[CT_METADATA_SOURCE], + } + coreml_compile_spec = { + "inputs": inputs, + "outputs": outputs, + "config": config, + "metadata": metadata, + } + mlmodel = spec.SerializeToString() # type: ignore[attr-defined] + + return { + "model": mlmodel, + "hash": str(hashlib.sha256(mlmodel).hexdigest()), + "extra": json.dumps(coreml_compile_spec), + } diff --git a/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__init__.py b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61c8ec0f4e71807a69c060fd4440e71156028078 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9e41be4803edc3b785d1885e83942cabc180688 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1659fb1a4524e3596b9bd723dbc5642b51ba594d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d69b9d648b3280c96becb53e8d6af90c9a57e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py @@ -0,0 +1,198 @@ +from typing import List, Optional + +import torch +from torch.backends._nnapi.serializer import _NnapiSerializer + +ANEURALNETWORKS_PREFER_LOW_POWER = 0 +ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1 +ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2 + + +class NnapiModule(torch.nn.Module): + """Torch Module that wraps an NNAPI Compilation. + + This module handles preparing the weights, initializing the + NNAPI TorchBind object, and adjusting the memory formats + of all inputs and outputs. 
+ """ + + # _nnapi.Compilation is defined + comp: Optional[torch.classes._nnapi.Compilation] # type: ignore[name-defined] + weights: List[torch.Tensor] + out_templates: List[torch.Tensor] + + def __init__( + self, + shape_compute_module: torch.nn.Module, + ser_model: torch.Tensor, + weights: List[torch.Tensor], + inp_mem_fmts: List[int], + out_mem_fmts: List[int], + compilation_preference: int, + relax_f32_to_f16: bool, + ): + super().__init__() + self.shape_compute_module = shape_compute_module + self.ser_model = ser_model + self.weights = weights + self.inp_mem_fmts = inp_mem_fmts + self.out_mem_fmts = out_mem_fmts + self.out_templates = [] + self.comp = None + self.compilation_preference = compilation_preference + self.relax_f32_to_f16 = relax_f32_to_f16 + + @torch.jit.export + def init(self, args: List[torch.Tensor]): + assert self.comp is None + self.out_templates = self.shape_compute_module.prepare(self.ser_model, args) # type: ignore[operator] + self.weights = [w.contiguous() for w in self.weights] + comp = torch.classes._nnapi.Compilation() + comp.init2( + self.ser_model, + self.weights, + self.compilation_preference, + self.relax_f32_to_f16, + ) + + self.comp = comp + + def forward(self, args: List[torch.Tensor]) -> List[torch.Tensor]: + if self.comp is None: + self.init(args) + comp = self.comp + assert comp is not None + outs = [torch.empty_like(out) for out in self.out_templates] + + assert len(args) == len(self.inp_mem_fmts) + fixed_args = [] + for idx in range(len(args)): + fmt = self.inp_mem_fmts[idx] + # These constants match the values in DimOrder in serializer.py + # TODO: See if it's possible to use those directly. + if fmt == 0: + fixed_args.append(args[idx].contiguous()) + elif fmt == 1: + fixed_args.append(args[idx].permute(0, 2, 3, 1).contiguous()) + else: + raise Exception("Invalid mem_fmt") + comp.run(fixed_args, outs) + assert len(outs) == len(self.out_mem_fmts) + for idx in range(len(self.out_templates)): + fmt = self.out_mem_fmts[idx] + # These constants match the values in DimOrder in serializer.py + # TODO: See if it's possible to use those directly. + if fmt in (0, 2): + pass + elif fmt == 1: + outs[idx] = outs[idx].permute(0, 3, 1, 2) + else: + raise Exception("Invalid mem_fmt") + return outs + + +def convert_model_to_nnapi( + model, + inputs, + serializer=None, + return_shapes=None, + use_int16_for_qint16=False, + compilation_preference=ANEURALNETWORKS_PREFER_SUSTAINED_SPEED, + relax_f32_to_f16=False, +): + ( + shape_compute_module, + ser_model_tensor, + used_weights, + inp_mem_fmts, + out_mem_fmts, + retval_count, + ) = process_for_nnapi( + model, inputs, serializer, return_shapes, use_int16_for_qint16 + ) + + nnapi_model = NnapiModule( + shape_compute_module, + ser_model_tensor, + used_weights, + inp_mem_fmts, + out_mem_fmts, + compilation_preference, + relax_f32_to_f16, + ) + + class NnapiInterfaceWrapper(torch.nn.Module): + """NNAPI list-ifying and de-list-ifying wrapper. + + NNAPI always expects a list of inputs and provides a list of outputs. + This module allows us to accept inputs as separate arguments. + It returns results as either a single tensor or tuple, + matching the original module. + """ + + def __init__(self, mod): + super().__init__() + self.mod = mod + + wrapper_model_py = NnapiInterfaceWrapper(nnapi_model) + wrapper_model = torch.jit.script(wrapper_model_py) + # TODO: Maybe make these names match the original. 
+ arg_list = ", ".join(f"arg_{idx}" for idx in range(len(inputs))) + if retval_count < 0: + ret_expr = "retvals[0]" + else: + ret_expr = "".join(f"retvals[{idx}], " for idx in range(retval_count)) + wrapper_model.define( + f"def forward(self, {arg_list}):\n" + f" retvals = self.mod([{arg_list}])\n" + f" return {ret_expr}\n" + ) + return wrapper_model + + +def process_for_nnapi( + model, inputs, serializer=None, return_shapes=None, use_int16_for_qint16=False +): + model = torch.jit.freeze(model) + + if isinstance(inputs, torch.Tensor): + inputs = [inputs] + + serializer = serializer or _NnapiSerializer( + config=None, use_int16_for_qint16=use_int16_for_qint16 + ) + ( + ser_model, + used_weights, + inp_mem_fmts, + out_mem_fmts, + shape_compute_lines, + retval_count, + ) = serializer.serialize_model(model, inputs, return_shapes) + ser_model_tensor = torch.tensor(ser_model, dtype=torch.int32) + + # We have to create a new class here every time this function is called + # because module.define adds a method to the *class*, not the instance. + class ShapeComputeModule(torch.nn.Module): + """Code-gen-ed module for tensor shape computation. + + module.prepare will mutate ser_model according to the computed operand + shapes, based on the shapes of args. Returns a list of output templates. + """ + + pass + + shape_compute_module = torch.jit.script(ShapeComputeModule()) + real_shape_compute_lines = [ + "def prepare(self, ser_model: torch.Tensor, args: List[torch.Tensor]) -> List[torch.Tensor]:\n", + ] + [f" {line}\n" for line in shape_compute_lines] + shape_compute_module.define("".join(real_shape_compute_lines)) + + return ( + shape_compute_module, + ser_model_tensor, + used_weights, + inp_mem_fmts, + out_mem_fmts, + retval_count, + ) diff --git a/venv/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py new file mode 100644 index 0000000000000000000000000000000000000000..960f4091723df384f0b778df59604d1921992eb3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py @@ -0,0 +1,2188 @@ +import array +import enum +import functools +import logging +import operator +import struct +import sys +from typing import List, NamedTuple, Optional, Tuple + +import torch + + +# TODO: Add type annotations +# TODO: Check tensor types for ops + + +LOG = logging.getLogger("nnapi_serialize") + + +class NNAPI_OperandCode: + FLOAT32 = 0 + INT32 = 1 + UINT32 = 2 + TENSOR_FLOAT32 = 3 + TENSOR_INT32 = 4 + TENSOR_QUANT8_ASYMM = 5 + BOOL = 6 + TENSOR_QUANT16_SYMM = 7 + TENSOR_FLOAT16 = 8 + TENSOR_BOOL8 = 9 + FLOAT16 = 10 + TENSOR_QUANT8_SYMM_PER_CHANNEL = 11 + TENSOR_QUANT16_ASYMM = 12 + + +class NNAPI_OperationCode: + ADD = 0 + AVERAGE_POOL_2D = 1 + CONCATENATION = 2 + CONV_2D = 3 + DEPTHWISE_CONV_2D = 4 + DEPTH_TO_SPACE = 5 + DEQUANTIZE = 6 + EMBEDDING_LOOKUP = 7 + FLOOR = 8 + FULLY_CONNECTED = 9 + HASHTABLE_LOOKUP = 10 + L2_NORMALIZATION = 11 + L2_POOL_2D = 12 + LOCAL_RESPONSE_NORMALIZATION = 13 + LOGISTIC = 14 + LSH_PROJECTION = 15 + LSTM = 16 + MAX_POOL_2D = 17 + MUL = 18 + RELU = 19 + RELU1 = 20 + RELU6 = 21 + RESHAPE = 22 + RESIZE_BILINEAR = 23 + RNN = 24 + SOFTMAX = 25 + SPACE_TO_DEPTH = 26 + SVDF = 27 + TANH = 28 + BATCH_TO_SPACE_ND = 29 + DIV = 30 + MEAN = 31 + PAD = 32 + SPACE_TO_BATCH_ND = 33 + SQUEEZE = 34 + STRIDED_SLICE = 35 + SUB = 36 + TRANSPOSE = 37 + ABS = 38 + ARGMAX = 39 + ARGMIN = 40 + AXIS_ALIGNED_BBOX_TRANSFORM = 41 + BIDIRECTIONAL_SEQUENCE_LSTM = 42 + BIDIRECTIONAL_SEQUENCE_RNN = 43 + 
BOX_WITH_NMS_LIMIT = 44 + CAST = 45 + CHANNEL_SHUFFLE = 46 + DETECTION_POSTPROCESSING = 47 + EQUAL = 48 + EXP = 49 + EXPAND_DIMS = 50 + GATHER = 51 + GENERATE_PROPOSALS = 52 + GREATER = 53 + GREATER_EQUAL = 54 + GROUPED_CONV_2D = 55 + HEATMAP_MAX_KEYPOINT = 56 + INSTANCE_NORMALIZATION = 57 + LESS = 58 + LESS_EQUAL = 59 + LOG = 60 + LOGICAL_AND = 61 + LOGICAL_NOT = 62 + LOGICAL_OR = 63 + LOG_SOFTMAX = 64 + MAXIMUM = 65 + MINIMUM = 66 + NEG = 67 + NOT_EQUAL = 68 + PAD_V2 = 69 + POW = 70 + PRELU = 71 + QUANTIZE = 72 + QUANTIZED_16BIT_LSTM = 73 + RANDOM_MULTINOMIAL = 74 + REDUCE_ALL = 75 + REDUCE_ANY = 76 + REDUCE_MAX = 77 + REDUCE_MIN = 78 + REDUCE_PROD = 79 + REDUCE_SUM = 80 + ROI_ALIGN = 81 + ROI_POOLING = 82 + RSQRT = 83 + SELECT = 84 + SIN = 85 + SLICE = 86 + SPLIT = 87 + SQRT = 88 + TILE = 89 + TOPK_V2 = 90 + TRANSPOSE_CONV_2D = 91 + UNIDIRECTIONAL_SEQUENCE_LSTM = 92 + UNIDIRECTIONAL_SEQUENCE_RNN = 93 + RESIZE_NEAREST_NEIGHBOR = 94 + + +class NNAPI_FuseCode: + FUSED_NONE = 0 + FUSED_RELU = 1 + FUSED_RELU1 = 2 + FUSED_RELU6 = 3 + + +class OperandValueSourceType: + IMMEDIATE = 0 + NUMBERED_BUFFER = 2 + NUMBERED_MEMORY = 3 + + +# Scalar types that appear explicitly in models. +# These must be kept in sync with +# AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS. +# TODO: Expose these directly to Python to avoid maintaining this list. +class TorchScalarTypes(enum.Enum): + QUINT8 = 13 + + +def approx_equal(lhs, rhs, tolerance=1e-6): + return abs(lhs - rhs) <= tolerance * min(lhs, rhs) + + +def tensor_size(op_type, dims): + ITEM_SIZES = { + NNAPI_OperandCode.TENSOR_FLOAT32: 4, + NNAPI_OperandCode.TENSOR_INT32: 4, + NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: 1, + NNAPI_OperandCode.TENSOR_QUANT16_SYMM: 2, + NNAPI_OperandCode.TENSOR_QUANT16_ASYMM: 2, + } + size = ITEM_SIZES[op_type] + for d in dims: + size *= d + return size + + +def change_element(tup, index, value): + ls = list(tup) + ls[index] = value + return tuple(ls) + + +class ConvPoolArgs2d(NamedTuple): + """Configuration arguments for a convolution.""" + + kernel_h: int + kernel_w: int + stride_h: int + stride_w: int + pad_t: int + pad_b: int + pad_l: int + pad_r: int + dilation_h: int + dilation_w: int + group: int + + +class DimOrder(enum.Enum): + PRESUMED_CONTIGUOUS = 0 + CHANNELS_LAST = 1 + SCALAR_OR_VECTOR = 2 + UNKNOWN_CONSTANT = 999 + + +class Operand(NamedTuple): + """Represenation of an NNAPI operand.""" + + # NNAPI operand type. One of NNAPI_OperandCode. + # TODO: Make this an enum. + op_type: int + + # This is always the PyTorch shape, which is NCHW for feature maps. + # The actual NNAPI operand might have a transposed shape. + # we use 0 for load time dynamic shapes & -1 for runtime dynamic shapes + shape: Tuple[int, ...] + + # Specifies how the shape of the operand that we define in NNAPI + # relates to the shape we track above. + # - PRESUMED_CONTIGUOUS: physical NNAPI operand will exactly match + # the shape of the PyTorch tensor. + # - CHANNELS_LAST: The PyTorch tensor is expected to be NCHW, and + # the NNAPI operand will be represented explicitly as NHWC. + dim_order: DimOrder + + # Quantization params + scale: float + zero_point: int + + def use_nchw(self): + if self.dim_order is DimOrder.PRESUMED_CONTIGUOUS: + return True + if self.dim_order is DimOrder.CHANNELS_LAST: + return False + raise Exception("Unknown dim order") + + +def broadcast_shapes(shape1, shape2): + assert len(shape1) > 0 + assert len(shape2) > 0 + s1 = list(shape1) + s2 = list(shape2) + # TODO: Support non-equal-rank broadcast where semantics match. 
+ # This can be tricky for NHWC tensors because dimension orders + # don't match between PT and NNAPI, even though semantics match. + if len(s1) > len(s2): + # s2 = [1] * (len(s1) - len(s2)) + s2 + raise Exception("Non-equal-rank broadcast is not supported yet.") + if len(s2) > len(s1): + # s3 = [1] * (len(s2) - len(s1)) + s1 + raise Exception("Non-equal-rank broadcast is not supported yet.") + ret = [] + for d1, d2 in zip(s1, s2): + if d1 == 1: + ret.append(d2) + elif d2 == 1: + ret.append(d1) + elif d1 == d2: + ret.append(d1) + else: + raise Exception(f"Cannot broadcast shapes: {shape1} and {shape2}") + return tuple(ret) + + +def get_conv_pool_shape(image_shape, args, out_ch, transpose): + batch, in_c, in_h, in_w = image_shape + + # TODO: Handle dilation + if args.dilation_h != 1 or args.dilation_w != 1: + raise Exception("Dilation not supported yet.") + + if transpose: + out_h = (in_h - 1) * args.stride_h + args.kernel_h - args.pad_t - args.pad_b + out_w = (in_w - 1) * args.stride_w + args.kernel_w - args.pad_l - args.pad_l + else: + out_h = (in_h - args.kernel_h + args.pad_t + args.pad_b) // args.stride_h + 1 + out_w = (in_w - args.kernel_w + args.pad_l + args.pad_r) // args.stride_w + 1 + + # Handle variable-sized tensors. + if in_h == 0: + out_h = 0 + if in_w == 0: + out_w = 0 + + out_shape = (batch, out_ch, out_h, out_w) + return out_shape + + +def fix_shape(shape, dim_order): + # Return the actual shape that an operand should have in NNAPI, + # given a PyTorch shape and dimension order. This is where we + # convert from PyTorch's "always NCHW" shape to explicit NHWC. + if dim_order is DimOrder.PRESUMED_CONTIGUOUS: + return shape + if dim_order is DimOrder.CHANNELS_LAST: + return tuple([shape[0]] + list(shape[2:]) + [shape[1]]) + if dim_order is DimOrder.SCALAR_OR_VECTOR: + assert len(shape) == 0 or len(shape) == 1 + return shape + if dim_order is DimOrder.UNKNOWN_CONSTANT: + # XXX think this through + return shape + raise Exception(f"Bad dim_order: {dim_order!r}.") + + +def reverse_map_dim(dim_order, d): + # Return the original PyTorch dimension position for a given dimension. + # d should be the dimension that NNAPI will see. + # reverse_map_dim(PRESUMED_CONTIGUOUS, x) == x + # reverse_map_dim(CHANNELS_LAST, 3) == 1 + if dim_order in (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.SCALAR_OR_VECTOR): + return d + assert dim_order is DimOrder.CHANNELS_LAST + return [0, 2, 3, 1][d] + + +def flex_name(op_id, dim): + # Return the local variable name for the computed flexible size + # for a given op and dimension. + return f"s_{op_id}_{dim}" + + +class _NnapiSerializer: + def __init__(self, config, use_int16_for_qint16=False): + self.operands = [] + self.values = [] + self.operations = [] + self.value_data = [] + self.operation_args = [] + self.inputs = [] + self.outputs = [] + self.flexible_shape_computation_lines = [] + + self.modules = {} + self.constants = {} + self.tensor_sequences = {} + self.jitval_operand_map = {} + self.cached_immediates = {} + self.used_weights = [] + self.weight_offset = 0 + self.use_int16_for_qint16 = use_int16_for_qint16 + + if config is None: + config = {} + + def get_next_operand_id(self): + return len(self.operands) + + # Add a tensor operand corresponding to a JIT Value. + # Returns the NNAPI operand ID. Can be looked up later with + # get_tensor_operand_by_jitval. 
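    # Concrete examples of the layout helpers defined above (values follow directly
    # from fix_shape, reverse_map_dim and flex_name):
    #
    #     fix_shape((1, 3, 224, 224), DimOrder.CHANNELS_LAST)        == (1, 224, 224, 3)
    #     fix_shape((1, 3, 224, 224), DimOrder.PRESUMED_CONTIGUOUS)  == (1, 3, 224, 224)
    #     reverse_map_dim(DimOrder.CHANNELS_LAST, 3)                 == 1   # NNAPI dim 3 is PyTorch's C
    #     flex_name(7, 2)                                            == "s_7_2"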
+ def add_tensor_operand(self, jitval, oper): + assert isinstance(oper, Operand) + if jitval in self.jitval_operand_map: + raise Exception(f"Duplicate tensor: {jitval!r}") + + operand_id = self.get_next_operand_id() + self.operands.append(oper) + self.jitval_operand_map[jitval] = operand_id + return operand_id + + # Add a tensor operand that does not correspond to a JIT Value. + # Useful for cases where multiple NNAPI operands are required + # to implement one JIT IR node. Returns the NNAPI operand ID. + def add_anonymous_tensor_operand(self, oper): + assert isinstance(oper, Operand) + operand_id = self.get_next_operand_id() + self.operands.append(oper) + return operand_id + + def torch_tensor_to_operand(self, tensor, dim_order): + dtype = str(tensor.dtype).replace("torch.", "") + scale = 0.0 + zero_point = 0 + if dtype == "float32": + op_type = NNAPI_OperandCode.TENSOR_FLOAT32 + elif dtype == "int32": + op_type = NNAPI_OperandCode.TENSOR_INT32 + elif dtype == "quint8": + op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM + scale = tensor.q_scale() + zero_point = tensor.q_zero_point() + elif dtype == "qint32": + op_type = NNAPI_OperandCode.TENSOR_INT32 + scale = tensor.q_scale() + zero_point = tensor.q_zero_point() + assert zero_point == 0 + elif dtype == "int16": + if self.use_int16_for_qint16: + nnapi_dtype = getattr(tensor, "nnapi_dtype", None) + op_codes = ( + NNAPI_OperandCode.TENSOR_QUANT16_SYMM, + NNAPI_OperandCode.TENSOR_QUANT16_ASYMM, + ) + if nnapi_dtype in op_codes: + op_type = nnapi_dtype + scale = tensor.nnapi_scale + zero_point = tensor.nnapi_zero_point + else: + raise Exception( + f"`nnapi_type` needs to be one of {op_codes} for `int16`" + ) + else: + raise Exception( + "`int16` isn't supported. If you're trying to represent NNAPI" + " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`" + ) + else: + raise Exception(f"Can't handle input with dtype '{tensor.dtype}'") + return Operand( + shape=tuple(tensor.shape), + op_type=op_type, + dim_order=dim_order, + scale=scale, + zero_point=zero_point, + ) + + def add_tensor_operand_for_input(self, arg_idx, jitval, tensor): + dim_order = ( + DimOrder.CHANNELS_LAST + if getattr(tensor, "nnapi_nhwc", False) + else DimOrder.PRESUMED_CONTIGUOUS + ) + toper = self.torch_tensor_to_operand(tensor, dim_order) + operand_id = self.add_tensor_operand(jitval, toper) + self.inputs.append(operand_id) + for dim, size in enumerate(tensor.shape): + if size == 0: + self.compute_operand_shape( + operand_id, dim, f"args[{arg_idx}].shape[{dim}]" + ) + return operand_id + + def add_tensor_operand_for_weight( + self, tensor, dim_order=DimOrder.UNKNOWN_CONSTANT + ): + toper = self.torch_tensor_to_operand(tensor, dim_order) + operand_id = len(self.operands) + self.operands.append(toper) + tsize = tensor_size(toper.op_type, toper.shape) + psize = ((tsize - 1) | 0x3) + 1 + self.values.append((operand_id, OperandValueSourceType.NUMBERED_BUFFER)) + buf_num = len(self.used_weights) + offset = 0 + self.value_data.append(struct.pack("iii", buf_num, offset, tsize)) + # For NHWC NNAPI op, lay out data in the same dim order by permuting torch tensor + if dim_order == DimOrder.CHANNELS_LAST: + tensor = tensor.permute(0, 2, 3, 1) + self.used_weights.append(tensor) + return operand_id + + def add_immediate_operand(self, code, value, dims): + assert isinstance(dims, tuple) + cache_key = (code, value) + if cache_key not in self.cached_immediates: + operand_id = len(self.operands) + self.operands.append(Operand(code, dims, DimOrder.SCALAR_OR_VECTOR, 0.0, 0)) + 
self.values.append((operand_id, OperandValueSourceType.IMMEDIATE)) + self.value_data.append(value) + self.cached_immediates[cache_key] = operand_id + return self.cached_immediates[cache_key] + + def add_immediate_int_scalar(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.INT32, struct.pack("i", value), () + ) + + def add_immediate_float_scalar(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.FLOAT32, struct.pack("f", value), () + ) + + def add_immediate_bool_scalar(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.BOOL, b"\x01" if value else b"\x00", () + ) + + def add_immediate_int_vector(self, value): + return self.add_immediate_operand( + NNAPI_OperandCode.TENSOR_INT32, + array.array("i", value).tobytes(), + (len(value),), + ) + + def has_operand_for_jitval(self, jitval): + return jitval in self.jitval_operand_map + + def get_tensor_operand_by_jitval(self, jitval): + operand_id = self.jitval_operand_map[jitval] + return (operand_id, self.operands[operand_id]) + + def get_tensor_operand_by_jitval_fixed_size(self, jitval): + op_id, oper = self.get_tensor_operand_by_jitval(jitval) + for s in oper.shape: + if s == 0: + # TODO: Improve this error message, possibly after converting + # many callsites to support flexible size. + raise Exception("Flexible size is not supported for this operand.") + if s < 0: + # runtime flex + LOG.warning("Operand %s has runtime flex shape", oper) + return op_id, oper + + def get_tensor_operand_or_constant( + self, jitval, dim_order=DimOrder.PRESUMED_CONTIGUOUS + ): + operand_id = self.jitval_operand_map.get(jitval) + if operand_id is None: + _, value = self.get_constant_value(jitval, "TensorType") + operand_id = self.add_tensor_operand_for_weight(value, dim_order) + return (operand_id, self.operands[operand_id]) + + def get_tensor_operand_for_weight(self, jitval): + _, value = self.get_constant_value(jitval, "TensorType") + operand_id = self.add_tensor_operand_for_weight(value) + return (operand_id, self.operands[operand_id]) + + def add_operation(self, opcode, inputs, outputs): + self.operations.append((opcode, len(inputs), len(outputs))) + self.operation_args.extend(inputs + outputs) + + def add_tensor_sequence(self, jitval, values): + assert jitval not in self.tensor_sequences + self.tensor_sequences[jitval] = values + + def add_constant_value(self, jitval, ctype, value): + assert jitval not in self.constants + self.constants[jitval] = (ctype, value) + + def get_constant_value(self, jitval, typekind=None): + record = self.constants.get(jitval) + if record is None: + raise Exception(f"Could not find constant value for '{jitval!r}'.") + ctype, _ = record + if typekind is not None and ctype.kind() != typekind: + raise Exception( + f"Expected constant value of type {typekind}, but got {ctype.kind()} for value '{jitval!r}'" + ) + return record + + def operand_to_template_torchscript(self, op_id, oper, shape=None): + """Return a TorchScript expression to build a template for a given operand.""" + if shape is None: + shape = oper.shape + else: + assert len(shape) == len(oper.shape) + + shape_parts = ["("] + for d, s in enumerate(shape): + if s > 0: + # Fixed shape dimension: just add the value. + shape_parts.append(str(s)) + elif s == 0: + # Load time flexible shape dimension: it should have been computed in a variable. 
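+ # (That variable is named s_<op_id>_<dim>; see flex_name above.)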
+ shape_parts.append(flex_name(op_id, d)) + elif s == -1: + # Runtime flexible shape + shape_parts.append("0") + else: + raise Exception("Unknown dim value, dimensions should be >= -1") + shape_parts.append(",") + shape_parts.append(")") + shape_code = "".join(shape_parts) + if oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32: + return f"torch.zeros({shape_code}, dtype=torch.float32)" + elif oper.op_type == NNAPI_OperandCode.TENSOR_INT32: + return f"torch.zeros({shape_code}, dtype=torch.int32)" + elif oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: + return ( + f"torch.quantize_per_tensor(" + f"torch.zeros(1), scale={oper.scale}, zero_point={oper.zero_point}, dtype=torch.quint8)" + f".expand({shape_code}).contiguous()" + ) + elif oper.op_type in ( + NNAPI_OperandCode.TENSOR_QUANT16_ASYMM, + NNAPI_OperandCode.TENSOR_QUANT16_SYMM, + ): + if self.use_int16_for_qint16: + return f"torch.zeros({shape_code}, dtype=torch.int16)" + else: + raise Exception( + "`int16` isn't supported. If you're trying to represent NNAPI" + " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`" + ) + + raise Exception(f"Unsupported output operand type: {oper.op_type}") + + def forward_operand_shape(self, out_op_id, out_dim, in_op_id, in_dim): + self.compute_operand_shape(out_op_id, out_dim, flex_name(in_op_id, in_dim)) + + def compute_operand_shape(self, op_id, dim, expr): + self.flexible_shape_computation_lines.append( + f"{flex_name(op_id, dim)} = {expr}" + ) + + def transpose_to_nhwc(self, in_id, oper): + if oper.shape[2:] != (1, 1): + raise Exception("Automatic transpose only supported for H,W == 1,1") + + out_oper = oper._replace(dim_order=DimOrder.CHANNELS_LAST) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector([0, 2, 3, 1]) + + outputs = [None] * 1 + outputs[0] = self.add_anonymous_tensor_operand(out_oper) + + self.add_operation(NNAPI_OperationCode.TRANSPOSE, inputs, outputs) + + return outputs[0], out_oper + + # Transpose inputs as necessary to allow broadcasting. + def transpose_for_broadcast(self, in0_id, in0_oper, in1_id, in1_oper): + if in0_oper.dim_order == in1_oper.dim_order: + return in0_id, in0_oper, in1_id, in1_oper + + # Assume NHWC is preferred if there is a mismatch. 
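+ # The PRESUMED_CONTIGUOUS input is converted with transpose_to_nhwc; the CHANNELS_LAST input passes through unchanged.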
+ orders = (in0_oper.dim_order, in1_oper.dim_order) + if orders == (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.CHANNELS_LAST): + return self.transpose_to_nhwc(in0_id, in0_oper) + (in1_id, in1_oper) + if orders == (DimOrder.CHANNELS_LAST, DimOrder.PRESUMED_CONTIGUOUS): + return (in0_id, in0_oper) + self.transpose_to_nhwc(in1_id, in1_oper) + + raise Exception( + f"Automatic transpose not supported for dim_orders: {in0_oper.dim_order!r}, {in1_oper.dim_order!r}" + ) + + def get_size_arg(self, jitval): + ctype, value = self.get_constant_value(jitval) + if ctype.kind() == "ListType": + assert ctype.getElementType().kind() == "IntType" + return value + raise Exception(f"Can't handle size arg of type '{ctype!r}' for '{jitval!r}'") + + def get_conv_pool_args_2d_from_pack(self, kernel_size, packed_config): + pc = [i.item() for i in packed_config] + assert pc[0] == 2 + strides = [pc[1], pc[2]] + paddings = [pc[3], pc[4]] + dilations = [pc[5], pc[6]] + output_padding = [pc[7], pc[8]] + group_num = pc[9] + + assert len(pc) == 11 + assert output_padding == [0, 0] + + return self.get_conv_pool_args_2d_common( + kernel_size, strides, paddings, dilations, group_num + ) + + def get_conv_pool_args_2d_from_jit( + self, kernel_size, stride, padding, dilation=None, group=None + ): + strides = self.get_size_arg(stride) + paddings = self.get_size_arg(padding) + if dilation is None: + dilations = [1, 1] + else: + dilations = self.get_size_arg(dilation) + if group is not None: + _, group_num = self.get_constant_value(group, "IntType") + else: + group_num = None + return self.get_conv_pool_args_2d_common( + kernel_size, strides, paddings, dilations, group_num + ) + + def get_conv_pool_args_2d_common( + self, kernel_size, strides, paddings, dilations, group_num + ): + kernels = list(kernel_size) + + assert len(kernels) == 2 + assert len(strides) == 2 + assert len(paddings) == 2 + assert len(dilations) == 2 + + # NNAPI uses 4 values for padding. 
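+ # Expand PyTorch's symmetric (pad_h, pad_w) pair into NNAPI's explicit (top, bottom, left, right).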
+ ph, pw = paddings + real_paddings = [ph, ph, pw, pw] + + return ConvPoolArgs2d( + *(kernels + strides + real_paddings + dilations + [group_num]) + ) + + def serialize_model(self, model, inputs, return_shapes=None): + self.add_immediate_bool_scalar(False) + self.add_immediate_bool_scalar(True) + + inp_dim_orders = [] + out_dim_orders = [] + + self_jitval = next(model.graph.inputs()) + self.add_constant_value(self_jitval, self_jitval.type(), model) + + for arg_idx, (input_value, input_tensor) in enumerate( + zip(list(model.graph.inputs())[1:], inputs) + ): + op_id = self.add_tensor_operand_for_input( + arg_idx, input_value, input_tensor + ) + inp_dim_orders.append(self.operands[op_id].dim_order.value) + + for idx, node in enumerate(model.graph.nodes()): + LOG.debug("Processing node #%d: %r", idx, node) + self.add_node(node) + + retn = model.graph.return_node() + assert retn.inputsSize() == 1 + assert retn.outputsSize() == 0 + retn_input = retn.inputsAt(0) + template_return_lines = ["return ["] + if retn_input.type().kind() == "TensorType": + return_values = [retn_input] + retval_count = -1 + elif retn_input.type().kind() == "TupleType": + return_values = self.tensor_sequences[retn_input] + retval_count = len(return_values) + else: + raise Exception(f"Unsupported return type: {retn_input.type()}") + + if return_shapes is not None: + assert len(return_shapes) == len(return_values) + for i, v in enumerate(return_values): + op_id = self.jitval_operand_map[v] + self.outputs.append(op_id) + out_dim_orders.append(self.operands[op_id].dim_order.value) + shape = return_shapes[i] if return_shapes else None + template_return_lines.append( + self.operand_to_template_torchscript(op_id, self.operands[op_id], shape) + + "," + ) + template_return_lines.append("]") + + model = [] + + version = 1 + header = struct.pack( + "iiiiii", + version, + len(self.operands), + len(self.values), + len(self.operations), + len(self.inputs), + len(self.outputs), + ) + model.append(header) + + serialized_values, serialized_value_data = self.serialize_values() + + model.extend( + struct.pack("iifi", t, len(d), s, z) for (t, d, _m, s, z) in self.operands + ) + model.extend(serialized_values) + model.extend(struct.pack("iii", *x) for x in self.operations) + + # Compact the model so we can get its length so far. + model = [b"".join(model)] + model_offset = len(model[0]) + # Model offset is the index into the model (in 32-bit words, not bytes) + # of the next dimension we're about to serialize. If it's 0, + # generate code to mutate it before passing to NNAPI. 
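+ # (A dimension serialized as 0 is a load-time flexible size; the generated lines below overwrite that slot of ser_model with the computed value.)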
+ assert model_offset % 4 == 0 + model_offset = int(model_offset / 4) + + for op_id, (_, dims, dim_order, _, _) in enumerate(self.operands): + shape = fix_shape(dims, dim_order) + for d, s in enumerate(shape): + if s == 0: + pt_d = reverse_map_dim(dim_order, d) + self.flexible_shape_computation_lines.append( + f"ser_model[{model_offset}] = {flex_name(op_id, pt_d)}" + ) + model_offset += 1 + + # convert runtime flex shape from -1 to 0 + shape = tuple(d if d != -1 else 0 for d in shape) + model.append(self.serialize_ints(shape)) + + model.extend(serialized_value_data) + model.append(self.serialize_ints(self.operation_args)) + model.append(self.serialize_ints(self.inputs)) + model.append(self.serialize_ints(self.outputs)) + + self.flexible_shape_computation_lines.extend(template_return_lines) + + return ( + array.array("i", b"".join(model)), + self.used_weights, + inp_dim_orders, + out_dim_orders, + self.flexible_shape_computation_lines, + retval_count, + ) + + def serialize_values(self): + serialized_values = [] + serialized_value_data = [] + assert len(self.values) == len(self.value_data) + for (op_index, source_type), data in zip(self.values, self.value_data): + source_length = len(data) + + # Pad with 0 bytes out to a multiple of 4 for alignment. + physical_length = ((source_length - 1) | 0x3) + 1 + padded_data = data + (b"\0" * (physical_length - source_length)) + + serialized_values.append( + struct.pack("iii", op_index, source_type, source_length) + ) + serialized_value_data.append(padded_data) + + return serialized_values, serialized_value_data + + @staticmethod + def serialize_ints(ints): + return array.array("i", ints).tobytes() + + ADDER_MAP = { + "prim::GetAttr": lambda self, node: self.add_getattr(node), + "prim::Constant": lambda self, node: self.add_constant_node(node), + "prim::ListConstruct": lambda self, node: self.add_list_construct(node), + "prim::TupleConstruct": lambda self, node: self.add_tuple_construct(node), + "aten::unsqueeze": lambda self, node: self.add_unsqueeze(node), + "aten::to": lambda self, node: self.add_to(node), + "aten::detach": lambda self, node: self._identity(node), + "aten::reshape": lambda self, node: self.add_reshape(node), + "aten::flatten": lambda self, node: self.add_flatten(node), + "aten::slice": lambda self, node: self.add_slice(node), + "aten::size": lambda self, node: self.add_size(node), + "aten::cat": lambda self, node: self.add_cat(node), + "aten::mean": lambda self, node: self.add_mean(node), + "aten::quantize_per_tensor": lambda self, node: self.add_quantize(node), + "aten::dequantize": lambda self, node: self.add_dequantize(node), + "aten::add": lambda self, node: self.add_add_sub_op( + node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE + ), + "aten::sub": lambda self, node: self.add_add_sub_op( + node, NNAPI_OperationCode.SUB, NNAPI_FuseCode.FUSED_NONE + ), + "aten::mul": lambda self, node: self.add_pointwise_simple_binary_broadcast_op( + node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE + ), + "aten::div": lambda self, node: self.add_pointwise_simple_binary_broadcast_op( + node, NNAPI_OperationCode.DIV, NNAPI_FuseCode.FUSED_NONE + ), + "aten::relu": lambda self, node: self.add_pointwise_simple_unary_op( + node, NNAPI_OperationCode.RELU + ), + "aten::sigmoid": lambda self, node: self.add_pointwise_simple_unary_op( + node, NNAPI_OperationCode.LOGISTIC + ), + "aten::softmax": lambda self, node: self.add_softmax(node), + "aten::hardtanh": lambda self, node: self.add_hardtanh(node), + "aten::avg_pool2d": lambda self, node: 
self.add_avg_pool2d(node), + "aten::max_pool2d": lambda self, node: self.add_pool2d_node( + node, NNAPI_OperationCode.MAX_POOL_2D + ), + "aten::adaptive_avg_pool2d": lambda self, node: self.add_adaptive_avg_pool2d( + node + ), + "aten::upsample_nearest2d": lambda self, node: self.add_upsample_nearest2d( + node + ), + "aten::prelu": lambda self, node: self.add_prelu_op(node), + "aten::addmm": lambda self, node: self.add_addmm(node), + "aten::linear": lambda self, node: self.add_linear(node), + "aten::_convolution": lambda self, node: self.add_conv_underscore(node), + "aten::conv2d": lambda self, node: self.add_conv2d(node), + "aten::log_softmax": lambda self, node: self.add_log_softmax(node), + "quantized::linear": lambda self, node: self.add_qlinear(node), + "quantized::conv2d": lambda self, node: self.add_qconv2d( + node, NNAPI_FuseCode.FUSED_NONE + ), + "quantized::conv2d_relu": lambda self, node: self.add_qconv2d( + node, NNAPI_FuseCode.FUSED_RELU + ), + "quantized::conv_transpose2d": lambda self, node: self.add_qconv2d( + node, NNAPI_FuseCode.FUSED_NONE, transpose=True + ), + "quantized::add": lambda self, node: self.add_qadd( + node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE + ), + "quantized::add_relu": lambda self, node: self.add_qadd( + node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_RELU + ), + "quantized::mul": lambda self, node: self.add_qadd( + node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE + ), + } + + def add_node(self, node): + adder = self.ADDER_MAP.get(node.kind()) + if not adder: + raise Exception(f"Unsupported node kind ({node.kind()!r}) in node {node!r}") + adder(self, node) + + def _identity(self, node): + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + jitval = node.outputsAt(0) + self.jitval_operand_map[jitval] = in_id + + def add_getattr(self, node): + assert node.inputsSize() == 1 + assert node.outputsSize() == 1 + obj_ctype, obj = self.get_constant_value(node.inputsAt(0)) + assert str(obj_ctype).startswith("__torch__.") + name = node.s("name") + value = getattr(obj, name) + output = node.outputsAt(0) + ctype = output.type() + self.add_constant_value(output, ctype, value) + + def add_constant_node(self, node): + assert node.inputsSize() == 0 + assert node.outputsSize() == 1 + output = node.outputsAt(0) + ctype = output.type() + value = output.toIValue() + self.add_constant_value(output, ctype, value) + + def add_list_construct(self, node): + assert node.outputsSize() == 1 + output = node.outputsAt(0) + ctype = output.type() + const_vals: Optional[List] = [] + tensors: Optional[List] = [] + for inp in node.inputs(): + if const_vals is not None and inp in self.constants: + _, val = self.get_constant_value(inp) + const_vals.append(val) + else: + const_vals = None + if tensors is not None and inp.type().kind() == "TensorType": + tensors.append(inp) + else: + tensors = None + + if const_vals is not None: + # NOTE: Now that TorchScript supports list constants, + # this code path might not be used anymore. + self.add_constant_value(output, ctype, const_vals) + if tensors is not None: + self.add_tensor_sequence(output, tensors) + if const_vals is None and tensors is None: + raise Exception( + f"Unable to handle ListConstruct node. Neither all constants nor all tensors. 
{node!r}" + ) + + def add_tuple_construct(self, node): + assert node.outputsSize() == 1 + output = node.outputsAt(0) + values = list(node.inputs()) + self.add_tensor_sequence(output, values) + + def add_unsqueeze(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + + _, dim = self.get_constant_value(node.inputsAt(1), "IntType") + assert in_oper.dim_order == DimOrder.PRESUMED_CONTIGUOUS + + real_dim = dim if dim >= 0 else dim + len(in_oper.shape) + 1 + out_shape_list = list(in_oper.shape) + out_shape_list.insert(real_dim, 1) + out_shape = tuple(out_shape_list) + out_oper = in_oper._replace(shape=out_shape) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_scalar(dim) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.EXPAND_DIMS, inputs, outputs) + + def add_to(self, node): + # Handle to("cpu") / to("gpu") case + self._identity(node) + + def add_reshape(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + + shape_ctype, shape = self.get_constant_value(node.inputsAt(1)) + assert shape_ctype.kind() == "ListType" + assert shape_ctype.getElementType().kind() == "IntType" + is_trivial_reshape = len(shape) == 2 and shape[1] == -1 + + if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_reshape: + raise Exception( + "Currently, reshape is only supported on NHWC tensors if the target size is [X, -1]." + ) + + # Bit of a hack here. Use a real tensor to infer the output shape. + out_shape = torch.zeros(1).expand(in_oper.shape).reshape(shape).shape + out_oper = in_oper._replace( + shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS + ) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector(shape) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs) + + def add_flatten(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + + start_ctype, start_dim = self.get_constant_value(node.inputsAt(1), "IntType") + end_ctype, end_dim = self.get_constant_value(node.inputsAt(2), "IntType") + + # channels last with channels == 1 or (height & width both 1) + is_trivial_flatten = len(in_oper.shape) == 4 and ( + in_oper.shape[1] == 1 or (in_oper.shape[2] == 1 and in_oper.shape[3] == 1) + ) + if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_flatten: + raise Exception( + "Currently, flatten is not supported on NHWC tensors unless C=1 or H=W=1" + ) + + if start_dim < 0: + start_dim += len(in_oper.shape) + if end_dim < 0: + end_dim += len(in_oper.shape) + + out_shape = ( + in_oper.shape[:start_dim] + + (functools.reduce(operator.mul, in_oper.shape[start_dim : end_dim + 1]),) + + in_oper.shape[end_dim + 1 :] + ) + + if any(dim == 0 for dim in in_oper.shape[start_dim : end_dim + 1]): + raise Exception("Flattening flexible dims is not supported yet") + non_flattened_dims = in_oper.shape[:start_dim] + in_oper.shape[end_dim + 1 :] + if non_flattened_dims.count(0) > 1: + raise Exception("Only 1 dim can be flexible") + + out_oper = in_oper._replace( + shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS + ) + out_id 
= self.add_tensor_operand(node.outputsAt(0), out_oper) + + for idx, dim in enumerate(out_shape): + if dim == 0: + self.forward_operand_shape(out_id, idx, in_id, in_oper.shape.index(0)) + + inputs_1 = tuple(dim if dim != 0 else -1 for dim in out_shape) + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector(inputs_1) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs) + + def add_slice(self, node): + assert node.inputsSize() == 5 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + _, dim_value = self.get_constant_value(node.inputsAt(1)) + _, start_value = self.get_constant_value(node.inputsAt(2)) + _, stop_value = self.get_constant_value(node.inputsAt(3)) + _, step_value = self.get_constant_value(node.inputsAt(4)) + + if start_value is None: + start_value = 0 + if stop_value is None: + stop_value = sys.maxsize + + if start_value < 0: + start_value += in_oper.shape[dim_value] + elif start_value == sys.maxsize: + start_value = 0 + + if start_value == 0 and stop_value == sys.maxsize: + self._identity(node) + return + + if in_oper.shape[dim_value] == 0: + raise Exception("Unable to slice with flexible shape") + + if stop_value < 0: + stop_value += in_oper.shape[dim_value] + elif stop_value == sys.maxsize: + stop_value = in_oper.shape[dim_value] + + if start_value >= stop_value: + raise Exception("Slice start value should be less than stop value") + + out_len = (stop_value - start_value) // step_value + out_shape = tuple( + out_len if i == dim_value else dim for i, dim in enumerate(in_oper.shape) + ) + out_id = self.add_tensor_operand( + node.outputsAt(0), in_oper._replace(shape=out_shape) + ) + + # flex inputs + end_mask = 0 + for idx, dim in enumerate(out_shape): + if dim == 0: + self.forward_operand_shape(out_id, idx, in_id, idx) + end_mask |= 1 << idx + + inputs = [None] * 7 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector( + [start_value if i == dim_value else 0 for i in range(len(in_oper.shape))] + ) + inputs[2] = self.add_immediate_int_vector( + [ + stop_value if i == dim_value else dim + for i, dim in enumerate(in_oper.shape) + ] + ) + inputs[3] = self.add_immediate_int_vector( + [step_value if i == dim_value else 1 for i in range(len(in_oper.shape))] + ) + inputs[4] = self.add_immediate_int_scalar(0) # begin mask + inputs[5] = self.add_immediate_int_scalar(end_mask) + inputs[6] = self.add_immediate_int_scalar(0) # shrink axis mas + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.STRIDED_SLICE, inputs, outputs) + + def add_size(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + _, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + _, value = self.constants[node.inputsAt(1)] + res = in_oper.shape[value] + output = node.outputsAt(0) + self.add_constant_value(output, output.type(), res) + + def add_cat(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + tensors = self.tensor_sequences[node.inputsAt(0)] + _, dim = self.get_constant_value(node.inputsAt(1), "IntType") + + assert len(tensors) > 0 + in_ids = [] + out_oper = None + out_dim_size = 0 + for inp in tensors: + in_id, in_oper = self.get_tensor_operand_by_jitval(inp) + if out_oper is None: + out_shape = change_element(in_oper.shape, dim, -1) + out_oper = in_oper._replace(shape=out_shape) + assert in_oper.op_type == out_oper.op_type + assert 
in_oper.dim_order == out_oper.dim_order + assert change_element(in_oper.shape, dim, -1) == change_element( + out_oper.shape, dim, -1 + ) + # TODO: Possibly check scale and zero point. + in_ids.append(in_id) + # TODO: Possibly support variable-sized inputs. + out_dim_size += in_oper.shape[dim] + + assert out_oper is not None + out_oper = out_oper._replace( + shape=change_element(out_oper.shape, dim, out_dim_size) + ) + + if in_oper.dim_order == DimOrder.CHANNELS_LAST: # type: ignore[possibly-undefined] + assert len(out_oper.shape) == 4 + nnapi_dim = [0, 3, 1, 2][dim] + else: + nnapi_dim = dim + + out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) + for idx, d in enumerate(out_oper.shape): + if d == 0: + if idx == dim: + shape = " + ".join(flex_name(ip_id, dim) for ip_id in in_ids) + self.compute_operand_shape(out_id, idx, shape) + else: + self.forward_operand_shape(out_id, idx, in_ids[0], idx) + + inputs = in_ids + [self.add_immediate_int_scalar(nnapi_dim)] + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.CONCATENATION, inputs, outputs) + + def add_mean(self, node): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + dim_ctype, dim = self.get_constant_value(node.inputsAt(1)) + assert dim_ctype.kind() == "ListType" + assert dim_ctype.getElementType().kind() == "IntType" + _, keep_dim = self.get_constant_value(node.inputsAt(2), "BoolType") + # Expect None for dtype + self.get_constant_value(node.inputsAt(3), "NoneType") + + if in_oper.dim_order == DimOrder.CHANNELS_LAST: + assert len(in_oper.shape) == 4 + nnapi_dim = [[0, 3, 1, 2][d] for d in dim] + else: + nnapi_dim = dim + + collapsed_dims = set() + for d in dim: + if d < 0: + d += len(in_oper.shape) + collapsed_dims.add(d) + + if in_oper.dim_order == DimOrder.CHANNELS_LAST and not keep_dim: + assert collapsed_dims.issuperset({2, 3}) + out_dim_order = DimOrder.PRESUMED_CONTIGUOUS + else: + out_dim_order = in_oper.dim_order + + out_shape = [] + for i, s in enumerate(in_oper.shape): + if i not in collapsed_dims: + out_shape.append(s) + elif keep_dim: + out_shape.append(1) + + out_oper = in_oper._replace(shape=out_shape, dim_order=out_dim_order) + + inputs = [None] * 3 + inputs[0] = in_id + inputs[1] = self.add_immediate_int_vector(nnapi_dim) + inputs[2] = self.add_immediate_int_scalar(keep_dim) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.MEAN, inputs, outputs) + + def add_quantize(self, node): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + if in_oper.dim_order != DimOrder.CHANNELS_LAST: + raise Exception( + "Most hardware backends prefer NHWC quantized tensors. " + "Try setting `t.nnapi_nhwc = True` on your tensor inputs. " + ) + _, scale = self.get_constant_value(node.inputsAt(1), "FloatType") + _, zero_point = self.get_constant_value(node.inputsAt(2), "IntType") + _, scalar_type = self.get_constant_value(node.inputsAt(3), "IntType") + if scalar_type != TorchScalarTypes.QUINT8.value: + raise Exception( + "PyTorch NNAPI export only supports quantized tensors " + "with the quint8 dtype." 
+ ) + op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM + + out_oper = in_oper._replace( + op_type=op_type, + scale=scale, + zero_point=zero_point, + ) + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.QUANTIZE, inputs, outputs) + + def add_dequantize(self, node): + assert node.inputsSize() == 1 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + out_oper = in_oper._replace( + op_type=NNAPI_OperandCode.TENSOR_FLOAT32, + scale=0.0, + zero_point=0, + ) + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.DEQUANTIZE, inputs, outputs) + + def add_pointwise_simple_unary_op(self, node, opcode): + assert node.inputsSize() == 1 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + + out_oper = in_oper + if opcode == NNAPI_OperationCode.LOGISTIC: + # NNAPI docs: For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, the scale + # must be 1.f / 256 and the zeroPoint must be 0. + # https://fburl.com/h52stoog + if in_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: + out_oper = in_oper._replace(zero_point=0, scale=1.0 / 256) + + out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) + + for idx, dim in enumerate(in_oper.shape): + if dim == 0: + self.forward_operand_shape(out_id, idx, in_id, idx) + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(opcode, inputs, outputs) + + def _do_add_binary(self, node, opcode, fuse_code, *, qparams=None): # noqa: D401 + """Helper for pointwise binary broadcast ops with superfluous extra args.""" + assert node.outputsSize() == 1 + + assert node.inputsAt(0).type().kind() == "TensorType" + assert node.inputsAt(1).type().kind() == "TensorType" + + if self.has_operand_for_jitval(node.inputsAt(0)): + in0_id, in0_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + in1_id, in1_oper = self.get_tensor_operand_or_constant( + node.inputsAt(1), in0_oper.dim_order + ) + elif self.has_operand_for_jitval(node.inputsAt(1)): + in1_id, in1_oper = self.get_tensor_operand_by_jitval(node.inputsAt(1)) + in0_id, in0_oper = self.get_tensor_operand_or_constant( + node.inputsAt(0), in1_oper.dim_order + ) + else: + raise Exception(f"Can't do a NNAPI binary op: {opcode} on two constants") + + assert in0_oper.op_type == in1_oper.op_type + in0_id, in0_oper, in1_id, in1_oper = self.transpose_for_broadcast( + in0_id, in0_oper, in1_id, in1_oper + ) + # NOTE: PyTorch and NNAPI have the same broadcast semantics. 
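+ # e.g. broadcasting (N, C, 1, 1) against (N, C, H, W) yields (N, C, H, W); unequal non-1 dims raise.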
+ out_shape = broadcast_shapes(in0_oper.shape, in1_oper.shape) + out_oper = in0_oper._replace(shape=out_shape) + if qparams is not None: + scale, zp = qparams + out_oper = out_oper._replace(scale=scale, zero_point=zp) + + out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) + for idx, (d0, d1) in enumerate(zip(in0_oper.shape, in1_oper.shape)): + if d0 == 1 and d1 == 0: + self.forward_operand_shape(out_id, idx, in1_id, idx) + elif d0 == 0 and d1 == 1: + self.forward_operand_shape(out_id, idx, in0_id, idx) + elif d0 == 0 and d1 == 0: + self.flexible_shape_computation_lines.append( + f"assert {flex_name(in0_id, idx)} == {flex_name(in1_id, idx)}" + ) + self.forward_operand_shape(out_id, idx, in0_id, idx) + + inputs = [None] * 3 + inputs[0] = in0_id + inputs[1] = in1_id + inputs[2] = self.add_immediate_int_scalar(fuse_code) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(opcode, inputs, outputs) + + def add_pointwise_simple_binary_broadcast_op(self, node, opcode, fuse_code): + assert node.inputsSize() == 2 + self._do_add_binary(node, opcode, fuse_code) + + def add_add_sub_op(self, node, opcode, fuse_code): + assert node.inputsSize() == 3 + + _, alpha = self.get_constant_value(node.inputsAt(2), "IntType") + if alpha != 1: + raise Exception("NNAPI does not support add/sub with alpha.") + + self._do_add_binary(node, opcode, fuse_code) + + def add_qadd(self, node, opcode, fuse_code): + assert node.inputsSize() == 4 + + _, scale = self.get_constant_value(node.inputsAt(2), "FloatType") + _, zero_point = self.get_constant_value(node.inputsAt(3), "IntType") + + self._do_add_binary(node, opcode, fuse_code, qparams=(scale, zero_point)) + + def add_softmax(self, node): + assert node.inputsSize() == 3 + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + + _, softmax_dim = self.get_constant_value(node.inputsAt(1), "IntType") + + out_id = self.add_tensor_operand(node.outputsAt(0), in_oper) + for dim, size in enumerate(in_oper.shape): + if size == 0: + self.forward_operand_shape(out_id, dim, in_id, dim) + + inputs = [None] * 3 + inputs[0] = in_id + inputs[1] = self.add_immediate_float_scalar( + 1.0 + ) # positive scaling factor of exponent, beta + inputs[2] = self.add_immediate_int_scalar(softmax_dim) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.SOFTMAX, inputs, outputs) + + def add_hardtanh(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + + in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) + _, min_val = self.get_constant_value(node.inputsAt(1), "FloatType") + _, max_val = self.get_constant_value(node.inputsAt(2), "FloatType") + + op_map = { + (-1, 1): NNAPI_OperationCode.RELU1, + (0, 6): NNAPI_OperationCode.RELU6, # noqa: E201 + } + + opcode = op_map.get((min_val, max_val)) + if opcode is None: + raise Exception("NNAPI only supports hardtanh with args (-1, 1) or (0, 6).") + + inputs = [None] * 1 + inputs[0] = in_id + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), in_oper) + + self.add_operation(opcode, inputs, outputs) + + def add_prelu_op(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + assert node.inputsAt(0).type().kind() == "TensorType" + assert node.inputsAt(1).type().kind() == "TensorType" + + in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) + w_id, w_oper = self.get_tensor_operand_for_weight(node.inputsAt(1)) + assert len(w_oper.shape) == 1 + assert 
w_oper.shape[0] > 0 + if w_oper.shape[0] > 1: + if in_oper.use_nchw(): + # TODO: Support this by adding trailing 1 dims. + raise Exception( + "Per-channel PReLU only supports channels_last right now." + ) + + out_id = self.add_tensor_operand(node.outputsAt(0), in_oper) + for dim, size in enumerate(in_oper.shape): + if size > 0: + pass + elif dim <= 1: + raise Exception("PReLU requires fixed size for dim 0 and dim 1.") + else: + self.forward_operand_shape(out_id, dim, in_id, dim) + + inputs = [None] * 2 + inputs[0] = in_id + inputs[1] = w_id + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.PRELU, inputs, outputs) + + def add_pool2d_node(self, node, opcode): + assert node.inputsSize() == 6 + assert node.outputsSize() == 1 + image, kernel, stride, padding, dilation, ceil_mode = node.inputs() + + stride = stride or kernel + + # TODO: Validate ceil_mode semantics. + + args = self.get_conv_pool_args_2d_from_jit( + self.get_size_arg(kernel), stride, padding, dilation + ) + if args.dilation_h != 1 or args.dilation_w != 1: + raise Exception("NNAPI does not support dilated pooling.") + + image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(image) + assert len(image_oper.shape) == 4 + + out_shape = get_conv_pool_shape( + image_oper.shape, args, image_oper.shape[1], False + ) + use_nchw = image_oper.use_nchw() + + inputs = [None] * 11 + inputs[0] = image_id + inputs[1] = self.add_immediate_int_scalar(args.pad_l) + inputs[2] = self.add_immediate_int_scalar(args.pad_r) + inputs[3] = self.add_immediate_int_scalar(args.pad_t) + inputs[4] = self.add_immediate_int_scalar(args.pad_b) + inputs[5] = self.add_immediate_int_scalar(args.stride_w) + inputs[6] = self.add_immediate_int_scalar(args.stride_h) + inputs[7] = self.add_immediate_int_scalar(args.kernel_w) + inputs[8] = self.add_immediate_int_scalar(args.kernel_h) + inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + + self.add_operation(opcode, inputs, outputs) + + def add_avg_pool2d(self, node): + assert node.inputsSize() == 7 + assert node.outputsSize() == 1 + ( + image, + kernel, + stride, + padding, + ceil_mode, + count_include_pad, + divisor_override, + ) = node.inputs() + + _, count_include_pad_value = self.get_constant_value(count_include_pad) + _, divisor_override_value = self.get_constant_value(divisor_override) + if not count_include_pad_value or divisor_override_value: + raise Exception( + "NNAPI doesn't support count_include_pad=False or divisor_override" + ) + + args = self.get_conv_pool_args_2d_from_jit( + self.get_size_arg(kernel), stride, padding + ) + + image_id, image_oper = self.get_tensor_operand_by_jitval(image) + assert len(image_oper.shape) == 4 + + out_shape = get_conv_pool_shape( + image_oper.shape, args, image_oper.shape[1], False + ) + use_nchw = image_oper.use_nchw() + + inputs = [None] * 11 + inputs[0] = image_id + inputs[1] = self.add_immediate_int_scalar(args.pad_l) + inputs[2] = self.add_immediate_int_scalar(args.pad_r) + inputs[3] = self.add_immediate_int_scalar(args.pad_t) + inputs[4] = self.add_immediate_int_scalar(args.pad_b) + inputs[5] = self.add_immediate_int_scalar(args.stride_w) + inputs[6] = self.add_immediate_int_scalar(args.stride_h) + inputs[7] = self.add_immediate_int_scalar(args.kernel_w) + inputs[8] = self.add_immediate_int_scalar(args.kernel_h) + 
inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + out_id = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + self._handle_conv_pool_flexible_input(out_id, image, args, False) + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs) + + def add_adaptive_avg_pool2d(self, node): + assert node.inputsSize() == 2 + assert node.outputsSize() == 1 + + image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size( + node.inputsAt(0) + ) + assert len(image_oper.shape) == 4 + + size_ctype, size_arg = self.get_constant_value(node.inputsAt(1)) + assert size_ctype.kind() == "ListType" + assert size_ctype.getElementType().kind() == "IntType" + if size_arg != [1, 1]: + raise Exception( + "NNAPI only supports adaptive_avg_pool2d with output size (1, 1)." + ) + + out_shape = image_oper.shape[0:2] + tuple(size_arg) + use_nchw = image_oper.use_nchw() + + inputs = [None] * 11 + inputs[0] = image_id + inputs[1] = self.add_immediate_int_scalar(0) + inputs[2] = self.add_immediate_int_scalar(0) + inputs[3] = self.add_immediate_int_scalar(0) + inputs[4] = self.add_immediate_int_scalar(0) + inputs[5] = self.add_immediate_int_scalar(1) + inputs[6] = self.add_immediate_int_scalar(1) + inputs[7] = self.add_immediate_int_scalar(image_oper.shape[3]) + inputs[8] = self.add_immediate_int_scalar(image_oper.shape[2]) + inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + + self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs) + + def add_upsample_nearest2d(self, node): + assert node.inputsSize() == 3 or node.inputsSize() == 4 + assert node.outputsSize() == 1 + if node.inputsSize() == 3: + image, size_jit, scale_jit = node.inputs() + else: + image, size_jit, scale_h_jit, scale_w_jit = node.inputs() + size_ctype, size_arg = self.get_constant_value(size_jit) + + if node.inputsSize() == 3: + scale_ctype, scale_arg = self.get_constant_value(scale_jit) # type: ignore[possibly-undefined] + else: + scale_h_ctype, scale_h_arg = self.get_constant_value(scale_h_jit) # type: ignore[possibly-undefined] + scale_w_ctype, scale_w_arg = self.get_constant_value(scale_w_jit) # type: ignore[possibly-undefined] + + # The only way for the 4-argument overload of upsample_nearest2d to + # have been added to the graph without error is if the scale_h and + # scale_w arguments are None + assert scale_h_ctype.kind() == "NoneType" + assert scale_w_ctype.kind() == "NoneType" + + scale_ctype = scale_h_ctype + scale_arg = scale_h_arg + + image_id, image_oper = self.get_tensor_operand_by_jitval(image) + assert len(image_oper.shape) == 4 + + if size_ctype.kind() != "NoneType" and scale_ctype.kind() != "NoneType": + raise Exception("Size and scale cannot both be non-None.") + elif size_ctype.kind() != "NoneType": + assert size_ctype.kind() == "ListType" + assert size_ctype.getElementType().kind() == "IntType" + assert scale_ctype.kind() == "NoneType" + assert scale_arg is None + assert isinstance(size_arg, list) + assert size_arg + assert all(isinstance(val, int) for val in size_arg) + if len(size_arg) == 1: + size_arg = size_arg * 2 + assert len(size_arg) == 2 + out_h = size_arg[0] + out_w = size_arg[1] + arg_h = self.add_immediate_int_scalar(out_h) + arg_w 
= self.add_immediate_int_scalar(out_w) + elif scale_ctype.kind() != "NoneType": + assert scale_ctype.kind() == "ListType" + assert scale_ctype.getElementType().kind() == "FloatType" + assert size_ctype.kind() == "NoneType" + assert size_arg is None + assert isinstance(scale_arg, list) + assert scale_arg + assert all(isinstance(val, float) for val in scale_arg) + if len(scale_arg) == 1: + scale_arg = scale_arg * 2 + assert len(scale_arg) == 2 + out_h = int(scale_arg[0] * image_oper.shape[2]) + out_w = int(scale_arg[1] * image_oper.shape[3]) + arg_h = self.add_immediate_float_scalar(scale_arg[0]) + arg_w = self.add_immediate_float_scalar(scale_arg[1]) + else: + raise Exception("Size and scale cannot both be None.") + + out_shape = (image_oper.shape[0], image_oper.shape[1], out_h, out_w) + use_nchw = image_oper.use_nchw() + out_id = self.add_tensor_operand( + node.outputsAt(0), image_oper._replace(shape=out_shape) + ) + + if image_oper.shape[0] == 0 or image_oper.shape[1] == 0: + raise Exception("Flexible batch or channels not supported") + + # Handle variable input size + for dim in (2, 3): # h, w indices + if image_oper.shape[dim] == 0: + if size_ctype.kind() != "NoneType": + self.compute_operand_shape(out_id, dim, size_arg[dim - 2]) + elif scale_ctype.kind() != "NoneType": + self.compute_operand_shape( + out_id, + dim, + f"int({scale_arg[dim - 2]} * {flex_name(image_id, dim)})", + ) + else: + raise Exception("Size and scale cannot both be None.") + + inputs = [None] * 4 + inputs[0] = image_id + inputs[1] = arg_w + inputs[2] = arg_h + inputs[3] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.RESIZE_NEAREST_NEIGHBOR, inputs, outputs) + + def add_addmm(self, node): + assert node.inputsSize() == 5 + assert node.outputsSize() == 1 + jit_bias, jit_input, jit_weight, jit_beta, jit_alpha = node.inputs() + + for jitval in (jit_beta, jit_alpha): + scale_ctype, scale_value = self.get_constant_value(jitval) + assert scale_ctype.kind() in ("IntType", "FloatType") + if scale_value != 1: + raise Exception( + "NNAPI Fully-Connected does not support alpha and beta." + ) + + self.add_addmm_or_linear(node, True, jit_input, jit_weight, jit_bias) + + def add_linear(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + jit_input, jit_weight, jit_bias = node.inputs() + + self.add_addmm_or_linear(node, False, jit_input, jit_weight, jit_bias) + + def add_addmm_or_linear( + self, node, transpose_weight, jit_input, jit_weight, jit_bias + ): + input_id, input_oper = self.get_tensor_operand_by_jitval(jit_input) + bias_id, bias_oper = self.get_tensor_operand_for_weight(jit_bias) + + assert len(input_oper.shape) == 2 + assert len(bias_oper.shape) == 1 + + # TODO: Transform at load time to share weights with CPU model. 
+ _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") + assert len(weight_tensor.shape) == 2 + if transpose_weight: + nnapi_weight_tensor = weight_tensor.t().contiguous() + else: + nnapi_weight_tensor = weight_tensor.contiguous() + weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) + weight_oper = self.operands[weight_id] + + out_shape = (input_oper.shape[0], weight_oper.shape[0]) + out_id = self.add_tensor_operand( + node.outputsAt(0), input_oper._replace(shape=out_shape) + ) + + if input_oper.shape[0] == 0: + self.forward_operand_shape(out_id, 0, input_id, 0) + + inputs = [None] * 4 + inputs[0] = input_id + inputs[1] = weight_id + inputs[2] = bias_id + inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + + outputs = [None] * 1 + outputs[0] = out_id + + self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs) + + def add_qlinear(self, node): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + ( + jit_input, + jit_packed_weight, + jit_scale, + jit_zero_point, + ) = node.inputs() + + input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input) + # TODO: Support automatic reshape + assert len(input_oper.shape) == 2 + + _, out_scale = self.get_constant_value(jit_scale, "FloatType") + _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType") + weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight) + assert weight_ctype.name() == "LinearPackedParamsBase" + raw_weight, raw_bias = packed_weight.__getstate__()[0] + assert raw_bias is not None + + assert len(raw_weight.shape) == 2 + assert len(raw_bias.shape) == 1 + assert raw_bias.shape[0] == raw_weight.shape[0] + assert raw_weight.shape[1] == input_oper.shape[1] + + assert raw_weight.qscheme() == torch.per_tensor_affine + if raw_weight.dtype == torch.quint8: + unsigned_weight = raw_weight + else: + assert raw_weight.dtype == torch.qint8 + unsigned_weight = torch._make_per_tensor_quantized_tensor( + (raw_weight.int_repr().int() + 128).to(torch.uint8), + scale=raw_weight.q_scale(), + zero_point=raw_weight.q_zero_point() + 128, + ) + weight_scale = unsigned_weight.q_scale() + bias_scale = input_oper.scale * weight_scale + int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32) + bias_id = self.add_tensor_operand_for_weight(int_bias) + + multiplier = input_oper.scale * weight_scale / out_scale + assert multiplier > 0 + if multiplier >= 1: + raise Exception( + "Quantized convolution multiplier is greater than 1. " + "This is supported by NNAPI, but not by most hardware backends. " + "Try training a model without quantization-aware training. " + ) + + # TODO: Transform at load time to share weights with CPU model. 
+ nnapi_weight_tensor = unsigned_weight.contiguous() + weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) + weight_oper = self.operands[weight_id] + + out_shape = (input_oper.shape[0], weight_oper.shape[0]) + out_oper = input_oper._replace( + shape=out_shape, + scale=out_scale, + zero_point=out_zero_point, + ) + + inputs = [None] * 4 + inputs[0] = input_id + inputs[1] = weight_id + inputs[2] = bias_id + inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) + + self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs) + + def get_optional_bias(self, jit_bias, weight_tensor, transpose=False): + ctype, value = self.get_constant_value(jit_bias) + if ctype.kind() == "NoneType": + bias_idx = 1 if transpose else 0 + nnapi_bias_tensor = torch.zeros( + weight_tensor.size()[bias_idx], dtype=weight_tensor.dtype + ) + bias_id = self.add_tensor_operand_for_weight(nnapi_bias_tensor) + bias_oper = self.operands[bias_id] + return bias_id, bias_oper + else: + return self.get_tensor_operand_for_weight(jit_bias) + + def add_conv2d(self, node): + assert node.inputsSize() == 7 + assert node.outputsSize() == 1 + + ( + jit_image, + jit_weight, + jit_bias, + jit_stride, + jit_pad, + jit_dilation, + jit_groups, + ) = node.inputs() + + _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") + bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor) + args = self.get_conv_pool_args_2d_from_jit( + weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups + ) + + return self.add_conv2d_common( + node.outputsAt(0), + 0.0, + 0, + jit_image, + weight_tensor, + bias_id, + args, + False, # transpose + NNAPI_FuseCode.FUSED_NONE, + ) + + def add_conv_underscore(self, node): + assert node.inputsSize() == 13 + assert node.outputsSize() == 1 + + ( + jit_image, + jit_weight, + jit_bias, + jit_stride, + jit_pad, + jit_dilation, + jit_transpose, + _, + jit_groups, + _, + _, + _, + _, + ) = node.inputs() + + _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") + _, transpose = self.get_constant_value(jit_transpose) + bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor, transpose) + args = self.get_conv_pool_args_2d_from_jit( + weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups + ) + + return self.add_conv2d_common( + node.outputsAt(0), + 0.0, + 0, + jit_image, + weight_tensor, + bias_id, + args, + transpose, + NNAPI_FuseCode.FUSED_NONE, + ) + + def add_log_softmax(self, node): + assert node.inputsSize() == 3 + assert node.outputsSize() == 1 + + (jit_input, jit_dim, jit_half_to_float) = node.inputs() + input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input) + _, dim = self.get_constant_value(jit_dim, "IntType") + + out_shape = input_oper.shape + + inputs = [None] * 3 + inputs[0] = input_id + # specifying 1 as the scaling factor for the exponent, beta + inputs[1] = self.add_immediate_float_scalar(1) + inputs[2] = self.add_immediate_int_scalar(dim) + + outputs = [None] * 1 + outputs[0] = self.add_tensor_operand( + node.outputsAt(0), input_oper._replace(shape=out_shape) + ) + self.add_operation(NNAPI_OperationCode.LOG_SOFTMAX, inputs, outputs) + + def add_qconv2d(self, node, fuse_code, transpose=False): + assert node.inputsSize() == 4 + assert node.outputsSize() == 1 + + ( + jit_image, + jit_packed_weight, + jit_scale, + jit_zero_point, + ) = node.inputs() + + _, out_scale = 
self.get_constant_value(jit_scale, "FloatType") + _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType") + weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight) + assert weight_ctype.name() == "Conv2dPackedParamsBase" + ( + pack_version, + tensors, + opt_tensors, + ) = packed_weight.__getstate__()[0] + assert pack_version == "2" + packed_config, raw_weight = tensors + (raw_bias,) = opt_tensors + assert raw_bias is not None + args = self.get_conv_pool_args_2d_from_pack( + raw_weight.shape[2:4], packed_config + ) + + assert raw_weight.qscheme() == torch.per_tensor_affine + if raw_weight.dtype == torch.quint8: + unsigned_weight = raw_weight + else: + assert raw_weight.dtype == torch.qint8 + unsigned_weight = torch._make_per_tensor_quantized_tensor( + (raw_weight.int_repr().int() + 128).to(torch.uint8), + scale=raw_weight.q_scale(), + zero_point=raw_weight.q_zero_point() + 128, + ) + weight_scale = unsigned_weight.q_scale() + _, image_oper = self.get_tensor_operand_by_jitval(jit_image) + bias_scale = image_oper.scale * weight_scale + int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32) + bias_id = self.add_tensor_operand_for_weight(int_bias) + + multiplier = image_oper.scale * weight_scale / out_scale + assert multiplier > 0 + if multiplier >= 1: + raise Exception( + "Quantized convolution multiplier is greater than 1. " + "This is supported by NNAPI, but not by most hardware backends. " + "Try training a model without quantization-aware training. " + ) + + return self.add_conv2d_common( + node.outputsAt(0), + out_scale, + out_zero_point, + jit_image, + unsigned_weight, + bias_id, + args, + transpose, + fuse_code, + ) + + def add_conv2d_common( + self, + jit_out, + out_scale, + out_zero_point, + jit_image, + weight_tensor, + bias_id, + args, + transpose, + fuse_code, + ): + image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image) + in_c = image_oper.shape[1] + + if args.group == 1: + # Full convolution + depthwise = False + if transpose: + weight_permutation = (1, 2, 3, 0) + else: + weight_permutation = (0, 2, 3, 1) + elif args.group == in_c: + # Depthwise convolution + depthwise = True + weight_permutation = (1, 2, 3, 0) + else: + raise Exception("Group convolution not supported yet.") + + # TODO: Transform at load time to share weights with CPU model. 
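+ # The permutation converts the PyTorch weight layout to NNAPI's: (out_c, kH, kW, in_c) for full and transposed conv, (1, kH, kW, out_c) for depthwise.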
+ nnapi_weight_tensor = weight_tensor.permute(*weight_permutation).contiguous() + weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) + weight_oper = self.operands[weight_id] + + bias_oper = self.operands[bias_id] + + if image_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32: + assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32 + assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32 + elif image_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: + assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM + assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_INT32 + assert approx_equal(image_oper.scale * weight_oper.scale, bias_oper.scale) + assert bias_oper.zero_point == 0 + else: + raise Exception(f"Unsupported input type for conv2d: {image_oper.op_type}") + + assert len(image_oper.shape) == 4 + assert len(weight_oper.shape) == 4 + assert len(bias_oper.shape) == 1 + + if depthwise: + # Depthwise convolution + one, kern_h, kern_w, out_c = weight_oper.shape + assert one == 1 + assert out_c % in_c == 0 + channel_multiplier = out_c // in_c + assert channel_multiplier == 1 # Don't support multiplier + assert out_c == in_c + else: + # Full convolution + out_c, kern_h, kern_w, kern_d = weight_oper.shape + assert kern_d == in_c + + assert out_c == bias_oper.shape[0] + + use_nchw = image_oper.use_nchw() + + if depthwise: + num_args = 12 + opcode = NNAPI_OperationCode.DEPTHWISE_CONV_2D + else: + num_args = 11 + if transpose: + opcode = NNAPI_OperationCode.TRANSPOSE_CONV_2D + else: + opcode = NNAPI_OperationCode.CONV_2D + + inputs = [None] * num_args + inputs[0] = image_id + inputs[1] = weight_id + inputs[2] = bias_id + inputs[3] = self.add_immediate_int_scalar(args.pad_l) + inputs[4] = self.add_immediate_int_scalar(args.pad_r) + inputs[5] = self.add_immediate_int_scalar(args.pad_t) + inputs[6] = self.add_immediate_int_scalar(args.pad_b) + inputs[7] = self.add_immediate_int_scalar(args.stride_w) + inputs[8] = self.add_immediate_int_scalar(args.stride_h) + if depthwise: + inputs[9] = self.add_immediate_int_scalar(1) + inputs[10] = self.add_immediate_int_scalar(fuse_code) + inputs[11] = self.add_immediate_bool_scalar(use_nchw) + else: + inputs[9] = self.add_immediate_int_scalar(fuse_code) + inputs[10] = self.add_immediate_bool_scalar(use_nchw) + + outputs = [None] * 1 + out_shape = get_conv_pool_shape(image_oper.shape, args, out_c, transpose) + out_oper = image_oper._replace( + shape=out_shape, + scale=out_scale, + zero_point=out_zero_point, + ) + out_id = self.add_tensor_operand(jit_out, out_oper) + self._handle_conv_pool_flexible_input(out_id, jit_image, args, transpose) + + outputs[0] = out_id + self.add_operation(opcode, inputs, outputs) + + def _handle_conv_pool_flexible_input(self, out_id, jit_image, args, transpose): + image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image) + batch, in_ch, in_h, in_w = image_oper.shape + + if batch == 0: + self.forward_operand_shape(out_id, 0, image_id, 0) + if in_ch == 0: + raise Exception("Input channels can't be flexible") + # H & W + if transpose: + if in_h == 0: + self.compute_operand_shape( + out_id, + 2, + f"({flex_name(image_id, 2)} - 1) * {args.stride_h} + {args.kernel_h} - {args.pad_t} - {args.pad_b}", + ) + if in_w == 0: + self.compute_operand_shape( + out_id, + 3, + f"({flex_name(image_id, 3)} - 1) * {args.stride_w} + {args.kernel_w} - {args.pad_l} - {args.pad_r}", + ) + else: + if in_h == 0: + self.compute_operand_shape( + out_id, + 2, + f"({flex_name(image_id, 2)} - {args.kernel_h} + 
{args.pad_t} + {args.pad_b}) // {args.stride_h} + 1", + ) + if in_w == 0: + self.compute_operand_shape( + out_id, + 3, + f"({flex_name(image_id, 3)} - {args.kernel_w} + {args.pad_l} + {args.pad_r}) // {args.stride_w} + 1", + ) + + +def serialize_model( + module, inputs, *, config=None, return_shapes=None, use_int16_for_qint16=False +): + """Convert to NNAPI and serialize torchscript module. + + Parameters: + module: Torchscript module to convert + inputs: Tensors used to specify input details for NNAPI + config (optional): Optional config to attach to module + return_shapes (optional): Specify shape of outputs if + your module uses runtime flexible shapes to set output + buffer size for NNAPI + use_int16_for_qint16 (optional): Use Pytorch int16 to represent NNAPI qint16 values + """ + return _NnapiSerializer(config, use_int16_for_qint16).serialize_model( + module, inputs, return_shapes + ) diff --git a/venv/lib/python3.10/site-packages/torch/backends/cpu/__init__.py b/venv/lib/python3.10/site-packages/torch/backends/cpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2df99e709b7e83f06f6cf9780b6cee8aa50d8576 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/cpu/__init__.py @@ -0,0 +1,19 @@ +import torch + +__all__ = [ + "get_cpu_capability", +] + + +def get_cpu_capability() -> str: + r"""Return cpu capability as a string value. + + Possible values: + - "DEFAULT" + - "VSX" + - "Z VECTOR" + - "NO AVX" + - "AVX2" + - "AVX512" + """ + return torch._C._get_cpu_capability() diff --git a/venv/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..490213353d9aa5b4e36b1be374910d1925388f42 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/cuda/__init__.py b/venv/lib/python3.10/site-packages/torch/backends/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..da0cea5c6ce14561510b7ecf887e2472645649c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/cuda/__init__.py @@ -0,0 +1,371 @@ +import contextlib +import warnings + +from typing import Union + +import torch + +__all__ = [ + "is_built", + "cuFFTPlanCacheAttrContextProp", + "cuFFTPlanCache", + "cuFFTPlanCacheManager", + "cuBLASModule", + "preferred_linalg_library", + "cufft_plan_cache", + "matmul", + "SDPBackend", + "SDPAParams", + "enable_cudnn_sdp", + "cudnn_sdp_enabled", + "enable_flash_sdp", + "flash_sdp_enabled", + "enable_mem_efficient_sdp", + "mem_efficient_sdp_enabled", + "math_sdp_enabled", + "enable_math_sdp", + "can_use_flash_attention", + "can_use_efficient_attention", + "sdp_kernel", +] + + +def is_built(): + r""" + Return whether PyTorch is built with CUDA support. + + Note that this doesn't necessarily mean CUDA is available; just that if this PyTorch + binary were run on a machine with working CUDA drivers and devices, we would be able to use it. + """ + return torch._C._has_cuda + + +class cuFFTPlanCacheAttrContextProp: + # Like regular ContextProp, but uses the `.device_index` attribute from the + # calling object as the first argument to the getter and setter. 
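+ # Note: `setter` may instead be an error-message string for read-only properties; + # __set__ then raises RuntimeError with that message (see cuFFTPlanCache.size below).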
+ def __init__(self, getter, setter): + self.getter = getter + self.setter = setter + + def __get__(self, obj, objtype): + return self.getter(obj.device_index) + + def __set__(self, obj, val): + if isinstance(self.setter, str): + raise RuntimeError(self.setter) + self.setter(obj.device_index, val) + + +class cuFFTPlanCache: + r""" + Represent a specific plan cache for a specific `device_index`. + + The attributes `size` and `max_size`, and method `clear`, can fetch and/or + change properties of the C++ cuFFT plan cache. + """ + + def __init__(self, device_index): + self.device_index = device_index + + size = cuFFTPlanCacheAttrContextProp( + torch._cufft_get_plan_cache_size, + ".size is a read-only property showing the number of plans currently in the " + "cache. To change the cache capacity, set cufft_plan_cache.max_size.", + ) + + max_size = cuFFTPlanCacheAttrContextProp( + torch._cufft_get_plan_cache_max_size, torch._cufft_set_plan_cache_max_size + ) + + def clear(self): + return torch._cufft_clear_plan_cache(self.device_index) + + +class cuFFTPlanCacheManager: + r""" + Represent all cuFFT plan caches and return the cuFFTPlanCache for a given device when indexed. + + When this object is used directly as a `cuFFTPlanCache` object (e.g., by + setting the `.max_size` attribute), the current device's cuFFT plan cache is + used. + """ + + __initialized = False + + def __init__(self): + self.caches = [] + self.__initialized = True + + def __getitem__(self, device): + index = torch.cuda._utils._get_device_index(device) + if index < 0 or index >= torch.cuda.device_count(): + raise RuntimeError( + f"cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got " + f"device with index {index}" + ) + if len(self.caches) == 0: + self.caches.extend( + cuFFTPlanCache(index) for index in range(torch.cuda.device_count()) + ) + return self.caches[index] + + def __getattr__(self, name): + return getattr(self[torch.cuda.current_device()], name) + + def __setattr__(self, name, value): + if self.__initialized: + return setattr(self[torch.cuda.current_device()], name, value) + else: + return super().__setattr__(name, value) + + +class cuBLASModule: + def __getattr__(self, name): + if name == "allow_tf32": + return torch._C._get_cublas_allow_tf32() + elif name == "allow_fp16_reduced_precision_reduction": + return torch._C._get_cublas_allow_fp16_reduced_precision_reduction() + elif name == "allow_bf16_reduced_precision_reduction": + return torch._C._get_cublas_allow_bf16_reduced_precision_reduction() + raise AttributeError("Unknown attribute " + name) + + def __setattr__(self, name, value): + if name == "allow_tf32": + return torch._C._set_cublas_allow_tf32(value) + elif name == "allow_fp16_reduced_precision_reduction": + return torch._C._set_cublas_allow_fp16_reduced_precision_reduction(value) + elif name == "allow_bf16_reduced_precision_reduction": + return torch._C._set_cublas_allow_bf16_reduced_precision_reduction(value) + raise AttributeError("Unknown attribute " + name) + + +_LinalgBackends = { + "default": torch._C._LinalgBackend.Default, + "cusolver": torch._C._LinalgBackend.Cusolver, + "magma": torch._C._LinalgBackend.Magma, +} +_LinalgBackends_str = ", ".join(_LinalgBackends.keys()) + + +def preferred_linalg_library( + backend: Union[None, str, torch._C._LinalgBackend] = None +) -> torch._C._LinalgBackend: + r""" + Override the heuristic PyTorch uses to choose between cuSOLVER and MAGMA for CUDA linear algebra operations. + + ..
warning:: This flag is experimental and subject to change. + + When PyTorch runs a CUDA linear algebra operation it often uses the cuSOLVER or MAGMA libraries, + and if both are available it decides which to use with a heuristic. + This flag (a :class:`str`) allows overriding those heuristics. + + * If `"cusolver"` is set then cuSOLVER will be used wherever possible. + * If `"magma"` is set then MAGMA will be used wherever possible. + * If `"default"` (the default) is set then heuristics will be used to pick between + cuSOLVER and MAGMA if both are available. + * When no input is given, this function returns the currently preferred library. + * User may use the environment variable TORCH_LINALG_PREFER_CUSOLVER=1 to set the preferred library to cuSOLVER + globally. + This flag only sets the initial value of the preferred library and the preferred library + may still be overridden by this function call later in your script. + + Note: When a library is preferred other libraries may still be used if the preferred library + doesn't implement the operation(s) called. + This flag may achieve better performance if PyTorch's heuristic library selection is incorrect + for your application's inputs. + + Currently supported linalg operators: + + * :func:`torch.linalg.inv` + * :func:`torch.linalg.inv_ex` + * :func:`torch.linalg.cholesky` + * :func:`torch.linalg.cholesky_ex` + * :func:`torch.cholesky_solve` + * :func:`torch.cholesky_inverse` + * :func:`torch.linalg.lu_factor` + * :func:`torch.linalg.lu` + * :func:`torch.linalg.lu_solve` + * :func:`torch.linalg.qr` + * :func:`torch.linalg.eigh` + * :func:`torch.linalg.eighvals` + * :func:`torch.linalg.svd` + * :func:`torch.linalg.svdvals` + """ + if backend is None: + pass + elif isinstance(backend, str): + if backend not in _LinalgBackends: + raise RuntimeError( + "Unknown input value. " f"Choose from: {_LinalgBackends_str}." + ) + torch._C._set_linalg_preferred_backend(_LinalgBackends[backend]) + elif isinstance(backend, torch._C._LinalgBackend): + torch._C._set_linalg_preferred_backend(backend) + else: + raise RuntimeError("Unknown input value type.") + + return torch._C._get_linalg_preferred_backend() + + +from torch._C import _SDPAParams as SDPAParams, _SDPBackend as SDPBackend + +# Set the __module__ attribute +SDPAParams.__module__ = "torch.backends.cuda" +SDPAParams.__name__ = "SDPAParams" + + +def flash_sdp_enabled(): + r""" + .. warning:: This flag is beta and subject to change. + + Returns whether flash scaled dot product attention is enabled or not. + """ + return torch._C._get_flash_sdp_enabled() + + +def enable_flash_sdp(enabled: bool): + r""" + .. warning:: This flag is beta and subject to change. + + Enables or disables flash scaled dot product attention. + """ + torch._C._set_sdp_use_flash(enabled) + + +def mem_efficient_sdp_enabled(): + r""" + .. warning:: This flag is beta and subject to change. + + Returns whether memory efficient scaled dot product attention is enabled or not. + """ + return torch._C._get_mem_efficient_sdp_enabled() + + +def enable_mem_efficient_sdp(enabled: bool): + r""" + .. warning:: This flag is beta and subject to change. + + Enables or disables memory efficient scaled dot product attention. + """ + torch._C._set_sdp_use_mem_efficient(enabled) + + +def math_sdp_enabled(): + r""" + .. warning:: This flag is beta and subject to change. + + Returns whether math scaled dot product attention is enabled or not. + """ + return torch._C._get_math_sdp_enabled() + + +def enable_math_sdp(enabled: bool): + r""" + .. 
warning:: This flag is beta and subject to change. + + Enables or disables math scaled dot product attention. + """ + torch._C._set_sdp_use_math(enabled) + + +def can_use_flash_attention(params: SDPAParams, debug: bool = False) -> bool: + r"""Check if FlashAttention can be utilized in scaled_dot_product_attention. + + Args: + params: An instance of SDPAParams containing the tensors for query, + key, value, an optional attention mask, dropout rate, and + a flag indicating if the attention is causal. + debug: Whether to logging.warn debug information as to why FlashAttention could not be run. + Defaults to False. + + Returns: + True if FlashAttention can be used with the given parameters; otherwise, False. + + Note: + This function is dependent on a CUDA-enabled build of PyTorch. It will return False + in non-CUDA environments. + """ + return torch._C._can_use_flash_attention(params, debug) + + +def can_use_efficient_attention(params: SDPAParams, debug: bool = False) -> bool: + r"""Check if efficient_attention can be utilized in scaled_dot_product_attention. + + Args: + params: An instance of SDPAParams containing the tensors for query, + key, value, an optional attention mask, dropout rate, and + a flag indicating if the attention is causal. + debug: Whether to logging.warn with information as to why efficient_attention could not be run. + Defaults to False. + + Returns: + True if efficient_attention can be used with the given parameters; otherwise, False. + + Note: + This function is dependent on a CUDA-enabled build of PyTorch. It will return False + in non-CUDA environments. + """ + return torch._C._can_use_mem_efficient_attention(params, debug) + + +def cudnn_sdp_enabled(): + r""" + .. warning:: This flag is beta and subject to change. + + Returns whether cuDNN scaled dot product attention is enabled or not. + """ + return torch._C._get_cudnn_sdp_enabled() + + +def enable_cudnn_sdp(enabled: bool): + r""" + .. warning:: This flag is beta and subject to change. + + Enables or disables cuDNN scaled dot product attention. + """ + torch._C._set_sdp_use_cudnn(enabled) + + +@contextlib.contextmanager +def sdp_kernel( + enable_flash: bool = True, + enable_math: bool = True, + enable_mem_efficient: bool = True, + enable_cudnn: bool = True, +): + r""" + .. warning:: This flag is beta and subject to change. + + This context manager can be used to temporarily enable or disable any of the three backends for scaled dot product attention. + Upon exiting the context manager, the previous state of the flags will be restored. + """ + warnings.warn( + ( + "torch.backends.cuda.sdp_kernel() " + "is deprecated. In the future, this context manager will be removed. " + "Please see, torch.nn.attention.sdpa_kernel() for the new context manager, with updated " + "signature." 
+ ), + FutureWarning, + ) + from torch.nn.attention import sdpa_kernel, SDPBackend + + backend_list = [] + if enable_flash: + backend_list.append(SDPBackend.FLASH_ATTENTION) + if enable_mem_efficient: + backend_list.append(SDPBackend.EFFICIENT_ATTENTION) + if enable_math: + backend_list.append(SDPBackend.MATH) + if enable_cudnn: + backend_list.append(SDPBackend.CUDNN_ATTENTION) + + with sdpa_kernel(backend_list) as context: + try: + yield context + finally: + pass + + +cufft_plan_cache = cuFFTPlanCacheManager() +matmul = cuBLASModule() diff --git a/venv/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3072ffe141086a83d47182a569b664050ec00bf5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c90fa65dcfe76719fa4ac5257863dd15f6365f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/nnpack/__init__.py b/venv/lib/python3.10/site-packages/torch/backends/nnpack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..892dfa022cfc23eb09eafdf9c5ee5811e0cf8c9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/nnpack/__init__.py @@ -0,0 +1,30 @@ +from contextlib import contextmanager + +import torch +from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule + +__all__ = ["is_available", "flags", "set_flags"] + + +def is_available(): + r"""Return whether PyTorch is built with NNPACK support.""" + return torch._nnpack_available() + + +def set_flags(_enabled): + r"""Set if nnpack is enabled globally""" + orig_flags = (torch._C._get_nnpack_enabled(),) + torch._C._set_nnpack_enabled(_enabled) + return orig_flags + + +@contextmanager +def flags(enabled=False): + r"""Context manager for setting if nnpack is enabled globally""" + with __allow_nonbracketed_mutation(): + orig_flags = set_flags(enabled) + try: + yield + finally: + with __allow_nonbracketed_mutation(): + set_flags(orig_flags[0]) diff --git a/venv/lib/python3.10/site-packages/torch/backends/nnpack/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/nnpack/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d250e223cd4a60af84e591a9c18cfba68d63945 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/nnpack/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/openmp/__init__.py b/venv/lib/python3.10/site-packages/torch/backends/openmp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a7fcca12d0c8be54a3a1d733facf2cf9f2e6aaa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/openmp/__init__.py @@ -0,0 +1,6 @@ +import torch + + +def is_available(): + r"""Return whether PyTorch is built with OpenMP support.""" + return torch._C.has_openmp diff --git 
a/venv/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98a2e7966c9c3ca3e8c64d254db7479b8fee90b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/xeon/__init__.py b/venv/lib/python3.10/site-packages/torch/backends/xeon/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62412f5ce010ec7eb2b936fb9883b09f12056c63 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62f862f9232727b28066abc1beaed38b698a7732 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/backends/xeon/run_cpu.py b/venv/lib/python3.10/site-packages/torch/backends/xeon/run_cpu.py new file mode 100644 index 0000000000000000000000000000000000000000..6b54eed29985e71aec630cb8a5ccf2196fd34cae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/backends/xeon/run_cpu.py @@ -0,0 +1,929 @@ +""" +This is a script for launching PyTorch inference on Intel(R) Xeon(R) Scalable Processors with optimal configurations. + +Single instance inference, multi-instance inference are enabled. + +Note: term "instance" here doesn't refer to a cloud instance. This script is executed as a single process. It invokes +multiple "instances" which are formed from multiple threads for each. "instance" is kind of group of threads in this +context. + +Illustrated as below: + +:: + + +-----------------------------+----------------------+-------+ + | process | thread | core | + +=============================+======================+=======+ + | torch.backends.xeon.run_cpu | instance 0: thread 0 | 0 | + | | thread 1 | 1 | + | +----------------------+-------+ + | | instance 1: thread 0 | 2 | + | | thread 1 | 3 | + | +----------------------+-------+ + | | ... | ... | + | +----------------------+-------+ + | | instance N: thread 0 | M | + | | thread 1 | M+1 | + +-----------------------------+----------------------+-------+ + +To get the peak performance on Intel(R) Xeon(R) Scalable Processors, the script optimizes the configuration of thread and memory +management. For thread management, the script configures thread affinity and the preload of Intel OMP library. +For memory management, it configures NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc). 
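+ +For a concrete (hypothetical) illustration: on a two-socket machine with 28 physical cores per socket, --throughput-mode launches 2 instances with 28 cores each, while --latency-mode launches 14 instances with 4 cores each; the actual numbers follow your CPU topology.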
+ +Environment variables that will be set by this script: + ++------------------+-------------------------------------------------------------------------------------------------+ +| Environ Variable | Value | ++==================+=================================================================================================+ +| LD_PRELOAD | Depending on knobs you set, /libiomp5.so, /libjemalloc.so, /libtcmalloc.so might | +| | be appended to LD_PRELOAD. | ++------------------+-------------------------------------------------------------------------------------------------+ +| KMP_AFFINITY | If libiomp5.so is preloaded, KMP_AFFINITY could be set to "granularity=fine,compact,1,0". | ++------------------+-------------------------------------------------------------------------------------------------+ +| KMP_BLOCKTIME | If libiomp5.so is preloaded, KMP_BLOCKTIME is set to "1". | ++------------------+-------------------------------------------------------------------------------------------------+ +| OMP_NUM_THREADS | value of ncores_per_instance | ++------------------+-------------------------------------------------------------------------------------------------+ +| MALLOC_CONF | If libjemalloc.so is preloaded, MALLOC_CONF will be set to | +| | "oversize_threshold:1,background_thread:true,metadata_thp:auto". | ++------------------+-------------------------------------------------------------------------------------------------+ + +*Note*: This script respects environment variables set preliminarily. I.e. If you set the environment variables +mentioned above before running the script, the script will not overwrite the values in the script. + +How to use this module: +~~~~~~~~~~~~~~~~~~~~~~~ + +Single instance inference +------------------------- + +1. Run single-instance inference on a single node with all CPU nodes. + +:: + + python -m torch.backends.xeon.run_cpu --throughput-mode script.py args + +2. Run single-instance inference on a single CPU node. + +:: + + python -m torch.backends.xeon.run_cpu --node-id 1 script.py args + +Multi-instance inference +------------------------ + +1. Multi-instance + By default this tool runs one process per node. If you want to set the instance numbers and core per instance, + --ninstances and --ncores-per-instance should be set. + +:: + + python -m torch.backends.xeon.run_cpu -- python_script args + + eg: on an Intel(R) Xeon(R) Scalable Processor with 14 instance, 4 cores per instance + +:: + + python -m torch.backends.xeon.run_cpu --ninstances 14 --ncores-per-instance 4 python_script args + +2. Run single-instance inference among multiple instances. + By default, runs all ninstances. If you want to independently run a single instance among ninstances, specify rank. + + eg: run 0th instance on an Intel(R) Xeon(R) Scalable Processor with 2 instance (i.e., numactl -C 0-27) + +:: + + python -m torch.backends.xeon.run_cpu --ninstances 2 --rank 0 python_script args + + eg: run 1st instance on an Intel(R) Xeon(R) Scalable Processor with 2 instance (i.e., numactl -C 28-55) + +:: + + python -m torch.backends.xeon.run_cpu --ninstances 2 --rank 1 python_script args + + eg: run 0th instance on an Intel(R) Xeon(R) Scalable Processor with 2 instance, 2 cores per instance, + first four cores (i.e., numactl -C 0-1) + +:: + + python -m torch.backends.xeon.run_cpu --core-list "0, 1, 2, 3" --ninstances 2 --ncores-per-instance 2 + --rank 0 python_script args + +3. 
To look up what optional arguments this module offers: + +:: + + python -m torch.backends.xeon.run_cpu --help + +Memory allocator +---------------- + +"--enable-tcmalloc" and "--enable-jemalloc" can be used to enable different memory allcator. + +""" + +import glob +import logging +import os +import platform +import re +import subprocess +import sys +from argparse import ArgumentParser, RawTextHelpFormatter, REMAINDER +from os.path import expanduser +from typing import Dict, List + +from torch.distributed.elastic.multiprocessing import ( + DefaultLogsSpecs, + start_processes, + Std, +) + +format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" +logging.basicConfig(level=logging.INFO, format=format_str) +logger = logging.getLogger(__name__) + + +class _CPUinfo: + """Get CPU information, such as cores list and NUMA information.""" + + def __init__(self, test_input=""): + self.cpuinfo = [] + if platform.system() in ["Windows", "Darwin"]: + raise RuntimeError(f"{platform.system()} is not supported!!!") + elif platform.system() == "Linux": + # Sample output of: `lscpu --parse=CPU,Core,Socket,Node` + # + # # The following is the parsable format, which can be fed to other + # # programs. Each different item in every column has an unique ID + # # starting from zero. + # # CPU,Core,Socket,Node + # 0,0,0,0 + # 1,1,0,0 + # ... + if test_input == "": + lscpu_cmd = ["lscpu", "--parse=CPU,Core,Socket,Node"] + lscpu_info = subprocess.check_output( + lscpu_cmd, universal_newlines=True + ).split("\n") + else: + lscpu_info = test_input.split("\n") + + # Get information about cpu, core, socket and node + for line in lscpu_info: + pattern = r"^([\d]+,[\d]+,[\d]+,[\d]?)" + regex_out = re.search(pattern, line) + if regex_out: + self.cpuinfo.append(regex_out.group(1).strip().split(",")) + + # physical cores := core column in lscpu output + # logical cores := cPU column in lscpu output + self.node_nums = int(max([line[3] for line in self.cpuinfo])) + 1 + self.node_physical_cores: List[List[int]] = [] # node_id is index + self.node_logical_cores: List[List[int]] = [] # node_id is index + self.physical_core_node_map = {} # physical core to numa node id + self.logical_core_node_map = {} # logical core to numa node id + + for node_id in range(self.node_nums): + cur_node_physical_core = [] + cur_node_logical_core = [] + for cpuinfo in self.cpuinfo: + nid = cpuinfo[3] if cpuinfo[3] != "" else "0" + if node_id == int(nid): + if int(cpuinfo[1]) not in cur_node_physical_core: + cur_node_physical_core.append(int(cpuinfo[1])) + self.physical_core_node_map[int(cpuinfo[1])] = int(node_id) + cur_node_logical_core.append(int(cpuinfo[0])) + self.logical_core_node_map[int(cpuinfo[0])] = int(node_id) + self.node_physical_cores.append(cur_node_physical_core) + self.node_logical_cores.append(cur_node_logical_core) + + def _physical_core_nums(self): + return len(self.node_physical_cores) * len(self.node_physical_cores[0]) + + def _logical_core_nums(self): + return len(self.node_logical_cores) * len(self.node_logical_cores[0]) + + def get_node_physical_cores(self, node_id): + if node_id < 0 or node_id > self.node_nums - 1: + raise ValueError( + f"Invalid node id: {node_id}. Valid node ids: {list(range(len(self.node_physical_cores)))}" + ) + return self.node_physical_cores[node_id] + + def get_node_logical_cores(self, node_id): + if node_id < 0 or node_id > self.node_nums - 1: + raise ValueError( + f"Invalid node id: {node_id}. 
Valid node ids: {list(range(len(self.node_physical_cores)))}" + ) + return self.node_logical_cores[node_id] + + def get_all_physical_cores(self): + all_cores = [] + for cores in self.node_physical_cores: + all_cores.extend(cores) + return all_cores + + def get_all_logical_cores(self): + all_cores = [] + for cores in self.node_logical_cores: + all_cores.extend(cores) + return all_cores + + def numa_aware_check(self, core_list): + """ + Check whether all cores in core_list are in the same NUMA node. + + Cross NUMA will reduce performance. + We strongly advice to not use cores on different nodes. + """ + cores_numa_map = self.logical_core_node_map + numa_ids = [] + for core in core_list: + numa_id = cores_numa_map[core] + if numa_id not in numa_ids: + numa_ids.append(numa_id) + if len(numa_ids) > 1: + logger.warning( + "Numa Aware: cores:%s on different NUMA nodes:%s. To avoid \ +this behavior, please use --ncores-per-instance knob to make sure number of cores is divisible by --ncores-per-\ +instance. Alternatively, please use --skip-cross-node-cores knob.", + str(core_list), + str(numa_ids), + ) + if len(numa_ids) == 0: + raise RuntimeError( + "invalid number of NUMA nodes; please make sure numa_ids >= 1" + ) + return numa_ids + + +class _Launcher: + r"""Class for launcher.""" + + msg_lib_notfound = f"Unable to find the {{0}} library file lib{{1}}.so in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib \ +or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or \ +{expanduser('~')}/.local/lib/ so the LD_PRELOAD environment variable will not be set." + + def __init__(self): + self.cpuinfo = _CPUinfo() + + def add_lib_preload(self, lib_type): + """Enable TCMalloc/JeMalloc/intel OpenMP.""" + library_paths = [] + if "CONDA_PREFIX" in os.environ: + library_paths.append(f"{os.environ['CONDA_PREFIX']}/lib") + if "VIRTUAL_ENV" in os.environ: + library_paths.append(f"{os.environ['VIRTUAL_ENV']}/lib") + + library_paths += [ + f"{expanduser('~')}/.local/lib", + "/usr/local/lib", + "/usr/local/lib64", + "/usr/lib", + "/usr/lib64", + ] + + lib_find = False + lib_set = False + for item in os.getenv("LD_PRELOAD", "").split(":"): + if item.endswith(f"lib{lib_type}.so"): + lib_set = True + break + if not lib_set: + for lib_path in library_paths: + library_file = os.path.join(lib_path, f"lib{lib_type}.so") + matches = glob.glob(library_file) + if len(matches) > 0: + ld_preloads = [f"{matches[0]}", os.getenv("LD_PRELOAD", "")] + os.environ["LD_PRELOAD"] = os.pathsep.join( + [p.strip(os.pathsep) for p in ld_preloads if p] + ) + lib_find = True + break + return lib_set or lib_find + + def is_numactl_available(self): + numactl_available = False + try: + cmd = ["numactl", "-C", "0", "-m", "0", "hostname"] + r = subprocess.run( + cmd, + env=os.environ, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=False, + ) + if r.returncode == 0: + numactl_available = True + except Exception: + pass + return numactl_available + + def set_memory_allocator( + self, enable_tcmalloc=True, enable_jemalloc=False, use_default_allocator=False + ): + """ + Enable TCMalloc/JeMalloc with LD_PRELOAD and set configuration for JeMalloc. + + By default, PTMalloc will be used for PyTorch, but TCMalloc and JeMalloc can get better + memory reuse and reduce page fault to improve performance. + """ + if enable_tcmalloc and enable_jemalloc: + raise RuntimeError( + "Unable to enable TCMalloc and JEMalloc at the same time." 
+ ) + + if enable_tcmalloc: + find_tc = self.add_lib_preload(lib_type="tcmalloc") + if not find_tc: + msg = f'{self.msg_lib_notfound} you can use "conda install -c conda-forge gperftools" to install {{0}}' + logger.warning(msg.format("TCmalloc", "tcmalloc")) # noqa: G001 + else: + logger.info("Use TCMalloc memory allocator") + + elif enable_jemalloc: + find_je = self.add_lib_preload(lib_type="jemalloc") + if not find_je: + msg = f'{self.msg_lib_notfound} you can use "conda install -c conda-forge jemalloc" to install {{0}}' + logger.warning(msg.format("Jemalloc", "jemalloc")) # noqa: G001 + else: + logger.info("Use JeMalloc memory allocator") + self.set_env( + "MALLOC_CONF", + "oversize_threshold:1,background_thread:true,metadata_thp:auto", + ) + + elif use_default_allocator: + pass + + else: + find_tc = self.add_lib_preload(lib_type="tcmalloc") + if find_tc: + logger.info("Use TCMalloc memory allocator") + return + find_je = self.add_lib_preload(lib_type="jemalloc") + if find_je: + logger.info("Use JeMalloc memory allocator") + return + logger.warning( + """Neither TCMalloc nor JeMalloc is found in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib + or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or + %s/.local/lib/ so the LD_PRELOAD environment variable will not be set. + This may drop the performance""", + expanduser("~"), + ) + + def log_env_var(self, env_var_name=""): + if env_var_name in os.environ: + logger.info("%s=%s", env_var_name, os.environ[env_var_name]) + + def set_env(self, env_name, env_value): + if not env_value: + logger.warning("%s is None", env_name) + if env_name not in os.environ: + os.environ[env_name] = env_value + elif os.environ[env_name] != env_value: + logger.warning( + "Overriding value with the one set in environment variable: %s. \ +Value applied: %s. Value ignored: %s", + env_name, + os.environ[env_name], + env_value, + ) + self.log_env_var(env_name) + + # set_kmp_affinity is used to control whether to set KMP_AFFINITY or not. + # In scenario that use all cores on all nodes, including logical cores, setting KMP_AFFINITY disables logical cores. + # In this case, KMP_AFFINITY should not be set. + def set_multi_thread_and_allocator( + self, + ncores_per_instance, + disable_iomp=False, + set_kmp_affinity=True, + enable_tcmalloc=True, + enable_jemalloc=False, + use_default_allocator=False, + ): + """ + Set multi-thread configuration and enable Intel openMP and TCMalloc/JeMalloc. + + By default, GNU openMP and PTMalloc are used in PyTorch. but Intel openMP and TCMalloc/JeMalloc are better alternatives + to get performance benefit. 
+ """ + self.set_memory_allocator( + enable_tcmalloc, enable_jemalloc, use_default_allocator + ) + self.set_env("OMP_NUM_THREADS", str(ncores_per_instance)) + if not disable_iomp: + find_iomp = self.add_lib_preload(lib_type="iomp5") + if not find_iomp: + msg = f'{self.msg_lib_notfound} you can use "conda install mkl" to install {{0}}' + logger.warning(msg.format("iomp", "iomp5")) # noqa: G001 + else: + logger.info("Using Intel OpenMP") + if set_kmp_affinity: + self.set_env("KMP_AFFINITY", "granularity=fine,compact,1,0") + self.set_env("KMP_BLOCKTIME", "1") + self.log_env_var("LD_PRELOAD") + + r""" + Launcher for single instance and multi-instance + """ + + def launch(self, args): + cores = [] + set_kmp_affinity = True + enable_taskset = False + if args.core_list: # user specify what cores will be used by params + cores = [int(x) for x in args.core_list.split(",")] + if args.ncores_per_instance == -1: + raise RuntimeError( + 'please specify the "--ncores-per-instance" if you have pass the --core-list params' + ) + elif ( + args.ninstances > 1 + and args.ncores_per_instance * args.ninstances < len(cores) + ): + logger.warning( + "only first %s cores will be used, \ +but you specify %s cores in core_list", + args.ncores_per_instance * args.ninstances, + len(cores), + ) + else: + args.ninstances = len(cores) // args.ncores_per_instance + + else: + if args.use_logical_core: + if args.node_id != -1: + cores = self.cpuinfo.get_node_logical_cores(args.node_id) + else: + cores = self.cpuinfo.get_all_logical_cores() + # When using all cores on all nodes, including logical cores, + # setting KMP_AFFINITY disables logical cores. Thus, KMP_AFFINITY should not be set. + set_kmp_affinity = False + else: + if args.node_id != -1: + cores = self.cpuinfo.get_node_physical_cores(args.node_id) + else: + cores = self.cpuinfo.get_all_physical_cores() + if ( + not args.multi_instance + and args.ninstances == -1 + and args.ncores_per_instance == -1 + ): + args.ninstances = 1 + args.ncores_per_instance = len(cores) + elif ( + args.multi_instance + and args.ninstances == -1 + and args.ncores_per_instance == -1 + ): + args.throughput_mode = True + elif args.ncores_per_instance == -1 and args.ninstances != -1: + if args.ninstances > len(cores): + raise RuntimeError( + f"there are {len(cores)} total cores but you specify {args.ninstances} ninstances; \ +please make sure ninstances <= total_cores)" + ) + else: + args.ncores_per_instance = len(cores) // args.ninstances + elif args.ncores_per_instance != -1 and args.ninstances == -1: + if not args.skip_cross_node_cores: + args.ninstances = len(cores) // args.ncores_per_instance + else: + ncore_per_node = len(self.cpuinfo.node_physical_cores[0]) + num_leftover_cores = ncore_per_node % args.ncores_per_instance + if args.ncores_per_instance > ncore_per_node: + # too many ncores_per_instance to skip cross-node cores + logger.warning( + "there are %s core(s) per socket, but you specify %s ncores_per_instance and \ +skip_cross_node_cores. Please make sure --ncores-per-instance < core(s) per \ +socket", + ncore_per_node, + args.ncores_per_instance, + ) + sys.exit(-1) + elif num_leftover_cores == 0: + # aren't any cross-node cores + logger.info( + "--skip-cross-node-cores is set, but there are no cross-node cores." + ) + args.ninstances = len(cores) // args.ncores_per_instance + else: + # skip cross-node cores + if args.ninstances != -1: + logger.warning( + "--skip-cross-node-cores is exclusive to --ninstances. --ninstances \ +won't take effect even if it is set explicitly." 
+ ) + + i = 1 + leftover_cores = set() + while ncore_per_node * i <= len(cores): + leftover_cores.update( + cores[ + ncore_per_node * i + - num_leftover_cores : ncore_per_node * i + ] + ) + i += 1 + cores = list(set(cores) - leftover_cores) + assert len(cores) % args.ncores_per_instance == 0 + args.ninstances = len(cores) // args.ncores_per_instance + else: + if args.ninstances * args.ncores_per_instance > len(cores): + raise RuntimeError( + "Please make sure ninstances * ncores_per_instance <= total_cores" + ) + if args.latency_mode: + logger.warning( + "--latency-mode is exclusive to --ninstances, --ncores-per-instance, --node-id and \ +--use-logical-core. They won't take effect even they are set explicitly." + ) + args.ncores_per_instance = 4 + cores = self.cpuinfo.get_all_physical_cores() + args.ninstances = len(cores) // args.ncores_per_instance + + if args.throughput_mode: + logger.warning( + "--throughput-mode is exclusive to --ninstances, --ncores-per-instance, --node-id and \ +--use-logical-core. They won't take effect even they are set explicitly." + ) + args.ninstances = self.cpuinfo.node_nums + cores = self.cpuinfo.get_all_physical_cores() + args.ncores_per_instance = len(cores) // args.ninstances + + if args.ninstances > 1 and args.rank != -1: + logger.info( + "assigning %s cores for instance %s", + args.ncores_per_instance, + args.rank, + ) + + if not args.disable_numactl: + numactl_available = self.is_numactl_available() + if not numactl_available: + if not args.disable_taskset: + logger.warning( + "Core binding with numactl is not available. Disabling numactl and using taskset instead. \ + This may affect performance in multi-socket system; please use numactl if memory binding is needed." + ) + args.disable_numactl = True + enable_taskset = True + else: + logger.warning( + "Core binding with numactl is not available, and --disable_taskset is set. \ + Please unset --disable_taskset to use taskset instead of numactl." 
+ ) + sys.exit(-1) + + if not args.disable_taskset: + enable_taskset = True + + self.set_multi_thread_and_allocator( + args.ncores_per_instance, + args.disable_iomp, + set_kmp_affinity, + args.enable_tcmalloc, + args.enable_jemalloc, + args.use_default_allocator, + ) + entrypoint = "" + launch_args = {} + launch_envs: Dict[int, Dict] = {} + launch_tee = {} + for i in range(args.ninstances): + cmd = [] + cur_process_cores = "" + if not args.disable_numactl or enable_taskset: + if not args.disable_numactl: + cmd = ["numactl"] + elif enable_taskset: + cmd = ["taskset"] + cores = sorted(cores) + if ( + args.rank == -1 + ): # sequentially assign ncores_per_instance to ninstances + core_list = cores[ + i + * args.ncores_per_instance : (i + 1) + * args.ncores_per_instance + ] + else: # assign ncores_per_instance from rank + core_list = cores[ + args.rank + * args.ncores_per_instance : (args.rank + 1) + * args.ncores_per_instance + ] + + core_ranges: List[Dict] = [] + for core in core_list: + if len(core_ranges) == 0: + range_elem = {"start": core, "end": core} + core_ranges.append(range_elem) + else: + if core - core_ranges[-1]["end"] == 1: + core_ranges[-1]["end"] = core + else: + range_elem = {"start": core, "end": core} + core_ranges.append(range_elem) + for r in core_ranges: + cur_process_cores = f"{cur_process_cores}{r['start']}-{r['end']}," + cur_process_cores = cur_process_cores[:-1] + if not args.disable_numactl: + numa_params = f"-C {cur_process_cores} " + numa_ids = ",".join( + [ + str(numa_id) + for numa_id in self.cpuinfo.numa_aware_check(core_list) + ] + ) + numa_params += f"-m {numa_ids}" + cmd.extend(numa_params.split()) + elif enable_taskset: + taskset_params = f"-c {cur_process_cores} " + cmd.extend(taskset_params.split()) + with_python = not args.no_python + if with_python: + cmd.append(sys.executable) + cmd.append("-u") + if args.module: + cmd.append("-m") + cmd.append(args.program) + cmd.extend(args.program_args) + cmd_s = " ".join(cmd) + logger.info(cmd_s) + if entrypoint == "": + entrypoint = cmd[0] + del cmd[0] + launch_args[i] = tuple(cmd) + launch_envs[i] = {} + launch_tee[i] = Std.ALL + + if args.rank != -1: # launches single instance, rank, only + break + + ctx = start_processes( + name=args.log_file_prefix, + entrypoint=entrypoint, + args=launch_args, + envs=launch_envs, + logs_specs=DefaultLogsSpecs(log_dir=args.log_path, tee=launch_tee), + ) + ctx.wait() + + +def _add_memory_allocator_params(parser): + group = parser.add_argument_group("Memory Allocator Parameters") + # allocator control + group.add_argument( + "--enable-tcmalloc", + "--enable_tcmalloc", + action="store_true", + default=False, + help="Enable tcmalloc allocator", + ) + group.add_argument( + "--enable-jemalloc", + "--enable_jemalloc", + action="store_true", + default=False, + help="Enable jemalloc allocator", + ) + group.add_argument( + "--use-default-allocator", + "--use_default_allocator", + action="store_true", + default=False, + help="Use default memory allocator", + ) + + +def _add_multi_instance_params(parser): + group = parser.add_argument_group("Multi-instance Parameters") + # multi-instance control + group.add_argument( + "--ncores-per-instance", + "--ncores_per_instance", + metavar="\b", + default=-1, + type=int, + help="Cores per instance", + ) + group.add_argument( + "--ninstances", + metavar="\b", + default=-1, + type=int, + help="For multi-instance, you should give the cores number you used for per instance.", + ) + group.add_argument( + "--skip-cross-node-cores", + 
"--skip_cross_node_cores", + action="store_true", + default=False, + help="If specified --ncores-per-instance, skips cross-node cores.", + ) + group.add_argument( + "--rank", + metavar="\b", + default="-1", + type=int, + help="Specify instance index to assign ncores_per_instance for rank; \ +otherwise ncores_per_instance will be assigned sequentially to ninstances. Please refer to \ +https://github.com/intel/intel-extension-for-pytorch/blob/master/docs/tutorials/performance_tuning/launch_script.md", + ) + group.add_argument( + "--latency-mode", + "--latency_mode", + action="store_true", + default=False, + help="By default 4 core per instance and use all physical cores", + ) + group.add_argument( + "--throughput-mode", + "--throughput_mode", + action="store_true", + default=False, + help="By default one instance per node and use all physical cores", + ) + group.add_argument( + "--node-id", + "--node_id", + metavar="\b", + default=-1, + type=int, + help="node id for multi-instance, by default all nodes will be used", + ) + group.add_argument( + "--use-logical-core", + "--use_logical_core", + action="store_true", + default=False, + help="Whether only use physical cores", + ) + group.add_argument( + "--disable-numactl", + "--disable_numactl", + action="store_true", + default=False, + help="Disable numactl", + ) + group.add_argument( + "--disable-taskset", + "--disable_taskset", + action="store_true", + default=False, + help="Disable taskset", + ) + group.add_argument( + "--core-list", + "--core_list", + metavar="\b", + default=None, + type=str, + help='Specify the core list as "core_id, core_id, ....", otherwise, all the cores will be used.', + ) + group.add_argument( + "--log-path", + "--log_path", + metavar="\b", + default="", + type=str, + help="The log file directory. Default path is " + ", which means disable logging to files.", + ) + group.add_argument( + "--log-file-prefix", + "--log_file_prefix", + metavar="\b", + default="run", + type=str, + help="log file prefix", + ) + + +def _add_kmp_iomp_params(parser): + group = parser.add_argument_group("IOMP Parameters") + group.add_argument( + "--disable-iomp", + "--disable_iomp", + action="store_true", + default=False, + help="By default, we use Intel OpenMP and libiomp5.so will be add to LD_PRELOAD", + ) + + +def create_args(parser=None): + """ + Parse the command line options. + + @retval ArgumentParser + """ + parser.add_argument( + "--multi-instance", + "--multi_instance", + action="store_true", + default=False, + help="Enable multi-instance, by default one instance per node", + ) + + parser.add_argument( + "-m", + "--module", + default=False, + action="store_true", + help="Changes each process to interpret the launch script " + "as a python module, executing with the same behavior as" + '"python -m".', + ) + + parser.add_argument( + "--no-python", + "--no_python", + default=False, + action="store_true", + help='Do not prepend the --program script with "python" - just exec ' + "it directly. Useful when the script is not a Python script.", + ) + + _add_memory_allocator_params(parser) + _add_kmp_iomp_params(parser) + + _add_multi_instance_params(parser) + # positional + parser.add_argument( + "program", + type=str, + help="The full path to the program/script to be launched. 
" + "followed by all the arguments for the script", + ) + + # rest from the training program + parser.add_argument("program_args", nargs=REMAINDER) + + +def main(args): + env_before = set(os.environ.keys()) + if platform.system() in ["Windows", "Darwin"]: + raise RuntimeError(f"{platform.system()} is not supported!!!") + + if args.log_path: + os.makedirs(args.log_path, exist_ok=True) + else: + args.log_path = os.devnull + + if args.latency_mode and args.throughput_mode: + raise RuntimeError( + "Either args.latency_mode or args.throughput_mode should be set" + ) + + if not args.no_python and not args.program.endswith(".py"): + raise RuntimeError( + 'For non Python script, you should use "--no-python" parameter.' + ) + + # Verify LD_PRELOAD + if "LD_PRELOAD" in os.environ: + lst_valid = [] + tmp_ldpreload = os.environ["LD_PRELOAD"] + for item in tmp_ldpreload.split(":"): + matches = glob.glob(item) + if len(matches) > 0: + lst_valid.append(item) + else: + logger.warning("%s doesn't exist. Removing it from LD_PRELOAD.", item) + if len(lst_valid) > 0: + os.environ["LD_PRELOAD"] = ":".join(lst_valid) + else: + os.environ["LD_PRELOAD"] = "" + + launcher = _Launcher() + launcher.launch(args) + for x in sorted(set(os.environ.keys()) - env_before): + logger.debug("%s=%s", x, os.environ[x]) + + +if __name__ == "__main__": + parser = ArgumentParser( + description="This is a script for launching PyTorch inference on Intel(R) Xeon(R) Scalable " + "Processors with optimal configurations. Single instance inference, " + "multi-instance inference are enable. To get the peak performance on Intel(R) " + "Xeon(R) Scalable Processors, the script optimizes the configuration " + "of thread and memory management. For thread management, the script configures thread " + "affinity and the preload of Intel OMP library. For memory management, it configures " + "NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc) " + "\n################################# Basic usage ############################# \n" + "\n 1. single instance\n" + "\n >>> python -m torch.backends.xeon.run_cpu python_script args \n" + "\n2. multi-instance \n" + "\n >>> python -m torch.backends.xeon.run_cpu --ninstances xxx " + "--ncores-per-instance xx python_script args\n" + "\n############################################################################# \n", + formatter_class=RawTextHelpFormatter, + ) + create_args(parser) + args = parser.parse_args() + main(args)