diff --git a/ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b885806556abe37fa5a598926b49681ae7edb322
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70e4dcc64eeea313e32c8e1851f202027890fa5ac11678574603256981e1d814
+size 50332843
diff --git a/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1161b2cf66176a3914378a7091b38f2008c16b99
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d49a710942fc3e1cbd6e72d9796e19901058eb79fca810807b3d34cc3a819410
+size 50332843
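The two `.pt` entries above are git-lfs pointer files rather than the tensors themselves: each records the pointer-spec version, a sha256 object id, and the object size in bytes. A minimal sketch of parsing that three-line layout (the function name is illustrative, not part of git-lfs):

```python
# Sketch: parse a git-lfs pointer file ("key value" lines: version, oid, size).
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, _, digest = fields["oid"].partition(":")
    return {
        "spec": fields["version"],    # git-lfs pointer spec URL
        "algorithm": algo,            # "sha256"
        "digest": digest,             # hex object id of the real blob
        "size": int(fields["size"]),  # size of the real blob in bytes
    }
```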
diff --git a/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/LICENSE b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..67590a5e5be5a5a2dde3fe53a7512e404a896c22
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2018, Martin Durant
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..f885cd9222a0b1e67a0f676d20a9ea2c07553dfb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/METADATA
@@ -0,0 +1,167 @@
+Metadata-Version: 2.1
+Name: fsspec
+Version: 2024.3.1
+Summary: File-system specification
+Home-page: https://github.com/fsspec/filesystem_spec
+Maintainer: Martin Durant
+Maintainer-email: mdurant@anaconda.com
+License: BSD
+Project-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html
+Project-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/
+Keywords: file
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Provides-Extra: abfs
+Requires-Dist: adlfs ; extra == 'abfs'
+Provides-Extra: adl
+Requires-Dist: adlfs ; extra == 'adl'
+Provides-Extra: arrow
+Requires-Dist: pyarrow >=1 ; extra == 'arrow'
+Provides-Extra: dask
+Requires-Dist: dask ; extra == 'dask'
+Requires-Dist: distributed ; extra == 'dask'
+Provides-Extra: devel
+Requires-Dist: pytest ; extra == 'devel'
+Requires-Dist: pytest-cov ; extra == 'devel'
+Provides-Extra: dropbox
+Requires-Dist: dropboxdrivefs ; extra == 'dropbox'
+Requires-Dist: requests ; extra == 'dropbox'
+Requires-Dist: dropbox ; extra == 'dropbox'
+Provides-Extra: entrypoints
+Provides-Extra: full
+Requires-Dist: adlfs ; extra == 'full'
+Requires-Dist: aiohttp !=4.0.0a0,!=4.0.0a1 ; extra == 'full'
+Requires-Dist: dask ; extra == 'full'
+Requires-Dist: distributed ; extra == 'full'
+Requires-Dist: dropbox ; extra == 'full'
+Requires-Dist: dropboxdrivefs ; extra == 'full'
+Requires-Dist: fusepy ; extra == 'full'
+Requires-Dist: gcsfs ; extra == 'full'
+Requires-Dist: libarchive-c ; extra == 'full'
+Requires-Dist: ocifs ; extra == 'full'
+Requires-Dist: panel ; extra == 'full'
+Requires-Dist: paramiko ; extra == 'full'
+Requires-Dist: pyarrow >=1 ; extra == 'full'
+Requires-Dist: pygit2 ; extra == 'full'
+Requires-Dist: requests ; extra == 'full'
+Requires-Dist: s3fs ; extra == 'full'
+Requires-Dist: smbprotocol ; extra == 'full'
+Requires-Dist: tqdm ; extra == 'full'
+Provides-Extra: fuse
+Requires-Dist: fusepy ; extra == 'fuse'
+Provides-Extra: gcs
+Requires-Dist: gcsfs ; extra == 'gcs'
+Provides-Extra: git
+Requires-Dist: pygit2 ; extra == 'git'
+Provides-Extra: github
+Requires-Dist: requests ; extra == 'github'
+Provides-Extra: gs
+Requires-Dist: gcsfs ; extra == 'gs'
+Provides-Extra: gui
+Requires-Dist: panel ; extra == 'gui'
+Provides-Extra: hdfs
+Requires-Dist: pyarrow >=1 ; extra == 'hdfs'
+Provides-Extra: http
+Requires-Dist: aiohttp !=4.0.0a0,!=4.0.0a1 ; extra == 'http'
+Provides-Extra: libarchive
+Requires-Dist: libarchive-c ; extra == 'libarchive'
+Provides-Extra: oci
+Requires-Dist: ocifs ; extra == 'oci'
+Provides-Extra: s3
+Requires-Dist: s3fs ; extra == 's3'
+Provides-Extra: sftp
+Requires-Dist: paramiko ; extra == 'sftp'
+Provides-Extra: smb
+Requires-Dist: smbprotocol ; extra == 'smb'
+Provides-Extra: ssh
+Requires-Dist: paramiko ; extra == 'ssh'
+Provides-Extra: tqdm
+Requires-Dist: tqdm ; extra == 'tqdm'
+
+# filesystem_spec
+
+[PyPI](https://pypi.python.org/pypi/fsspec/)
+[conda-forge](https://anaconda.org/conda-forge/fsspec)
+
+[Documentation](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)
+[Downloads](https://pepy.tech/project/fsspec)
+
+A specification for pythonic filesystems.
+
+## Install
+
+```bash
+pip install fsspec
+```
+
+installs the base fsspec package. Optional features may require extra
+dependencies, e.g. `pip install fsspec[ssh]` installs the dependencies for
+`ssh` backend support. Use `pip install fsspec[full]` to install all known
+extra dependencies.
+
+An up-to-date package is also provided through the conda-forge distribution:
+
+```bash
+conda install -c conda-forge fsspec
+```
+
+
+## Purpose
+
+To produce a template or specification for a file-system interface that
+specific implementations should follow, so that applications making use of
+them can rely on common behaviour without worrying about the internal
+implementation decisions of any given backend. Many such implementations are
+included in this package, or in sister projects such as `s3fs` and `gcsfs`.
+
+In addition, if this is well designed, then additional functionality, such as
+a key-value store or FUSE mounting of the file-system implementation, may
+become available for all implementations "for free".
+
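+As a minimal sketch of that common behaviour, using the built-in `memory`
+backend (any registered backend, such as those from `s3fs` or `gcsfs`,
+exposes the same methods):
+
+```python
+import fsspec
+
+# Instantiate a backend by protocol name; "s3", "gcs", etc. work the same way.
+fs = fsspec.filesystem("memory")
+with fs.open("/demo/hello.txt", "w") as f:
+    f.write("hello")
+print(fs.cat("/demo/hello.txt"))  # b'hello', via the uniform bytes API
+```
+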
+## Documentation
+
+Please refer to [RTD](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)
+
+## Develop
+
+fsspec uses GitHub Actions for CI. Environment files can be found
+in the "ci/" directory. Note that the main environment is called "py38",
+but the installed Python version is expected to be adjustable at
+CI runtime. For local use, pick a version suitable for you.
+
+### Testing
+
+Tests can be run in the dev environment, if activated, via ``pytest fsspec``.
+
+The full fsspec suite requires a system-level docker, docker-compose, and fuse
+installation. If only making changes to one backend implementation, it is
+not generally necessary to run all tests locally.
+
+It is expected that contributors ensure that any change to fsspec does not
+cause issues or regressions either for other fsspec-related packages such
+as gcsfs and s3fs, or for downstream users of fsspec. The "downstream" CI
+run and corresponding environment file run a set of tests from the dask
+test suite, and very minimal tests against pandas and zarr from the
+test_downstream.py module in this repo.
+
+### Code Formatting
+
+fsspec uses [Black](https://black.readthedocs.io/en/stable) to ensure
+a consistent code format throughout the project.
+Run ``black fsspec`` from the root of the filesystem_spec repository to
+auto-format your code. Additionally, many editors have plugins that will apply
+``black`` as you edit files. ``black`` is included in the ``tox`` environments.
+
+Optionally, you may wish to set up [pre-commit hooks](https://pre-commit.com) to
+automatically run ``black`` when you make a git commit.
+Run ``pre-commit install --install-hooks`` from the root of the
+filesystem_spec repository to set up pre-commit hooks. ``black`` will now be run
+before you commit, reformatting any changed files. You can format without
+committing via ``pre-commit run`` or skip these checks with ``git commit
+--no-verify``.
diff --git a/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..9afca34810c31218652469db522c8183649871cc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/RECORD
@@ -0,0 +1,104 @@
+fsspec-2024.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+fsspec-2024.3.1.dist-info/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513
+fsspec-2024.3.1.dist-info/METADATA,sha256=Wv4QVGqB4lYfHfgP-Cfby1Nce57WYXXAhH0f6Ju5FUM,6786
+fsspec-2024.3.1.dist-info/RECORD,,
+fsspec-2024.3.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+fsspec-2024.3.1.dist-info/top_level.txt,sha256=blt2pDrQDwN3Gklcw13CSPLQRd6aaOgJ8AxqrW395MI,7
+fsspec/__init__.py,sha256=C8G5rqeNAes0NHYFhdxcw-ZMg4yDipqzSNc4NIR7uoc,2010
+fsspec/__pycache__/__init__.cpython-310.pyc,,
+fsspec/__pycache__/_version.cpython-310.pyc,,
+fsspec/__pycache__/archive.cpython-310.pyc,,
+fsspec/__pycache__/asyn.cpython-310.pyc,,
+fsspec/__pycache__/caching.cpython-310.pyc,,
+fsspec/__pycache__/callbacks.cpython-310.pyc,,
+fsspec/__pycache__/compression.cpython-310.pyc,,
+fsspec/__pycache__/config.cpython-310.pyc,,
+fsspec/__pycache__/conftest.cpython-310.pyc,,
+fsspec/__pycache__/core.cpython-310.pyc,,
+fsspec/__pycache__/dircache.cpython-310.pyc,,
+fsspec/__pycache__/exceptions.cpython-310.pyc,,
+fsspec/__pycache__/fuse.cpython-310.pyc,,
+fsspec/__pycache__/generic.cpython-310.pyc,,
+fsspec/__pycache__/gui.cpython-310.pyc,,
+fsspec/__pycache__/mapping.cpython-310.pyc,,
+fsspec/__pycache__/parquet.cpython-310.pyc,,
+fsspec/__pycache__/registry.cpython-310.pyc,,
+fsspec/__pycache__/spec.cpython-310.pyc,,
+fsspec/__pycache__/transaction.cpython-310.pyc,,
+fsspec/__pycache__/utils.cpython-310.pyc,,
+fsspec/_version.py,sha256=wHq_BWmvVWba6IeFL8lWxmbw1fXQhCR4NJHH8b77Nxs,500
+fsspec/archive.py,sha256=S__DzfZj-urAN3tp2W6jJ6YDiXG1fAl7FjvWUN73qIE,2386
+fsspec/asyn.py,sha256=AOd2SXH2YPCaQL5jA6IegYevdMFkAnGD7Seh9DC2gSE,36404
+fsspec/caching.py,sha256=TrZqKo3drK9Afujg7grZRiLNcmgUr84rnvMcojzURnI,28819
+fsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210
+fsspec/compression.py,sha256=Yyd8FXw2rwWRtVoRVah_yguv-J7BUcBo4yDu6Qt52a0,4859
+fsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279
+fsspec/conftest.py,sha256=fVfx-NLrH_OZS1TIpYNoPzM7efEcMoL62reHOdYeFCA,1245
+fsspec/core.py,sha256=kkwJ7IR3-i1C9SAZ_oGrPpbM5hqpBZ2OLkuHU5a1sYE,22471
+fsspec/dircache.py,sha256=YzogWJrhEastHU7vWz-cJiJ7sdtLXFXhEpInGKd4EcM,2717
+fsspec/exceptions.py,sha256=xcS7LiRrQ748kvOB9mrUR14kpjNztrHgEkZWi9M-VaI,330
+fsspec/fuse.py,sha256=66amOa6wdIbS0DMhhfAPUoOB37HPorfXD1izV0prmTY,10145
+fsspec/generic.py,sha256=jIA7wBwtUzJhTth78PTzWbOBjGom2e4IjmQ_KBSlHPg,13575
+fsspec/gui.py,sha256=XKoXZpUhRE7jOhRCJH4-jRbKhVu56aS8h9tecvPD3nc,13932
+fsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fsspec/implementations/__pycache__/__init__.cpython-310.pyc,,
+fsspec/implementations/__pycache__/arrow.cpython-310.pyc,,
+fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc,,
+fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc,,
+fsspec/implementations/__pycache__/cached.cpython-310.pyc,,
+fsspec/implementations/__pycache__/dask.cpython-310.pyc,,
+fsspec/implementations/__pycache__/data.cpython-310.pyc,,
+fsspec/implementations/__pycache__/dbfs.cpython-310.pyc,,
+fsspec/implementations/__pycache__/dirfs.cpython-310.pyc,,
+fsspec/implementations/__pycache__/ftp.cpython-310.pyc,,
+fsspec/implementations/__pycache__/git.cpython-310.pyc,,
+fsspec/implementations/__pycache__/github.cpython-310.pyc,,
+fsspec/implementations/__pycache__/http.cpython-310.pyc,,
+fsspec/implementations/__pycache__/jupyter.cpython-310.pyc,,
+fsspec/implementations/__pycache__/libarchive.cpython-310.pyc,,
+fsspec/implementations/__pycache__/local.cpython-310.pyc,,
+fsspec/implementations/__pycache__/memory.cpython-310.pyc,,
+fsspec/implementations/__pycache__/reference.cpython-310.pyc,,
+fsspec/implementations/__pycache__/sftp.cpython-310.pyc,,
+fsspec/implementations/__pycache__/smb.cpython-310.pyc,,
+fsspec/implementations/__pycache__/tar.cpython-310.pyc,,
+fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc,,
+fsspec/implementations/__pycache__/zip.cpython-310.pyc,,
+fsspec/implementations/arrow.py,sha256=_7TLuV6ZzNlpmUU_v6ud56u2wadzsKmY5qugPBxgMEs,8649
+fsspec/implementations/cache_mapper.py,sha256=iHgBA6gjzDJ7_mBboHFzpLTf55HP3UEwUOZ43xyUK4M,2429
+fsspec/implementations/cache_metadata.py,sha256=ZvyA7Y3KK-5Ct4E5pELzD6mH_5T03XqaKVT96qYDADU,8576
+fsspec/implementations/cached.py,sha256=CuxQXQ6f-MRnLvsRzvauEhpmXEgicZZCfViKjIu1kn4,33029
+fsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466
+fsspec/implementations/data.py,sha256=LDLczxRh8h7x39Zjrd-GgzdQHr78yYxDlrv2C9Uxb5E,1658
+fsspec/implementations/dbfs.py,sha256=cix9OYUveuSOx5UO5uRUwNUkYqjzyY0fkKnca1kTgZ0,15014
+fsspec/implementations/dirfs.py,sha256=inDIRSDPhI1_ud1MMBFrpZQ11VIAMJ_dZQtbE4V08Ng,11384
+fsspec/implementations/ftp.py,sha256=rp6cTog8xqjDPlKdSLKcsyP7K593_ByMabxGbNSEpTo,11655
+fsspec/implementations/git.py,sha256=vKGI-Vd5q4H2RrvhebkPc9NwlfkZ980OUGhebeCw-M0,4034
+fsspec/implementations/github.py,sha256=0kIiKkeAaROuHgdWBHVQFrzJ2ZfoDgymCehL_kJXHYA,7565
+fsspec/implementations/http.py,sha256=yr6t0OhLwZx_pvNQ05detAehcQjRw0Pg6XdwWv81jxk,29601
+fsspec/implementations/jupyter.py,sha256=B2uj7OEm7yIk-vRSsO37_ND0t0EBvn4B-Su43ibN4Pg,3811
+fsspec/implementations/libarchive.py,sha256=5_I2DiLXwQ1JC8x-K7jXu-tBwhO9dj7tFLnb0bTnVMQ,7102
+fsspec/implementations/local.py,sha256=XLsBoG4lf92w5ZddmbHXcStThSYxHgeoJEd2Mp5Uo0Y,14327
+fsspec/implementations/memory.py,sha256=tlaGCekgZ2Z_9n8B6hsSpo6_V89PwjugkOmD445QrqE,9778
+fsspec/implementations/reference.py,sha256=3dPi55riD_cROCafpeoUm2Xbb1vpXpyQijl09f5jTsE,43871
+fsspec/implementations/sftp.py,sha256=fMY9XZcmpjszQ2tCqO_TPaJesaeD_Dv7ptYzgUPGoO0,5631
+fsspec/implementations/smb.py,sha256=_hR5MKwDUCi3u5zEzvnenIWRIGPMhuUryWRYvNSp0WE,10804
+fsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111
+fsspec/implementations/webhdfs.py,sha256=wqVfno7z0TY1HepaIvKTUUcl_bi5NkV6qWsST8t_s7Y,16745
+fsspec/implementations/zip.py,sha256=vc1fNz-yO8uWQ9bQUqBFYpTcgsfZQq9vDwwg4Aufs9Y,4417
+fsspec/mapping.py,sha256=hSsiRo-dgAOj6oHf67bF3i11U4xREglXToHGUX4GhRY,8261
+fsspec/parquet.py,sha256=qVxDhwc960SGOt5etcYAJxCr-7HQKP01687KpDR02Gw,19463
+fsspec/registry.py,sha256=VjnfGUg39lvlCDxOoGSeYm8UL12wZC5yQ5n8-W92eD0,11173
+fsspec/spec.py,sha256=3t96RgizRN_slIuHXnuR0bXjVUfBS1TfuDrEua4oQvE,66277
+fsspec/tests/abstract/__init__.py,sha256=i1wcFixV6QhOwdoB24c8oXjzobISNqiKVz9kl2DvAY8,10028
+fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc,,
+fsspec/tests/abstract/__pycache__/common.cpython-310.pyc,,
+fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc,,
+fsspec/tests/abstract/__pycache__/get.cpython-310.pyc,,
+fsspec/tests/abstract/__pycache__/put.cpython-310.pyc,,
+fsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973
+fsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967
+fsspec/tests/abstract/get.py,sha256=vNR4HztvTR7Cj56AMo7_tx7TeYz1Jgr_2Wb8Lv-UiBY,20755
+fsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201
+fsspec/transaction.py,sha256=xliRG6U2Zf3khG4xcw9WiB-yAoqJSHEGK_VjHOdtgo0,2398
+fsspec/utils.py,sha256=kYBiiw_0AEFJPCYwYlbZ7QxiGwuXdaaxrK6KM3mcFak,23052
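Each RECORD line above has the form `path,sha256=<digest>,size`, where the digest is the unpadded urlsafe-base64 SHA-256 of the installed file. A small sketch of checking one entry (helper name illustrative; assumes no commas in the path, which RECORD would otherwise CSV-quote):

```python
import base64
import hashlib

# Sketch: verify a RECORD entry of the form "path,sha256=<b64-digest>,size".
def verify_record_entry(entry: str) -> bool:
    path, hash_spec, size = entry.rsplit(",", 2)
    if not hash_spec:  # e.g. the RECORD file itself carries no hash
        return True
    data = open(path, "rb").read()
    if size and int(size) != len(data):
        return False
    algo, _, expected = hash_spec.partition("=")
    digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
    return digest.rstrip(b"=").decode() == expected
```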
diff --git a/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.43.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..968fea66e533ba30593c7fbfe750c36fae2f3cfe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+fsspec
diff --git a/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2457de3c33ed058a53dee761ffb864fc3f2bc658
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/LICENSE.txt
@@ -0,0 +1,81 @@
+hjson (based on simplejson) is dual-licensed software. It is available under the terms
+of the MIT license, or the Academic Free License version 2.1. The full
+text of each license agreement is included below. This code is also
+licensed to the Python Software Foundation (PSF) under a Contributor
+Agreement.
+
+MIT License
+===========
+
+Copyright (c) 2006 Bob Ippolito
+Copyright (c) 2015 Christian Zangl
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Academic Free License v. 2.1
+============================
+
+Copyright (c) 2006 Bob Ippolito. All rights reserved.
+Copyright (c) 2015 Christian Zangl
+
+This Academic Free License (the "License") applies to any original work of authorship (the "Original Work") whose owner (the "Licensor") has placed the following notice immediately following the copyright notice for the Original Work:
+
+Licensed under the Academic Free License version 2.1
+
+1) Grant of Copyright License. Licensor hereby grants You a world-wide, royalty-free, non-exclusive, perpetual, sublicenseable license to do the following:
+
+a) to reproduce the Original Work in copies;
+
+b) to prepare derivative works ("Derivative Works") based upon the Original Work;
+
+c) to distribute copies of the Original Work and Derivative Works to the public;
+
+d) to perform the Original Work publicly; and
+
+e) to display the Original Work publicly.
+
+2) Grant of Patent License. Licensor hereby grants You a world-wide, royalty-free, non-exclusive, perpetual, sublicenseable license, under patent claims owned or controlled by the Licensor that are embodied in the Original Work as furnished by the Licensor, to make, use, sell and offer for sale the Original Work and Derivative Works.
+
+3) Grant of Source Code License. The term "Source Code" means the preferred form of the Original Work for making modifications to it and all available documentation describing how to modify the Original Work. Licensor hereby agrees to provide a machine-readable copy of the Source Code of the Original Work along with each copy of the Original Work that Licensor distributes. Licensor reserves the right to satisfy this obligation by placing a machine-readable copy of the Source Code in an information repository reasonably calculated to permit inexpensive and convenient access by You for as long as Licensor continues to distribute the Original Work, and by publishing the address of that information repository in a notice immediately following the copyright notice that applies to the Original Work.
+
+4) Exclusions From License Grant. Neither the names of Licensor, nor the names of any contributors to the Original Work, nor any of their trademarks or service marks, may be used to endorse or promote products derived from this Original Work without express prior written permission of the Licensor. Nothing in this License shall be deemed to grant any rights to trademarks, copyrights, patents, trade secrets or any other intellectual property of Licensor except as expressly stated herein. No patent license is granted to make, use, sell or offer to sell embodiments of any patent claims other than the licensed claims defined in Section 2. No right is granted to the trademarks of Licensor even if such marks are included in the Original Work. Nothing in this License shall be interpreted to prohibit Licensor from licensing under different terms from this License any Original Work that Licensor otherwise would have a right to license.
+
+5) This section intentionally omitted.
+
+6) Attribution Rights. You must retain, in the Source Code of any Derivative Works that You create, all copyright, patent or trademark notices from the Source Code of the Original Work, as well as any notices of licensing and any descriptive text identified therein as an "Attribution Notice." You must cause the Source Code for any Derivative Works that You create to carry a prominent Attribution Notice reasonably calculated to inform recipients that You have modified the Original Work.
+
+7) Warranty of Provenance and Disclaimer of Warranty. Licensor warrants that the copyright in and to the Original Work and the patent rights granted herein by Licensor are owned by the Licensor or are sublicensed to You under the terms of this License with the permission of the contributor(s) of those copyrights and patent rights. Except as expressly stated in the immediately proceeding sentence, the Original Work is provided under this License on an "AS IS" BASIS and WITHOUT WARRANTY, either express or implied, including, without limitation, the warranties of NON-INFRINGEMENT, MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. This DISCLAIMER OF WARRANTY constitutes an essential part of this License. No license to Original Work is granted hereunder except under this disclaimer.
+
+8) Limitation of Liability. Under no circumstances and under no legal theory, whether in tort (including negligence), contract, or otherwise, shall the Licensor be liable to any person for any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or the use of the Original Work including, without limitation, damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses. This limitation of liability shall not apply to liability for death or personal injury resulting from Licensor's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.
+
+9) Acceptance and Termination. If You distribute copies of the Original Work or a Derivative Work, You must make a reasonable effort under the circumstances to obtain the express assent of recipients to the terms of this License. Nothing else but this License (or another written agreement between Licensor and You) grants You permission to create Derivative Works based upon the Original Work or to exercise any of the rights granted in Section 1 herein, and any attempt to do so except under the terms of this License (or another written agreement between Licensor and You) is expressly prohibited by U.S. copyright law, the equivalent laws of other countries, and by international treaty. Therefore, by exercising any of the rights granted to You in Section 1 herein, You indicate Your acceptance of this License and all of its terms and conditions.
+
+10) Termination for Patent Action. This License shall terminate automatically and You may no longer exercise any of the rights granted to You by this License as of the date You commence an action, including a cross-claim or counterclaim, against Licensor or any licensee alleging that the Original Work infringes a patent. This termination provision shall not apply for an action alleging patent infringement by combinations of the Original Work with other software or hardware.
+
+11) Jurisdiction, Venue and Governing Law. Any action or suit relating to this License may be brought only in the courts of a jurisdiction wherein the Licensor resides or in which Licensor conducts its primary business, and under the laws of that jurisdiction excluding its conflict-of-law provisions. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any use of the Original Work outside the scope of this License or after its termination shall be subject to the requirements and penalties of the U.S. Copyright Act, 17 U.S.C. § 101 et seq., the equivalent laws of other countries, and international treaty. This section shall survive the termination of this License.
+
+12) Attorneys Fees. In any action to enforce the terms of this License or seeking damages relating thereto, the prevailing party shall be entitled to recover its costs and expenses, including, without limitation, reasonable attorneys' fees and costs incurred in connection with such action, including any appeal of such action. This section shall survive the termination of this License.
+
+13) Miscellaneous. This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable.
+
+14) Definition of "You" in This License. "You" throughout this License, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with you. For purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+15) Right to Use. You may use the Original Work in all ways not otherwise restricted or conditioned by this License or by law, and Licensor promises not to interfere with or be responsible for such uses by You.
+
+This license is Copyright (C) 2003-2004 Lawrence E. Rosen. All rights reserved. Permission is hereby granted to copy and distribute this license without modification. This license may not be modified without the express written permission of its copyright owner.
diff --git a/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..5d78e45f7013def95c59fe264fd5cf54c1f00b31
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/METADATA
@@ -0,0 +1,125 @@
+Metadata-Version: 2.1
+Name: hjson
+Version: 3.1.0
+Summary: Hjson, a user interface for JSON.
+Home-page: http://github.com/hjson/hjson-py
+Author: Christian Zangl
+Author-email: laktak@cdak.net
+License: MIT License
+Keywords: json comments configuration
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: License :: OSI Approved :: Academic Free License (AFL)
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+License-File: LICENSE.txt
+
+hjson-py
+========
+
+`Hjson`_, a user interface for JSON
+
+Hjson works with Python 2.5+ and Python 3.3+ (based on `simplejson`_)
+
+Installation
+============
+
+- ``pip install hjson``
+
+- or download from https://pypi.python.org/pypi/hjson
+
+Commandline
+-----------
+
+::
+
+ Usage:
+ hjson [options]
+ hjson [options] <file>
+ hjson (-h | --help)
+ hjson (-V | --version)
+
+ Options:
+ -h --help Show this screen.
+ -j Output as formatted JSON.
+ -c Output as JSON.
+ -V --version Show version.
+
+E.g. ``echo '{"json":"obj"}' | hjson``
+
+
+Usage
+=====
+
+.. code-block:: python
+
+ import hjson
+
+Decoding Hjson
+--------------
+
+.. code-block:: python
+
+ text = """{
+ foo: a
+ bar: 1
+ }"""
+
+ hjson.loads(text)
+
+Result:
+
+.. code-block:: python
+
+ OrderedDict([('foo', 'a'), ('bar', 1)])
+
+Encoding Python object hierarchies
+----------------------------------
+
+.. code-block:: python
+
+ hjson.dumps({'foo': 'text', 'bar': (1, 2)})
+
+Result:
+
+::
+
+ {
+ foo: text
+ bar:
+ [
+ 1
+ 2
+ ]
+ }
+
+Encoding as JSON
+----------------
+
+Note that this is probably not as performant as the simplejson version.
+
+.. code-block:: python
+
+ hjson.dumpsJSON(['foo', {'bar': ('baz', None, 1.0, 2)}])
+
+Result: ``'["foo", {"bar": ["baz", null, 1.0, 2]}]'``
+
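+A quick round-trip combining the calls shown above (a sketch; values follow
+the earlier examples):
+
+.. code-block:: python
+
+    obj = hjson.loads("{foo: a}")     # OrderedDict([('foo', 'a')])
+    text = hjson.dumps(obj)          # back to Hjson text
+    json_text = hjson.dumpsJSON(obj)  # standard JSON: '{"foo": "a"}'
+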
+API
+===
+
+`hjson-py`_
+
+.. _Hjson: https://hjson.github.io
+.. _simplejson: https://github.com/simplejson/simplejson
+.. _hjson-py: http://hjson.github.io/hjson-py/
diff --git a/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f8b7adb03e6805f68c21d7b601e0ac6f8e823e38
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[console_scripts]
+hjson = hjson.tool:main
diff --git a/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4b1c5746ac3bdad0432afd48512a61f319fd74cc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+hjson
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/License.txt b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/License.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/License.txt
@@ -0,0 +1,1568 @@
+End User License Agreement
+--------------------------
+
+
+Preface
+-------
+
+The Software License Agreement in Chapter 1 and the Supplement
+in Chapter 2 contain license terms and conditions that govern
+the use of NVIDIA software. By accepting this agreement, you
+agree to comply with all the terms and conditions applicable
+to the product(s) included herein.
+
+
+NVIDIA Driver
+
+
+Description
+
+This package contains the operating system driver and
+fundamental system software components for NVIDIA GPUs.
+
+
+NVIDIA CUDA Toolkit
+
+
+Description
+
+The NVIDIA CUDA Toolkit provides command-line and graphical
+tools for building, debugging and optimizing the performance
+of applications accelerated by NVIDIA GPUs, runtime and math
+libraries, and documentation including programming guides,
+user manuals, and API references.
+
+
+Default Install Location of CUDA Toolkit
+
+Windows platform:
+
+%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#
+
+
+NVIDIA CUDA Samples
+
+
+Description
+
+This package includes over 100 CUDA examples that demonstrate
+various CUDA programming principles, and efficient CUDA
+implementations of algorithms in specific application domains.
+
+
+Default Install Location of CUDA Samples
+
+Windows platform:
+
+%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#/samples
+
+and
+
+$HOME/NVIDIA_CUDA-#.#_Samples
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#/samples
+
+
+NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+Description
+
+NVIDIA Nsight Development Platform, Visual Studio Edition is a
+development environment integrated into Microsoft Visual
+Studio that provides tools for debugging, profiling, analyzing
+and optimizing your GPU computing and graphics applications.
+
+
+Default Install Location of Nsight Visual Studio Edition
+
+Windows platform:
+
+%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+1. License Agreement for NVIDIA Software Development Kits
+---------------------------------------------------------
+
+
+Release Date: July 26, 2018
+---------------------------
+
+
+Important Notice: Read before downloading, installing,
+copying or using the licensed software:
+-------------------------------------------------------
+
+This license agreement, including exhibits attached
+("Agreement”) is a legal agreement between you and NVIDIA
+Corporation ("NVIDIA") and governs your use of a NVIDIA
+software development kit (“SDK”).
+
+Each SDK has its own set of software and materials, but here
+is a description of the types of items that may be included in
+a SDK: source code, header files, APIs, data sets and assets
+(examples include images, textures, models, scenes, videos,
+native API input/output files), binary software, sample code,
+libraries, utility programs, programming code and
+documentation.
+
+This Agreement can be accepted only by an adult of legal age
+of majority in the country in which the SDK is used.
+
+If you are entering into this Agreement on behalf of a company
+or other legal entity, you represent that you have the legal
+authority to bind the entity to this Agreement, in which case
+“you” will mean the entity you represent.
+
+If you don’t have the required age or authority to accept
+this Agreement, or if you don’t accept all the terms and
+conditions of this Agreement, do not download, install or use
+the SDK.
+
+You agree to use the SDK only for purposes that are permitted
+by (a) this Agreement, and (b) any applicable law, regulation
+or generally accepted practices or guidelines in the relevant
+jurisdictions.
+
+
+1.1. License
+
+
+1.1.1. License Grant
+
+Subject to the terms of this Agreement, NVIDIA hereby grants
+you a non-exclusive, non-transferable license, without the
+right to sublicense (except as expressly provided in this
+Agreement) to:
+
+ 1. Install and use the SDK,
+
+ 2. Modify and create derivative works of sample source code
+ delivered in the SDK, and
+
+ 3. Distribute those portions of the SDK that are identified
+ in this Agreement as distributable, as incorporated in
+ object code format into a software application that meets
+ the distribution requirements indicated in this Agreement.
+
+
+1.1.2. Distribution Requirements
+
+These are the distribution requirements for you to exercise
+the distribution grant:
+
+ 1. Your application must have material additional
+ functionality, beyond the included portions of the SDK.
+
+ 2. The distributable portions of the SDK shall only be
+ accessed by your application.
+
+ 3. The following notice shall be included in modifications
+ and derivative works of sample source code distributed:
+ “This software contains source code provided by NVIDIA
+ Corporation.”
+
+ 4. Unless a developer tool is identified in this Agreement
+ as distributable, it is delivered for your internal use
+ only.
+
+ 5. The terms under which you distribute your application
+ must be consistent with the terms of this Agreement,
+ including (without limitation) terms relating to the
+ license grant and license restrictions and protection of
+ NVIDIA’s intellectual property rights. Additionally, you
+ agree that you will protect the privacy, security and
+ legal rights of your application users.
+
+ 6. You agree to notify NVIDIA in writing of any known or
+ suspected distribution or use of the SDK not in compliance
+ with the requirements of this Agreement, and to enforce
+ the terms of your agreements with respect to distributed
+ SDK.
+
+
+1.1.3. Authorized Users
+
+You may allow employees and contractors of your entity or of
+your subsidiary(ies) to access and use the SDK from your
+secure network to perform work on your behalf.
+
+If you are an academic institution you may allow users
+enrolled or employed by the academic institution to access and
+use the SDK from your secure network.
+
+You are responsible for the compliance with the terms of this
+Agreement by your authorized users. If you become aware that
+your authorized users didn’t follow the terms of this
+Agreement, you agree to take reasonable steps to resolve the
+non-compliance and prevent new occurrences.
+
+
+1.1.4. Pre-Release SDK
+
+The SDK versions identified as alpha, beta, preview or
+otherwise as pre-release, may not be fully functional, may
+contain errors or design flaws, and may have reduced or
+different security, privacy, accessibility, availability, and
+reliability standards relative to commercial versions of
+NVIDIA software and materials. Use of a pre-release SDK may
+result in unexpected results, loss of data, project delays or
+other unpredictable damage or loss.
+
+You may use a pre-release SDK at your own risk, understanding
+that pre-release SDKs are not intended for use in production
+or business-critical systems.
+
+NVIDIA may choose not to make available a commercial version
+of any pre-release SDK. NVIDIA may also choose to abandon
+development and terminate the availability of a pre-release
+SDK at any time without liability.
+
+
+1.1.5. Updates
+
+NVIDIA may, at its option, make available patches, workarounds
+or other updates to this SDK. Unless the updates are provided
+with their separate governing terms, they are deemed part of
+the SDK licensed to you as provided in this Agreement. You
+agree that the form and content of the SDK that NVIDIA
+provides may change without prior notice to you. While NVIDIA
+generally maintains compatibility between versions, NVIDIA may
+in some cases make changes that introduce incompatibilities in
+future versions of the SDK.
+
+
+1.1.6. Third Party Licenses
+
+The SDK may come bundled with, or otherwise include or be
+distributed with, third party software licensed by a NVIDIA
+supplier and/or open source software provided under an open
+source license. Use of third party software is subject to the
+third-party license terms, or in the absence of third party
+terms, the terms of this Agreement. Copyright to third party
+software is held by the copyright holders indicated in the
+third-party software or license.
+
+
+1.1.7. Reservation of Rights
+
+NVIDIA reserves all rights, title, and interest in and to the
+SDK, not expressly granted to you under this Agreement.
+
+
+1.2. Limitations
+
+The following license limitations apply to your use of the
+SDK:
+
+ 1. You may not reverse engineer, decompile or disassemble,
+ or remove copyright or other proprietary notices from any
+ portion of the SDK or copies of the SDK.
+
+ 2. Except as expressly provided in this Agreement, you may
+ not copy, sell, rent, sublicense, transfer, distribute,
+ modify, or create derivative works of any portion of the
+ SDK. For clarity, you may not distribute or sublicense the
+ SDK as a stand-alone product.
+
+ 3. Unless you have an agreement with NVIDIA for this
+ purpose, you may not indicate that an application created
+ with the SDK is sponsored or endorsed by NVIDIA.
+
+ 4. You may not bypass, disable, or circumvent any
+ encryption, security, digital rights management or
+ authentication mechanism in the SDK.
+
+ 5. You may not use the SDK in any manner that would cause it
+ to become subject to an open source software license. As
+ examples, licenses that require as a condition of use,
+ modification, and/or distribution that the SDK be:
+
+ a. Disclosed or distributed in source code form;
+
+ b. Licensed for the purpose of making derivative works;
+ or
+
+ c. Redistributable at no charge.
+
+ 6. Unless you have an agreement with NVIDIA for this
+ purpose, you may not use the SDK with any system or
+ application where the use or failure of the system or
+ application can reasonably be expected to threaten or
+ result in personal injury, death, or catastrophic loss.
+ Examples include use in avionics, navigation, military,
+ medical, life support or other life critical applications.
+ NVIDIA does not design, test or manufacture the SDK for
+ these critical uses and NVIDIA shall not be liable to you
+ or any third party, in whole or in part, for any claims or
+ damages arising from such uses.
+
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
+ and its affiliates, and their respective employees,
+ contractors, agents, officers and directors, from and
+ against any and all claims, damages, obligations, losses,
+ liabilities, costs or debt, fines, restitutions and
+ expenses (including but not limited to attorney’s fees
+ and costs incident to establishing the right of
+ indemnification) arising out of or related to your use of
+ the SDK outside of the scope of this Agreement, or not in
+ compliance with its terms.
+
+
+1.3. Ownership
+
+ 1. NVIDIA or its licensors hold all rights, title and
+ interest in and to the SDK and its modifications and
+ derivative works, including their respective intellectual
+ property rights, subject to your rights described in this
+ section. This SDK may include software and materials from
+ NVIDIA’s licensors, and these licensors are intended
+ third party beneficiaries that may enforce this Agreement
+ with respect to their intellectual property rights.
+
+ 2. You hold all rights, title and interest in and to your
+ applications and your derivative works of the sample
+ source code delivered in the SDK, including their
+ respective intellectual property rights, subject to
+ NVIDIA’s rights described in this section.
+
+ 3. You may, but don’t have to, provide to NVIDIA
+ suggestions, feature requests or other feedback regarding
+ the SDK, including possible enhancements or modifications
+ to the SDK. For any feedback that you voluntarily provide,
+ you hereby grant NVIDIA and its affiliates a perpetual,
+ non-exclusive, worldwide, irrevocable license to use,
+ reproduce, modify, license, sublicense (through multiple
+ tiers of sublicensees), and distribute (through multiple
+ tiers of distributors) it without the payment of any
+ royalties or fees to you. NVIDIA will use feedback at its
+ choice. NVIDIA is constantly looking for ways to improve
+ its products, so you may send feedback to NVIDIA through
+ the developer portal at https://developer.nvidia.com.
+
+
+1.4. No Warranties
+
+THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
+FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
+ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
+OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
+BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
+ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
+WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
+DEALING OR COURSE OF TRADE.
+
+
+1.5. Limitation of Liability
+
+TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
+AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
+OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
+PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
+WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
+WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
+OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
+PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
+LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES’
+TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
+AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
+NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
+LIMIT.
+
+These exclusions and limitations of liability shall apply
+regardless if NVIDIA or its affiliates have been advised of
+the possibility of such damages, and regardless of whether a
+remedy fails its essential purpose. These exclusions and
+limitations of liability form an essential basis of the
+bargain between the parties, and, absent any of these
+exclusions or limitations of liability, the provisions of this
+Agreement, including, without limitation, the economic terms,
+would be substantially different.
+
+
+1.6. Termination
+
+ 1. This Agreement will continue to apply until terminated by
+ either you or NVIDIA as described below.
+
+ 2. If you want to terminate this Agreement, you may do so by
+ ceasing your use of the SDK.
+
+ 3. NVIDIA may, at any time, terminate this Agreement if:
+
+ a. (i) you fail to comply with any term of this
+ Agreement and the non-compliance is not fixed within
+ thirty (30) days following notice from NVIDIA (or
+ immediately if you violate NVIDIA’s intellectual
+ property rights);
+
+ b. (ii) you commence or participate in any legal
+ proceeding against NVIDIA with respect to the SDK; or
+
+ c. (iii) NVIDIA decides to no longer provide the SDK in
+ a country or, in NVIDIA’s sole discretion, the
+ continued use of it is no longer commercially viable.
+
+ 4. Upon any termination of this Agreement, you agree to
+ promptly discontinue use of the SDK and destroy all copies
+ in your possession or control. Your prior distributions in
+ accordance with this Agreement are not affected by the
+ termination of this Agreement. Upon written request, you
+ will certify in writing that you have complied with your
+ commitments under this section. Upon any termination of
+ this Agreement all provisions survive except for the
+ license grant provisions.
+
+
+1.7. General
+
+If you wish to assign this Agreement or your rights and
+obligations, including by merger, consolidation, dissolution
+or operation of law, contact NVIDIA to ask for permission. Any
+attempted assignment not approved by NVIDIA in writing shall
+be void and of no effect. NVIDIA may assign, delegate or
+transfer this Agreement and its rights and obligations, and if
+to a non-affiliate you will be notified.
+
+You agree to cooperate with NVIDIA and provide reasonably
+requested information to verify your compliance with this
+Agreement.
+
+This Agreement will be governed in all respects by the laws of
+the United States and of the State of Delaware as those laws
+are applied to contracts entered into and performed entirely
+within Delaware by Delaware residents, without regard to the
+conflicts of laws principles. The United Nations Convention on
+Contracts for the International Sale of Goods is specifically
+disclaimed. You agree to all terms of this Agreement in the
+English language.
+
+The state or federal courts residing in Santa Clara County,
+California shall have exclusive jurisdiction over any dispute
+or claim arising out of this Agreement. Notwithstanding this,
+you agree that NVIDIA shall still be allowed to apply for
+injunctive remedies or an equivalent type of urgent legal
+relief in any jurisdiction.
+
+If any court of competent jurisdiction determines that any
+provision of this Agreement is illegal, invalid or
+unenforceable, such provision will be construed as limited to
+the extent necessary to be consistent with and fully
+enforceable under the law and the remaining provisions will
+remain in full force and effect. Unless otherwise specified,
+remedies are cumulative.
+
+Each party acknowledges and agrees that the other is an
+independent contractor in the performance of this Agreement.
+
+The SDK has been developed entirely at private expense and is
+“commercial items” consisting of “commercial computer
+software” and “commercial computer software
+documentation” provided with RESTRICTED RIGHTS. Use,
+duplication or disclosure by the U.S. Government or a U.S.
+Government subcontractor is subject to the restrictions in
+this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
+in subparagraphs (c)(1) and (2) of the Commercial Computer
+Software - Restricted Rights clause at FAR 52.227-19, as
+applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
+Expressway, Santa Clara, CA 95051.
+
+The SDK is subject to United States export laws and
+regulations. You agree that you will not ship, transfer or
+export the SDK into any country, or use the SDK in any manner,
+prohibited by the United States Bureau of Industry and
+Security or economic sanctions regulations administered by the
+U.S. Department of Treasury’s Office of Foreign Assets
+Control (OFAC), or any applicable export laws, restrictions or
+regulations. These laws include restrictions on destinations,
+end users and end use. By accepting this Agreement, you
+confirm that you are not a resident or citizen of any country
+currently embargoed by the U.S. and that you are not otherwise
+prohibited from receiving the SDK.
+
+Any notice delivered by NVIDIA to you under this Agreement
+will be delivered via mail, email or fax. You agree that any
+notices that NVIDIA sends you electronically will satisfy any
+legal communication requirements. Please direct your legal
+notices or other correspondence to NVIDIA Corporation, 2788
+San Tomas Expressway, Santa Clara, California 95051, United
+States of America, Attention: Legal Department.
+
+This Agreement and any exhibits incorporated into this
+Agreement constitute the entire agreement of the parties with
+respect to the subject matter of this Agreement and supersede
+all prior negotiations or documentation exchanged between the
+parties relating to this SDK license. Any additional and/or
+conflicting terms on documents issued by you are null, void,
+and invalid. Any amendment or waiver under this Agreement
+shall be in writing and signed by representatives of both
+parties.
+
+
+2. CUDA Toolkit Supplement to Software License Agreement for
+NVIDIA Software Development Kits
+------------------------------------------------------------
+
+
+Release date: August 16, 2018
+-----------------------------
+
+The terms in this supplement govern your use of the NVIDIA
+CUDA Toolkit SDK under the terms of your license agreement
+(“Agreement”) as modified by this supplement. Capitalized
+terms used but not defined below have the meaning assigned to
+them in the Agreement.
+
+This supplement is an exhibit to the Agreement and is
+incorporated as an integral part of the Agreement. In the
+event of conflict between the terms in this supplement and the
+terms in the Agreement, the terms in this supplement govern.
+
+
+2.1. License Scope
+
+The SDK is licensed for you to develop applications only for
+use in systems with NVIDIA GPUs.
+
+
+2.2. Distribution
+
+The portions of the SDK that are distributable under the
+Agreement are listed in Attachment A.
+
+
+2.3. Operating Systems
+
+Those portions of the SDK designed exclusively for use on the
+Linux or FreeBSD operating systems, or other operating systems
+derived from the source code to these operating systems, may
+be copied and redistributed for use in accordance with this
+Agreement, provided that the object code files are not
+modified in any way (except for unzipping of compressed
+files).
+
+
+2.4. Audio and Video Encoders and Decoders
+
+You acknowledge and agree that it is your sole responsibility
+to obtain any additional third-party licenses required to
+make, have made, use, have used, sell, import, and offer for
+sale your products or services that include or incorporate any
+third-party software and content relating to audio and/or
+video encoders and decoders from, including but not limited
+to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
+MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
+under this Agreement any necessary patent or other rights with
+respect to any audio and/or video encoders and decoders.
+
+
+2.5. Licensing
+
+If the distribution terms in this Agreement are not suitable
+for your organization, or for any questions regarding this
+Agreement, please contact NVIDIA at
+nvidia-compute-license-questions@nvidia.com.
+
+
+2.6. Attachment A
+
+The following portions of the SDK are distributable under the
+Agreement:
+
+Component: CUDA Runtime
+  Windows:  cudart.dll, cudart_static.lib, cudadevrt.lib
+  Mac OSX:  libcudart.dylib, libcudart_static.a, libcudadevrt.a
+  Linux:    libcudart.so, libcudart_static.a, libcudadevrt.a
+  Android:  libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Component: CUDA FFT Library
+  Windows:  cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+  Mac OSX:  libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+            libcufftw_static.a
+  Linux:    libcufft.so, libcufft_static.a, libcufftw.so,
+            libcufftw_static.a
+  Android:  libcufft.so, libcufft_static.a, libcufftw.so,
+            libcufftw_static.a
+
+Component: CUDA BLAS Library
+  Windows:  cublas.dll, cublasLt.dll
+  Mac OSX:  libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+            libcublasLt_static.a
+  Linux:    libcublas.so, libcublasLt.so, libcublas_static.a,
+            libcublasLt_static.a
+  Android:  libcublas.so, libcublasLt.so, libcublas_static.a,
+            libcublasLt_static.a
+
+Component: NVIDIA "Drop-in" BLAS Library
+  Windows:  nvblas.dll
+  Mac OSX:  libnvblas.dylib
+  Linux:    libnvblas.so
+
+Component: CUDA Sparse Matrix Library
+  Windows:  cusparse.dll, cusparse.lib
+  Mac OSX:  libcusparse.dylib, libcusparse_static.a
+  Linux:    libcusparse.so, libcusparse_static.a
+  Android:  libcusparse.so, libcusparse_static.a
+
+Component: CUDA Linear Solver Library
+  Windows:  cusolver.dll, cusolver.lib
+  Mac OSX:  libcusolver.dylib, libcusolver_static.a
+  Linux:    libcusolver.so, libcusolver_static.a
+  Android:  libcusolver.so, libcusolver_static.a
+
+Component: CUDA Random Number Generation Library
+  Windows:  curand.dll, curand.lib
+  Mac OSX:  libcurand.dylib, libcurand_static.a
+  Linux:    libcurand.so, libcurand_static.a
+  Android:  libcurand.so, libcurand_static.a
+
+Component: CUDA Accelerated Graph Library
+
+Component: NVIDIA Performance Primitives Library
+  Windows:  nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+            nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+            nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+            nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+            nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+  Mac OSX:  libnppc.dylib, libnppc_static.a, libnppial.dylib,
+            libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+            libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+            libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+            libnppig.dylib, libnppig_static.a, libnppim.dylib,
+            libnppim_static.a, libnppist.dylib, libnppist_static.a,
+            libnppisu.dylib, libnppisu_static.a, libnppitc.dylib,
+            libnppitc_static.a, libnpps.dylib, libnpps_static.a
+  Linux:    libnppc.so, libnppc_static.a, libnppial.so,
+            libnppial_static.a, libnppicc.so, libnppicc_static.a,
+            libnppicom.so, libnppicom_static.a, libnppidei.so,
+            libnppidei_static.a, libnppif.so, libnppif_static.a,
+            libnppig.so, libnppig_static.a, libnppim.so,
+            libnppim_static.a, libnppist.so, libnppist_static.a,
+            libnppisu.so, libnppisu_static.a, libnppitc.so,
+            libnppitc_static.a, libnpps.so, libnpps_static.a
+  Android:  libnppc.so, libnppc_static.a, libnppial.so,
+            libnppial_static.a, libnppicc.so, libnppicc_static.a,
+            libnppicom.so, libnppicom_static.a, libnppidei.so,
+            libnppidei_static.a, libnppif.so, libnppif_static.a,
+            libnppig.so, libnppig_static.a, libnppim.so,
+            libnppim_static.a, libnppist.so, libnppist_static.a,
+            libnppisu.so, libnppisu_static.a, libnppitc.so,
+            libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Component: NVIDIA JPEG Library
+  Linux:    libnvjpeg.so, libnvjpeg_static.a
+
+Component: Internal common library required for statically linking to
+           cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+  Mac OSX:  libculibos.a
+  Linux:    libculibos.a
+
+Component: NVIDIA Runtime Compilation Library and Header
+  All:      nvrtc.h
+  Windows:  nvrtc.dll, nvrtc-builtins.dll
+  Mac OSX:  libnvrtc.dylib, libnvrtc-builtins.dylib
+  Linux:    libnvrtc.so, libnvrtc-builtins.so
+
+Component: NVIDIA Optimizing Compiler Library
+  Windows:  nvvm.dll
+  Mac OSX:  libnvvm.dylib
+  Linux:    libnvvm.so
+
+Component: NVIDIA Common Device Math Functions Library
+  Windows:  libdevice.10.bc
+  Mac OSX:  libdevice.10.bc
+  Linux:    libdevice.10.bc
+
+Component: CUDA Occupancy Calculation Header Library
+  All:      cuda_occupancy.h
+
+Component: CUDA Half Precision Headers
+  All:      cuda_fp16.h, cuda_fp16.hpp
+
+Component: CUDA Profiling Tools Interface (CUPTI) Library
+  Windows:  cupti.dll
+  Mac OSX:  libcupti.dylib
+  Linux:    libcupti.so
+
+Component: NVIDIA Tools Extension Library
+  Windows:  nvToolsExt.dll, nvToolsExt.lib
+  Mac OSX:  libnvToolsExt.dylib
+  Linux:    libnvToolsExt.so
+
+Component: NVIDIA CUDA Driver Libraries
+  Linux:    libcuda.so, libnvidia-fatbinaryloader.so,
+            libnvidia-ptxjitcompiler.so
+
+The NVIDIA CUDA Driver Libraries are only distributable in
+applications that meet these criteria:
+
+  1. The application was developed starting from an NVIDIA CUDA
+ container obtained from Docker Hub or the NVIDIA GPU
+ Cloud, and
+
+ 2. The resulting application is packaged as a Docker
+ container and distributed to users on Docker Hub or the
+ NVIDIA GPU Cloud only.
+
+
+2.7. Attachment B
+
+
+Additional Licensing Obligations
+
+The following third party components included in the SOFTWARE
+are licensed to Licensee pursuant to the following terms and
+conditions:
+
+ 1. Licensee's use of the GDB third party component is
+ subject to the terms and conditions of GNU GPL v3:
+
+ This product includes copyrighted third-party software licensed
+ under the terms of the GNU General Public License v3 ("GPL v3").
+ All third-party software packages are copyright by their respective
+ authors. GPL v3 terms and conditions are hereby incorporated into
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+ Consistent with these licensing requirements, the software
+ listed below is provided under the terms of the specified
+ open source software licenses. To obtain source code for
+ software provided under licenses that require
+ redistribution of source code, including the GNU General
+ Public License (GPL) and GNU Lesser General Public License
+ (LGPL), contact oss-requests@nvidia.com. This offer is
+ valid for a period of three (3) years from the date of the
+ distribution of this product by NVIDIA CORPORATION.
+
+ Component License
+ CUDA-GDB GPL v3
+
+ 2. Licensee represents and warrants that any and all third
+ party licensing and/or royalty payment obligations in
+ connection with Licensee's use of the H.264 video codecs
+ are solely the responsibility of Licensee.
+
+ 3. Licensee's use of the Thrust library is subject to the
+ terms and conditions of the Apache License Version 2.0.
+ All third-party software packages are copyright by their
+ respective authors. Apache License Version 2.0 terms and
+ conditions are hereby incorporated into the Agreement by
+ this reference.
+ http://www.apache.org/licenses/LICENSE-2.0.html
+
+ In addition, Licensee acknowledges the following notice:
+ Thrust includes source code from the Boost Iterator,
+ Tuple, System, and Random Number libraries.
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 4. Licensee's use of the LLVM third party component is
+ subject to the following terms and conditions:
+
+ ======================================================
+ LLVM Release License
+ ======================================================
+ University of Illinois/NCSA
+ Open Source License
+
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal with the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
+ Champaign, nor the names of its contributors may be used to endorse or
+ promote products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS WITH THE SOFTWARE.
+
+ 5. Licensee's use (e.g., via nvprof) of the PCRE third party
+ component is subject to the following terms and
+ conditions:
+
+ ------------
+ PCRE LICENCE
+ ------------
+ PCRE is a library of functions to support regular expressions whose syntax
+ and semantics are as close as possible to those of the Perl 5 language.
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+ specified below. The documentation for PCRE, supplied in the "doc"
+ directory, is distributed under the same terms as the software itself. The
+ basic library functions are written in C and are freestanding. Also
+ included in the distribution is a set of C++ wrapper functions, and a just-
+ in-time compiler that can be used to optimize pattern matching. These are
+ both optional features that can be omitted when the library is built.
+
+ THE BASIC LIBRARY FUNCTIONS
+ ---------------------------
+ Written by: Philip Hazel
+ Email local part: ph10
+ Email domain: cam.ac.uk
+ University of Cambridge Computing Service,
+ Cambridge, England.
+ Copyright (c) 1997-2012 University of Cambridge
+ All rights reserved.
+
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
+ -------------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright (c) 2010-2012 Zoltan Herczeg
+ All rights reserved.
+
+ STACK-LESS JUST-IN-TIME COMPILER
+ --------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright (c) 2009-2012 Zoltan Herczeg
+ All rights reserved.
+
+ THE C++ WRAPPER FUNCTIONS
+ -------------------------
+ Contributed by: Google Inc.
+ Copyright (c) 2007-2012, Google Inc.
+ All rights reserved.
+
+ THE "BSD" LICENCE
+ -----------------
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Google
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 6. Some of the cuBLAS library routines were written by or
+ derived from code written by Vasily Volkov and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2007-2009, Regents of the University of California
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the University of California, Berkeley nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 7. Some of the cuBLAS library routines were written by or
+ derived from code written by Davide Barbieri and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 8. Some of the cuBLAS library routines were derived from
+ code developed by the University of Tennessee and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2010 The University of Tennessee.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer listed in this license in the documentation and/or
+ other materials provided with the distribution.
+ * Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 9. Some of the cuBLAS library routines were written by or
+ derived from code written by Jonathan Hogg and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the STFC nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 10. Some of the cuBLAS library routines were written by or
+ derived from code written by Ahmad M. Abdelfattah, David
+ Keyes, and Hatem Ltaief, and are subject to the Apache
+ License, Version 2.0, as follows:
+
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
+ Authors:
+ Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa)
+ David Keyes (david.keyes@kaust.edu.sa)
+ Hatem Ltaief (hatem.ltaief@kaust.edu.sa)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the King Abdullah University of Science and
+ Technology nor the names of its contributors may be used to endorse
+ or promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 11. Some of the cuSPARSE library routines were written by or
+ derived from code written by Li-Wen Chang and are subject
+ to the NCSA Open Source License as follows:
+
+ Copyright (c) 2012, University of Illinois.
+
+ All rights reserved.
+
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal with the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimers in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the names of IMPACT Group, University of Illinois, nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ SOFTWARE.
+
+ 12. Some of the cuRAND library routines were written by or
+ derived from code written by Mutsuo Saito and Makoto
+ Matsumoto and are subject to the following license:
+
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ University. All rights reserved.
+
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
+ University and University of Tokyo. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the Hiroshima University nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 13. Some of the cuRAND library routines were derived from
+ code developed by D. E. Shaw Research and are subject to
+ the following license:
+
+ Copyright 2010-2011, D. E. Shaw Research.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions, and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions, and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of D. E. Shaw Research nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 14. Some of the Math library routines were written by or
+ derived from code developed by Norbert Juffa and are
+ subject to the following license:
+
+ Copyright (c) 2015-2017, Norbert Juffa
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 15. Licensee's use of the lz4 third party component is
+ subject to the following terms and conditions:
+
+ Copyright (C) 2011-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 16. The NPP library uses code from the Boost Math Toolkit,
+ and is subject to the following license:
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 17. Portions of the Nsight Eclipse Edition are subject to the
+ following license:
+
+ The Eclipse Foundation makes available all content in this plug-in
+ ("Content"). Unless otherwise indicated below, the Content is provided
+ to you under the terms and conditions of the Eclipse Public License
+ Version 1.0 ("EPL"). A copy of the EPL is available at
+ http://www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
+ will mean the Content.
+
+ If you did not receive this Content directly from the Eclipse
+ Foundation, the Content is being redistributed by another party
+ ("Redistributor") and different terms and conditions may apply to your
+ use of any object code in the Content. Check the Redistributor's
+ license that was provided with the Content. If no such license exists,
+ contact the Redistributor. Unless otherwise indicated below, the terms
+ and conditions of the EPL still apply to any source code in the
+ Content and such source code may be obtained at http://www.eclipse.org.
+
+ 18. Some of the cuBLAS library routines use code from
+ OpenAI, which is subject to the following license:
+
+ License URL
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
+
+ License Text
+ The MIT License
+
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+ 19. Licensee's use of the Visual Studio Setup Configuration
+ Samples is subject to the following license:
+
+ The MIT License (MIT)
+ Copyright (C) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge,
+ publish, distribute, sublicense, and/or sell copies of the Software,
+ and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ 20. Licensee's use of the linmath.h header for CPU functions for
+ GL vector/matrix operations from LunarG is subject to the
+ Apache License Version 2.0.
+
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
+ subject to the MIT license.
+
+-----------------
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..9382bb3fea1153c53386720793963ac5e3f44f6a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: nvidia-cuda-cupti-cu12
+Version: 12.1.105
+Summary: CUDA profiling tools runtime libs.
+Home-page: https://developer.nvidia.com/cuda-zone
+Author: Nvidia CUDA Installer Team
+Author-email: cuda_installer@nvidia.com
+License: NVIDIA Proprietary Software
+Keywords: cuda,nvidia,runtime,machine learning,deep learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3
+License-File: License.txt
+
+Provides libraries to enable third party tools using GPU profiling APIs.
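+
+As a minimal, illustrative sketch only (not part of the upstream
+package description): a third-party tool could load the CUPTI shared
+library shipped in this wheel with ctypes and query the API version.
+The library name and lookup path are assumptions based on the RECORD
+that follows; cuptiGetVersion is the version query declared in
+cupti_version.h::
+
+    import ctypes
+
+    # Assumed: the copy bundled under nvidia/cuda_cupti/lib/ is on the
+    # loader path; otherwise pass its absolute path to CDLL.
+    cupti = ctypes.CDLL("libcupti.so.12")
+
+    version = ctypes.c_uint32(0)
+    # cuptiGetVersion returns a CUptiResult status; 0 is CUPTI_SUCCESS.
+    if cupti.cuptiGetVersion(ctypes.byref(version)) == 0:
+        print("CUPTI API version:", version.value)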
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/RECORD b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..d3d72cd0b04af2a44def94e4441c081ba2049beb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/RECORD
@@ -0,0 +1,50 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_cupti/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_cupti/include/Openacc/cupti_openacc.h,sha256=Z0OM5e_hbd3cxdXyn3SCHqBBQawLg4QORnlm57Cr2-M,3513
+nvidia/cuda_cupti/include/Openmp/cupti_openmp.h,sha256=E1WNmeb_7HaUSmBegtUNe4IV1i7pXeNxgzIlyKn1zrM,3491
+nvidia/cuda_cupti/include/Openmp/omp-tools.h,sha256=AmuC_xPC7VPu3B-W4PmXuCNufFawhY8PjNXePaQFAOg,37403
+nvidia/cuda_cupti/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_cupti/include/cuda_stdint.h,sha256=XbFOk9CtJjKqk7PpYNqbSVsDxAsVM8avA4rWpPi0BjQ,4093
+nvidia/cuda_cupti/include/cupti.h,sha256=JkVyAGTIMYzwm62dfVqas3nMcILhgP_Wdz6fh4_NED0,4697
+nvidia/cuda_cupti/include/cupti_activity.h,sha256=qVVazvOJZbDMzvbqgS8nmaHN4gaxAWO2HA_8D7-Vpiw,311866
+nvidia/cuda_cupti/include/cupti_callbacks.h,sha256=-a47AoM4HoU5IuCCB_L-6lZRdrkDAC4XXLJuoUqojeY,26587
+nvidia/cuda_cupti/include/cupti_checkpoint.h,sha256=rTz8JoWxqESBXyZWUhZJGm4xeYcx4OJOtJ7Ld13T_b0,5264
+nvidia/cuda_cupti/include/cupti_driver_cbid.h,sha256=Uc74JDlJN_3qI04l4gkGzYbB3Ki0l0IgZILZO0WXtVs,70346
+nvidia/cuda_cupti/include/cupti_events.h,sha256=oHIOKSsE5ZAot5tZK-sbS2K9xcgiXBXTZZDkPQuiaNw,52639
+nvidia/cuda_cupti/include/cupti_metrics.h,sha256=iLAOlDrcbHEsIIUmgq0Tp1ZOY9O3Ot3wj2-bI8iYbSs,32148
+nvidia/cuda_cupti/include/cupti_nvtx_cbid.h,sha256=_azPtR1g4qivvX7qbvHRUg0RHCWF7iEOJyHMN9qZe9E,5912
+nvidia/cuda_cupti/include/cupti_pcsampling.h,sha256=uT_DtFN0Bye6ADtxfKXUAc8BcrFefotf-VtTuKQGJx0,32395
+nvidia/cuda_cupti/include/cupti_pcsampling_util.h,sha256=gEiMBes3mtpDJqauxqUtfe0csY4J31qpdg2Cp8On95E,13060
+nvidia/cuda_cupti/include/cupti_profiler_target.h,sha256=LWNFuYyotgGhCKY7TS48uVGxjeuOAuANWSnB8yfOfvo,31596
+nvidia/cuda_cupti/include/cupti_result.h,sha256=sOBZCRuRVHvcbIyDlzyLeina5YXwIQH21rVr3FPoB6M,12026
+nvidia/cuda_cupti/include/cupti_runtime_cbid.h,sha256=ZpomdRK7Fhn_NZYiiq5b3AyNZX3gznot-aX4dk-tsZI,44182
+nvidia/cuda_cupti/include/cupti_target.h,sha256=x4Vz1Upb6m9ixmVpmGaKQldDWYQI3OZ-ocEXGzNK0EE,1263
+nvidia/cuda_cupti/include/cupti_version.h,sha256=7XDJSIWpeJU8lrp0cOyma7dXXSGK4bdT1G8akxu8D_Q,4344
+nvidia/cuda_cupti/include/generated_cudaGL_meta.h,sha256=dfd2QuaRdEjbStOKvaQLi1Md_qrpRQh8PfyZznJ8bWY,3115
+nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h,sha256=fAedsoQxaU3hIAApAWDOKsa9kgcuQw4tdyf8klLm-3k,1453
+nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h,sha256=LXOqvQCej0sCgAT1LUKKYZ466EFxN4hIwf9oIhXOLF0,2250
+nvidia/cuda_cupti/include/generated_cuda_meta.h,sha256=qZhsMxL-CURycqC2YkkioSDiD5pA8q22GOje2bOeviU,87152
+nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h,sha256=YCkUMRP93XtDGLEH7DOJCUuhdRcVsO1vQwF_K9AuDfI,64332
+nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h,sha256=8OLqWN26aEYpTWUXtbHJvA5GYhVv3ybYVOTW7yK37z8,1367
+nvidia/cuda_cupti/include/generated_cudart_removed_meta.h,sha256=X3I5WXmhtsJNNlgY7coJ5vg4t11G5FRR6Xo7MboIeck,5172
+nvidia/cuda_cupti/include/generated_nvtx_meta.h,sha256=YHb_RD8g3s4m8PJn7Z0wnxvUHarl7BOAX5ADr-BL3HI,7513
+nvidia/cuda_cupti/include/nvperf_common.h,sha256=MMZrDvDdtG2DSS0h2B8AR1aPyt6UmeWwH-Dc_XsxaHo,10422
+nvidia/cuda_cupti/include/nvperf_cuda_host.h,sha256=xEapxwvdl96uV-On-c8LY2lvwVNfIjq-rAgj9_dYbqo,8299
+nvidia/cuda_cupti/include/nvperf_host.h,sha256=3mcgAEbB9uaDfWheRqC8gLlTiTggc_auV8PE9dTShx4,66289
+nvidia/cuda_cupti/include/nvperf_target.h,sha256=jVR2zEO2KmMta0C-qTGuS9V6rhVyMNnRnOU4QJSiPrc,21476
+nvidia/cuda_cupti/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-310.pyc,,
+nvidia/cuda_cupti/lib/libcheckpoint.so,sha256=Fib_EZWCvKRmBbxtSXaat1MUuZk91ke9ZKkN7HR7yEM,1534104
+nvidia/cuda_cupti/lib/libcupti.so.12,sha256=q8YxAOnPUWuO0folNUrlPb_o30g4rFJdjXODMsIZjcI,7419504
+nvidia/cuda_cupti/lib/libnvperf_host.so,sha256=lc7EKudwwfIlHSBLA-EtVv2y5VYeSJjAe0A4L-JHRYk,28636664
+nvidia/cuda_cupti/lib/libnvperf_target.so,sha256=-iWHyNIR-8hei4jMoLzr54yMxAyBsMN2POV6yeY_Bmk,5895416
+nvidia/cuda_cupti/lib/libpcsamplingutil.so,sha256=XGCctMdV5khc1HtLdK_imh8aepM88GJz0q6CcPJtb3k,912728
+nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_cuda_cupti_cu12-12.1.105.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA,sha256=xrOx7eliZP6--5Pla2AJW0e8XI3H0XDb9ZEN7DXghPs,1553
+nvidia_cuda_cupti_cu12-12.1.105.dist-info/RECORD,,
+nvidia_cuda_cupti_cu12-12.1.105.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_cuda_cupti_cu12-12.1.105.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/WHEEL b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
diff --git a/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/top_level.txt
@@ -0,0 +1 @@
+nvidia
diff --git a/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6f62d44e4ef733c0e713afcd2371fed7f2b3de67
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE
@@ -0,0 +1,3 @@
+This software is made available under the terms of *either* of the licenses
+found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software are made
+under the terms of *both* these licenses.
diff --git a/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.APACHE b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.APACHE
new file mode 100644
index 0000000000000000000000000000000000000000..f433b1a53f5b830a205fd2df78e2b34974656c7b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.APACHE
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.BSD b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.BSD
new file mode 100644
index 0000000000000000000000000000000000000000..42ce7b75c92fb01a3f6ed17eea363f756b7da582
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.BSD
@@ -0,0 +1,23 @@
+Copyright (c) Donald Stufft and individual contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..10ab4390a96923dea26efffa4c42eaacb502536b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/METADATA
@@ -0,0 +1,102 @@
+Metadata-Version: 2.1
+Name: packaging
+Version: 24.0
+Summary: Core utilities for Python packages
+Author-email: Donald Stufft
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Typing :: Typed
+Project-URL: Documentation, https://packaging.pypa.io/
+Project-URL: Source, https://github.com/pypa/packaging
+
+packaging
+=========
+
+.. start-intro
+
+Reusable core utilities for various Python Packaging
+`interoperability specifications <https://packaging.python.org/specifications/>`_.
+
+This library provides utilities that implement the interoperability
+specifications which have exactly one correct behaviour (e.g. :pep:`440`)
+or benefit greatly from having a single shared implementation (e.g. :pep:`425`).
+
+.. end-intro
+
+The ``packaging`` project includes the following: version handling, specifiers,
+markers, requirements, tags, utilities.
+
+Documentation
+-------------
+
+The `documentation`_ provides information and the API for the following:
+
+- Version Handling
+- Specifiers
+- Markers
+- Requirements
+- Tags
+- Utilities
+
+Installation
+------------
+
+Use ``pip`` to install these utilities::
+
+ pip install packaging
+
+The ``packaging`` library uses calendar-based versioning (``YY.N``).
+
+Discussion
+----------
+
+If you run into bugs, you can file them in our `issue tracker`_.
+
+You can also join ``#pypa`` on Freenode to ask questions or get involved.
+
+
+.. _`documentation`: https://packaging.pypa.io/
+.. _`issue tracker`: https://github.com/pypa/packaging/issues
+
+
+Code of Conduct
+---------------
+
+Everyone interacting in the packaging project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
+
+Contributing
+------------
+
+The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as
+well as how to report a potential security issue. The documentation for this
+project also covers information about `project development`_ and `security`_.
+
+.. _`project development`: https://packaging.pypa.io/en/latest/development/
+.. _`security`: https://packaging.pypa.io/en/latest/security/
+
+Project History
+---------------
+
+Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for
+recent changes and project history.
+
+.. _`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/
+
diff --git a/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..a5419e4d96ad680b02d9d92bac5f0655b6e795af
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/RECORD
@@ -0,0 +1,36 @@
+packaging-24.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+packaging-24.0.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197
+packaging-24.0.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
+packaging-24.0.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344
+packaging-24.0.dist-info/METADATA,sha256=0dESdhY_wHValuOrbgdebiEw04EbX4dkujlxPdEsFus,3203
+packaging-24.0.dist-info/RECORD,,
+packaging-24.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+packaging/__init__.py,sha256=UzotcV07p8vcJzd80S-W0srhgY8NMVD_XvJcZ7JN-tA,496
+packaging/__pycache__/__init__.cpython-310.pyc,,
+packaging/__pycache__/_elffile.cpython-310.pyc,,
+packaging/__pycache__/_manylinux.cpython-310.pyc,,
+packaging/__pycache__/_musllinux.cpython-310.pyc,,
+packaging/__pycache__/_parser.cpython-310.pyc,,
+packaging/__pycache__/_structures.cpython-310.pyc,,
+packaging/__pycache__/_tokenizer.cpython-310.pyc,,
+packaging/__pycache__/markers.cpython-310.pyc,,
+packaging/__pycache__/metadata.cpython-310.pyc,,
+packaging/__pycache__/requirements.cpython-310.pyc,,
+packaging/__pycache__/specifiers.cpython-310.pyc,,
+packaging/__pycache__/tags.cpython-310.pyc,,
+packaging/__pycache__/utils.cpython-310.pyc,,
+packaging/__pycache__/version.cpython-310.pyc,,
+packaging/_elffile.py,sha256=hbmK8OD6Z7fY6hwinHEUcD1by7czkGiNYu7ShnFEk2k,3266
+packaging/_manylinux.py,sha256=1ng_TqyH49hY6s3W_zVHyoJIaogbJqbIF1jJ0fAehc4,9590
+packaging/_musllinux.py,sha256=kgmBGLFybpy8609-KTvzmt2zChCPWYvhp5BWP4JX7dE,2676
+packaging/_parser.py,sha256=zlsFB1FpMRjkUdQb6WLq7xON52ruQadxFpYsDXWhLb4,10347
+packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
+packaging/_tokenizer.py,sha256=alCtbwXhOFAmFGZ6BQ-wCTSFoRAJ2z-ysIf7__MTJ_k,5292
+packaging/markers.py,sha256=eH-txS2zq1HdNpTd9LcZUcVIwewAiNU0grmq5wjKnOk,8208
+packaging/metadata.py,sha256=w7jPEg6mDf1FTZMn79aFxFuk4SKtynUJtxr2InTxlV4,33036
+packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+packaging/requirements.py,sha256=dgoBeVprPu2YE6Q8nGfwOPTjATHbRa_ZGLyXhFEln6Q,2933
+packaging/specifiers.py,sha256=dB2DwbmvSbEuVilEyiIQ382YfW5JfwzXTfRRPVtaENY,39784
+packaging/tags.py,sha256=fedHXiOHkBxNZTXotXv8uXPmMFU9ae-TKBujgYHigcA,18950
+packaging/utils.py,sha256=XgdmP3yx9-wQEFjO7OvMj9RjEf5JlR5HFFR69v7SQ9E,5268
+packaging/version.py,sha256=XjRBLNK17UMDgLeP8UHnqwiY3TdSi03xFQURtec211A,16236
diff --git a/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/packaging-24.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.9.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/apis.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/apis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05c289f4afadb87465bbf3a0636b7fd33ed5cdd4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/apis.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16eeb74a2489393673bde93cfab5cfba8afe3805
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/config.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/deprecated.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/deprecated.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da626800194ea3d53c70675af80a0961159701e5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/deprecated.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/futures/__init__.py b/venv/lib/python3.10/site-packages/torch/futures/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a398bebb599cb4b005ae76ea318e752dcd99a52
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/futures/__init__.py
@@ -0,0 +1,318 @@
+from __future__ import annotations
+
+from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union
+
+import torch
+
+__all__ = ['Future', 'collect_all', 'wait_all']
+
+T = TypeVar("T")
+S = TypeVar("S")
+
+
+class _PyFutureMeta(type(torch._C.Future), type(Generic)): # type: ignore[misc, no-redef]
+ pass
+
+
+class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
+ r"""
+ Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous
+ execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It
+ also exposes a set of APIs to add callback functions and set results.
+
+ .. warning:: GPU support is a beta feature, subject to changes.
+ """
+
+ def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None):
+ r"""
+ Create an empty unset ``Future``. If the future is intended to hold
+ values containing CUDA tensors, (a superset of) their CUDA devices must
+ be specified at construction. (This is only supported if
+ ``torch.cuda.is_available()`` returns ``True``). This is needed to
+ ensure proper CUDA stream synchronization. The child futures, returned
+ by the ``then`` method, will inherit these devices.
+
+ Args:
+ devices(``List[Union[int, str, torch.device]]``, optional): the set
+ of devices on which tensors contained in this future's value are
+ allowed to reside and on which callbacks are allowed to operate.
+ """
+ if devices is None:
+ devices = []
+ super().__init__([torch.device(d) for d in devices])
+
+ def done(self) -> bool:
+ r"""
+ Return ``True`` if this ``Future`` is done. A ``Future`` is done if it
+ has a result or an exception.
+
+ If the value contains tensors that reside on GPUs, ``Future.done()``
+ will return ``True`` even if the asynchronous kernels that are
+ populating those tensors haven't yet completed running on the device,
+        because at that stage the result is already usable, provided one
+ performs the appropriate synchronizations (see :meth:`wait`).
+ """
+ return super().done()
+
+ def wait(self) -> T:
+ r"""
+ Block until the value of this ``Future`` is ready.
+
+ If the value contains tensors that reside on GPUs, then an additional
+ synchronization is performed with the kernels (executing on the device)
+ which may be asynchronously populating those tensors. Such sync is
+ non-blocking, which means that ``wait()`` will insert the necessary
+ instructions in the current streams to ensure that further operations
+ enqueued on those streams will be properly scheduled after the async
+ kernels but, once that is done, ``wait()`` will return, even if those
+ kernels are still running. No further synchronization is required when
+ accessing and using the values, as long as one doesn't change streams.
+
+ Returns:
+ The value held by this ``Future``. If the function (callback or RPC)
+ creating the value has thrown an error, this ``wait`` method will
+ also throw an error.
+ """
+ return super().wait()
+
+ def value(self) -> T:
+ r"""
+ Obtain the value of an already-completed future.
+
+ This method should only be called after a call to :meth:`wait` has
+ completed, or inside a callback function passed to :meth:`then`. In
+ other cases this ``Future`` may not yet hold a value and calling
+ ``value()`` could fail.
+
+ If the value contains tensors that reside on GPUs, then this method will
+ *not* perform any additional synchronization. This should be done
+ beforehand, separately, through a call to :meth:`wait` (except within
+ callbacks, for which it's already being taken care of by :meth:`then`).
+
+ Returns:
+ The value held by this ``Future``. If the function (callback or RPC)
+ creating the value has thrown an error, this ``value()`` method will
+ also throw an error.
+ """
+ return super().value()
+
+ def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
+ r"""
+ Append the given callback function to this ``Future``, which will be run
+ when the ``Future`` is completed. Multiple callbacks can be added to
+ the same ``Future``, but the order in which they will be executed cannot
+ be guaranteed (to enforce a certain order consider chaining:
+ ``fut.then(cb1).then(cb2)``). The callback must take one argument, which
+ is the reference to this ``Future``. The callback function can use the
+ :meth:`value` method to get the value. Note that if this ``Future`` is
+ already completed, the given callback will be run immediately inline.
+
+ If the ``Future``'s value contains tensors that reside on GPUs, the
+ callback might be invoked while the async kernels that are populating
+ those tensors haven't yet finished executing on the device. However, the
+ callback will be invoked with some dedicated streams set as current
+ (fetched from a global pool) which will be synchronized with those
+ kernels. Hence any operation performed by the callback on these tensors
+ will be scheduled on the device after the kernels complete. In other
+ words, as long as the callback doesn't switch streams, it can safely
+ manipulate the result without any additional synchronization. This is
+ similar to the non-blocking behavior of :meth:`wait`.
+
+ Similarly, if the callback returns a value that contains tensors that
+ reside on a GPU, it can do so even if the kernels that are producing
+ these tensors are still running on the device, as long as the callback
+ didn't change streams during its execution. If one wants to change
+ streams, one must be careful to re-synchronize them with the original
+ streams, that is, those that were current when the callback was invoked.
+
+ Args:
+ callback(``Callable``): a ``Callable`` that takes this ``Future`` as
+ the only argument.
+
+ Returns:
+ A new ``Future`` object that holds the return value of the
+ ``callback`` and will be marked as completed when the given
+ ``callback`` finishes.
+
+ .. note:: Note that if the callback function throws, either
+ through the original future being completed with an exception and
+ calling ``fut.wait()``, or through other code in the callback, the
+ future returned by ``then`` will be marked appropriately with the
+ encountered error. However, if this callback later completes
+ additional futures, those futures are not marked as completed with
+ an error and the user is responsible for handling completion/waiting
+ on those futures independently.
+
+ Example::
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
+ >>> def callback(fut):
+ ... print(f"RPC return value is {fut.wait()}.")
+ >>> fut = torch.futures.Future()
+ >>> # The inserted callback will print the return value when
+ >>> # receiving the response from "worker1"
+ >>> cb_fut = fut.then(callback)
+ >>> chain_cb_fut = cb_fut.then(
+ ... lambda x : print(f"Chained cb done. {x.wait()}")
+ ... )
+ >>> fut.set_result(5)
+ RPC return value is 5.
+ Chained cb done. None
+ """
+ return cast(Future[S], super().then(callback))
+
+ def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
+ r"""
+ Append the given callback function to this ``Future``, which will be run
+ when the ``Future`` is completed. Multiple callbacks can be added to
+ the same ``Future``, but the order in which they will be executed cannot
+ be guaranteed. The callback must take one argument, which is the
+ reference to this ``Future``. The callback function can use the
+ :meth:`value` method to get the value. Note that if this ``Future`` is
+ already completed, the given callback will be run inline.
+
+ We recommend that you use the :meth:`then` method as it provides a way
+ to synchronize after your callback has completed. ``add_done_callback``
+ can be cheaper if your callback does not return anything. But both
+ :meth:`then` and ``add_done_callback`` use the same callback
+ registration API under the hood.
+
+ With respect to GPU tensors, this method behaves in the same way as
+ :meth:`then`.
+
+ Args:
+            callback(``Callable``): a ``Callable`` that takes in one argument,
+ which is the reference to this ``Future``.
+
+ .. note:: Note that if the callback function throws, either
+ through the original future being completed with an exception and
+ calling ``fut.wait()``, or through other code in the callback,
+ error handling must be carefully taken care of. For example, if
+ this callback later completes additional futures, those futures are
+ not marked as completed with an error and the user is responsible
+ for handling completion/waiting on those futures independently.
+
+ Example::
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
+ >>> def callback(fut):
+ ... print("This will run after the future has finished.")
+ ... print(fut.wait())
+ >>> fut = torch.futures.Future()
+ >>> fut.add_done_callback(callback)
+ >>> fut.set_result(5)
+ This will run after the future has finished.
+ 5
+ """
+ super().add_done_callback(callback)
+
+ def set_result(self, result: T) -> None:
+ r"""
+ Set the result for this ``Future``, which will mark this ``Future`` as
+ completed and trigger all attached callbacks. Note that a ``Future``
+ cannot be marked completed twice.
+
+ If the result contains tensors that reside on GPUs, this method can be
+ called even if the asynchronous kernels that are populating those
+ tensors haven't yet completed running on the device, provided that the
+ streams on which those kernels were enqueued are set as the current ones
+ when this method is called. Put simply, it's safe to call this method
+ immediately after launching those kernels, without any additional
+ synchronization, as long as one doesn't change streams in between. This
+ method will record events on all the relevant current streams and will
+ use them to ensure proper scheduling for all the consumers of this
+ ``Future``.
+
+ Args:
+ result (object): the result object of this ``Future``.
+
+ Example::
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
+ >>> import threading
+ >>> import time
+ >>> def slow_set_future(fut, value):
+ ... time.sleep(0.5)
+ ... fut.set_result(value)
+ >>> fut = torch.futures.Future()
+ >>> t = threading.Thread(
+ ... target=slow_set_future,
+ ... args=(fut, torch.ones(2) * 3)
+ ... )
+ >>> t.start()
+ >>> print(fut.wait())
+ tensor([3., 3.])
+ >>> t.join()
+ """
+ super().set_result(result)
+
+ def set_exception(self, result: T) -> None:
+ r"""
+ Set an exception for this ``Future``, which will mark this ``Future`` as
+ completed with an error and trigger all attached callbacks. Note that
+ when calling wait()/value() on this ``Future``, the exception set here
+ will be raised inline.
+
+ Args:
+ result (BaseException): the exception for this ``Future``.
+
+ Example::
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
+ >>> fut = torch.futures.Future()
+ >>> fut.set_exception(ValueError("foo"))
+ >>> fut.wait()
+ Traceback (most recent call last):
+ ...
+ ValueError: foo
+ """
+ assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."
+
+ def raise_error(fut_result):
+ raise fut_result
+
+ super()._set_unwrap_func(raise_error)
+ self.set_result(result) # type: ignore[arg-type]
+
+
+def collect_all(futures: List[Future]) -> Future[List[Future]]:
+ r"""
+ Collects the provided :class:`~torch.futures.Future` objects into a single
+ combined :class:`~torch.futures.Future` that is completed when all of the
+ sub-futures are completed.
+
+ Args:
+ futures (list): a list of :class:`~torch.futures.Future` objects.
+
+ Returns:
+ Returns a :class:`~torch.futures.Future` object to a list of the passed
+ in Futures.
+
+ Example::
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
+ >>> fut0 = torch.futures.Future()
+ >>> fut1 = torch.futures.Future()
+ >>> fut = torch.futures.collect_all([fut0, fut1])
+ >>> fut0.set_result(0)
+ >>> fut1.set_result(1)
+ >>> fut_list = fut.wait()
+ >>> print(f"fut0 result = {fut_list[0].wait()}")
+ fut0 result = 0
+ >>> print(f"fut1 result = {fut_list[1].wait()}")
+ fut1 result = 1
+ """
+ return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures)))
+
+
+def wait_all(futures: List[Future]) -> List:
+ r"""
+ Waits for all provided futures to be complete, and returns
+ the list of completed values. If any of the futures encounters an error,
+    the method will exit early, reporting the error without waiting for the
+    other futures to complete.
+
+ Args:
+        futures (list): a list of :class:`~torch.futures.Future` objects.
+
+ Returns:
+ A list of the completed :class:`~torch.futures.Future` results. This
+ method will throw an error if ``wait`` on any
+ :class:`~torch.futures.Future` throws.
+ """
+ return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()]
diff --git a/venv/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70a947fc558b7ad7fa16a7f58ec8446a7c7529f0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/__init__.py b/venv/lib/python3.10/site-packages/torch/jit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8cb037053ccdb0da56f640972228d288de6ded8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/__init__.py
@@ -0,0 +1,294 @@
+import warnings
+
+from contextlib import contextmanager
+from typing import Any, Iterator
+
+import torch._C
+
+# These are imported so users can access them from the `torch.jit` module
+from torch._jit_internal import (
+ _Await,
+ _drop,
+ _IgnoreContextManager,
+ _isinstance,
+ _overload,
+ _overload_method,
+ export,
+ Final,
+ Future,
+ ignore,
+ is_scripting,
+ unused,
+)
+from torch.jit._async import fork, wait
+from torch.jit._await import _awaitable, _awaitable_nowait, _awaitable_wait
+from torch.jit._decomposition_utils import _register_decomposition
+from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations
+from torch.jit._fuser import (
+ fuser,
+ last_executed_optimized_graph,
+ optimized_execution,
+ set_fusion_strategy,
+)
+from torch.jit._ir_utils import _InsertPoint
+from torch.jit._script import (
+ _ScriptProfile,
+ _unwrap_optional,
+ Attribute,
+ CompilationUnit,
+ interface,
+ RecursiveScriptClass,
+ RecursiveScriptModule,
+ script,
+ script_method,
+ ScriptFunction,
+ ScriptModule,
+ ScriptWarning,
+)
+from torch.jit._serialization import (
+ jit_module_from_flatbuffer,
+ load,
+ save,
+ save_jit_module_to_flatbuffer,
+)
+from torch.jit._trace import (
+ _flatten,
+ _get_trace_graph,
+ _script_if_tracing,
+ _unique_state_dict,
+ is_tracing,
+ ONNXTracedModule,
+ TopLevelTracedModule,
+ trace,
+ trace_module,
+ TracedModule,
+ TracerWarning,
+ TracingCheckError,
+)
+
+from torch.utils import set_module
+
+__all__ = [
+ "Attribute",
+ "CompilationUnit",
+ "Error",
+ "Future",
+ "ScriptFunction",
+ "ScriptModule",
+ "annotate",
+ "enable_onednn_fusion",
+ "export",
+ "export_opnames",
+ "fork",
+ "freeze",
+ "ignore",
+ "isinstance",
+ "load",
+ "onednn_fusion_enabled",
+ "optimize_for_inference",
+ "save",
+ "script",
+ "script_if_tracing",
+ "set_fusion_strategy",
+ "strict_fusion",
+ "trace",
+ "trace_module",
+ "unused",
+ "wait",
+]
+
+# For backwards compatibility
+_fork = fork
+_wait = wait
+_set_fusion_strategy = set_fusion_strategy
+
+
+def export_opnames(m):
+ r"""
+ Generate new bytecode for a Script module.
+
+    Returns what the op list would be for a Script Module based on the current code base.
+
+    If you have a LiteScriptModule and want to get the currently present
+    list of ops, call _export_operator_list instead.
+ """
+ return torch._C._export_opnames(m._c)
+
+
+# torch.jit.Error
+Error = torch._C.JITException
+set_module(Error, "torch.jit")
+# This is not perfect but works in common cases
+Error.__name__ = "Error"
+Error.__qualname__ = "Error"
+
+
+# for use in python if using annotate
+def annotate(the_type, the_value):
+ """Use to give type of `the_value` in TorchScript compiler.
+
+ This method is a pass-through function that returns `the_value`, used to hint TorchScript
+ compiler the type of `the_value`. It is a no-op when running outside of TorchScript.
+
+ Though TorchScript can infer correct type for most Python expressions, there are some cases where
+ type inference can be wrong, including:
+
+    - Empty containers like `[]` and `{}`, which TorchScript assumes to be containers of `Tensor`
+    - Optional types like `Optional[T]` that are assigned a valid value of type `T`; TorchScript
+      would assume they are of type `T` rather than `Optional[T]`
+
+ Note that `annotate()` does not help in `__init__` method of `torch.nn.Module` subclasses because it
+ is executed in eager mode. To annotate types of `torch.nn.Module` attributes,
+    use :class:`~torch.jit.Attribute` instead.
+
+ Example:
+
+ .. testcode::
+
+ import torch
+ from typing import Dict
+
+ @torch.jit.script
+ def fn():
+ # Telling TorchScript that this empty dictionary is a (str -> int) dictionary
+ # instead of default dictionary type of (str -> Tensor).
+ d = torch.jit.annotate(Dict[str, int], {})
+
+ # Without `torch.jit.annotate` above, following statement would fail because of
+ # type mismatch.
+ d["name"] = 20
+
+ .. testcleanup::
+
+ del fn
+
+ Args:
+ the_type: Python type that should be passed to TorchScript compiler as type hint for `the_value`
+ the_value: Value or expression to hint type for.
+
+ Returns:
+ `the_value` is passed back as return value.
+ """
+ return the_value
+
+
+def script_if_tracing(fn):
+ """
+ Compiles ``fn`` when it is first called during tracing.
+
+    ``torch.jit.script`` has a non-negligible start-up time when it is first called due to
+    lazy initialization of many compiler builtins. Therefore, you should not use
+ it in library code. However, you may want to have parts of your library work
+ in tracing even if they use control flow. In these cases, you should use
+ ``@torch.jit.script_if_tracing`` to substitute for
+ ``torch.jit.script``.
+
+ Args:
+ fn: A function to compile.
+
+ Returns:
+ If called during tracing, a :class:`ScriptFunction` created by `torch.jit.script` is returned.
+ Otherwise, the original function `fn` is returned.
+ """
+ return _script_if_tracing(fn)
+
+
+# for torch.jit.isinstance
+def isinstance(obj, target_type):
+ """
+ Provide container type refinement in TorchScript.
+
+ It can refine parameterized containers of the List, Dict, Tuple, and Optional types. E.g. ``List[str]``,
+ ``Dict[str, List[torch.Tensor]]``, ``Optional[Tuple[int,str,int]]``. It can also
+ refine basic types such as bools and ints that are available in TorchScript.
+
+ Args:
+ obj: object to refine the type of
+        target_type: type to try to refine obj to
+
+    Returns:
+ ``bool``: True if obj was successfully refined to the type of target_type,
+ False otherwise with no new type refinement
+
+
+ Example (using ``torch.jit.isinstance`` for type refinement):
+ .. testcode::
+
+ import torch
+ from typing import Any, Dict, List
+
+ class MyModule(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, input: Any): # note the Any type
+ if torch.jit.isinstance(input, List[torch.Tensor]):
+ for t in input:
+ y = t.clamp(0, 0.5)
+ elif torch.jit.isinstance(input, Dict[str, str]):
+ for val in input.values():
+ print(val)
+
+ m = torch.jit.script(MyModule())
+ x = [torch.rand(3,3), torch.rand(4,3)]
+ m(x)
+ y = {"key1":"val1","key2":"val2"}
+ m(y)
+ """
+ return _isinstance(obj, target_type)
+
+
+class strict_fusion:
+ """
+ Give errors if not all nodes have been fused in inference, or symbolically differentiated in training.
+
+ Example:
+ Forcing fusion of additions.
+
+ .. code-block:: python
+
+ @torch.jit.script
+ def foo(x):
+ with torch.jit.strict_fusion():
+ return x + x + x
+
+ """
+
+    def __init__(self):
+        if not torch._jit_internal.is_scripting():
+            warnings.warn("Only works in script mode")
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, type: Any, value: Any, tb: Any) -> None:
+ pass
+
+
+# Context manager for globally hiding source ranges when printing graphs.
+# Note that these functions are exposed to Python as static members of the
+# Graph class, so mypy checks need to be skipped.
+@contextmanager
+def _hide_source_ranges() -> Iterator[None]:
+ old_enable_source_ranges = torch._C.Graph.global_print_source_ranges # type: ignore[attr-defined]
+ try:
+ torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
+ yield
+ finally:
+ torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
+
+
+def enable_onednn_fusion(enabled: bool):
+ """Enable or disables onednn JIT fusion based on the parameter `enabled`."""
+ torch._C._jit_set_llga_enabled(enabled)
+
+
+def onednn_fusion_enabled():
+ """Return whether onednn JIT fusion is enabled."""
+ return torch._C._jit_llga_enabled()
+
+
+del Any
+
+if not torch._C._jit_init():
+ raise RuntimeError("JIT initialization failed")
diff --git a/venv/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..795e83729c5ed8789dde833d43758c896712469a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd9b03fdc9faada44b811c0cadaa9dbefaaa35ce
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_monkeytype_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_monkeytype_config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5f7f269864b019f27a98c7062a2207a4d5f365a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_monkeytype_config.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c0c9a7711368e9cab6cf4eb84a1fe7df853f96e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3ebf8567e691d7e8cb5b832613399c1bd091632
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_async.py b/venv/lib/python3.10/site-packages/torch/jit/_async.py
new file mode 100644
index 0000000000000000000000000000000000000000..2134975bb953be85414f9a950c59d483df4847d7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_async.py
@@ -0,0 +1,101 @@
+"""Async API.
+
+This module contains the API for parallelism in TorchScript, notably:
+ * torch.jit.fork
+ * torch.jit.wait
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+
+import torch
+from torch._jit_internal import Future
+from torch.jit._builtins import _register_builtin
+
+from torch.utils import set_module
+
+set_module(Future, "torch.jit")
+
+
+def fork(func, *args, **kwargs):
+ r"""
+    Create an asynchronous task executing `func` and return a reference to the value of the result of this execution.
+
+ `fork` will return immediately, so the return value of `func` may not have been computed yet. To force completion
+ of the task and access the return value invoke `torch.jit.wait` on the Future. `fork` invoked
+ with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
+ nested, and may be invoked with positional and keyword arguments.
+ Asynchronous execution will only occur when run in TorchScript. If run in pure python,
+ `fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
+ while tracing, however the `fork` and `wait` calls will be captured in the exported IR Graph.
+
+ .. warning::
+ `fork` tasks will execute non-deterministically. We recommend only spawning
+ parallel fork tasks for pure functions that do not modify their inputs,
+ module attributes, or global state.
+
+ Args:
+ func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
+ that will be invoked. If executed in TorchScript, it will execute asynchronously,
+ otherwise it will not. Traced invocations of fork will be captured in the IR.
+        ``*args``, ``**kwargs``: arguments to invoke `func` with.
+
+    Returns:
+ `torch.jit.Future[T]`: a reference to the execution of `func`. The value `T`
+ can only be accessed by forcing completion of `func` through `torch.jit.wait`.
+
+ Example (fork a free function):
+
+ .. code-block:: python
+
+ import torch
+ from torch import Tensor
+ def foo(a : Tensor, b : int) -> Tensor:
+ return a + b
+ def bar(a):
+ fut : torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
+ return torch.jit.wait(fut)
+ script_bar = torch.jit.script(bar)
+ input = torch.tensor(2)
+ # only the scripted version executes asynchronously
+ assert script_bar(input) == bar(input)
+ # trace is not run asynchronously, but fork is captured in IR
+ graph = torch.jit.trace(bar, (input,)).graph
+ assert "fork" in str(graph)
+
+ Example (fork a module method):
+
+ .. code-block:: python
+
+ import torch
+ from torch import Tensor
+ class AddMod(torch.nn.Module):
+ def forward(self, a: Tensor, b : int):
+ return a + b
+ class Mod(torch.nn.Module):
+ def __init__(self):
+                super().__init__()
+                self.mod = AddMod()
+            def forward(self, input):
+                fut = torch.jit.fork(self.mod, input, b=2)
+ return torch.jit.wait(fut)
+ input = torch.tensor(2)
+ mod = Mod()
+ assert mod(input) == torch.jit.script(mod).forward(input)
+ """
+ return torch._C.fork(func, *args, **kwargs)
+
+
+def wait(future):
+ r"""
+ Force completion of a `torch.jit.Future[T]` asynchronous task, returning the result of the task.
+
+    See :func:`~fork` for docs and examples.
+
+    Args:
+ future (torch.jit.Future[T]): an asynchronous task reference, created through `torch.jit.fork`
+ Returns:
+ `T`: the return value of the completed task
+ """
+ return torch._C.wait(future)
+
+
+_register_builtin(wait, "aten::wait")
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_await.py b/venv/lib/python3.10/site-packages/torch/jit/_await.py
new file mode 100644
index 0000000000000000000000000000000000000000..a79952bf3e2d4e8da92abdc0b724ff933b4bb86f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_await.py
@@ -0,0 +1,26 @@
+import torch
+from torch._jit_internal import _Await
+from torch.jit._builtins import _register_builtin
+
+from torch.utils import set_module
+
+set_module(_Await, "torch.jit")
+
+
+def _awaitable(func, *args, **kwargs):
+ r"""Create Await object that will call specified functioni with specified args, when it is requested for the result."""
+ return torch._C._awaitable(func, *args, **kwargs)
+
+
+def _awaitable_wait(aw):
+ r"""Request await the result of execution, if Await is not completed yet, the func will be called immediately."""
+ return torch._C._awaitable_wait(aw)
+
+
+def _awaitable_nowait(o):
+ r"""Create completed Await with specified result."""
+ return torch._C._awaitable_nowait(o)
+
+
+_register_builtin(_awaitable_wait, "prim::awaitable_wait")
+_register_builtin(_awaitable_nowait, "prim::awaitable_nowait")
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_builtins.py b/venv/lib/python3.10/site-packages/torch/jit/_builtins.py
new file mode 100644
index 0000000000000000000000000000000000000000..f50e1bbfedb5c59fa4e5cc3daffb51ce0eb1695e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_builtins.py
@@ -0,0 +1,187 @@
+import cmath
+import math
+import warnings
+
+from collections import OrderedDict
+from typing import Dict, Optional
+
+import torch
+import torch.backends.cudnn as cudnn
+
+from ..nn.modules.utils import _list_with_default, _pair, _quadruple, _single, _triple
+
+_builtin_table: Optional[Dict[int, str]] = None
+
+_modules_containing_builtins = (torch, torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse, torch._C._special) # type: ignore[attr-defined] # noqa: B950
+
+_builtin_ops = [
+ # Pairs of (function, op_name)
+ (_pair, "aten::_pair"),
+ (_quadruple, "aten::_quadruple"),
+ (_single, "aten::_single"),
+ (_triple, "aten::_triple"),
+ (_list_with_default, "aten::list_with_default"),
+ (OrderedDict, "aten::dict"),
+ (dict, "aten::dict"),
+ (cudnn.is_acceptable, "aten::cudnn_is_acceptable"),
+ (math.ceil, "aten::ceil"),
+ (math.copysign, "aten::copysign"),
+ (math.erf, "aten::erf"),
+ (math.erfc, "aten::erfc"),
+ (math.exp, "aten::exp"),
+ (math.expm1, "aten::expm1"),
+ (math.fabs, "aten::fabs"),
+ (math.floor, "aten::floor"),
+ (math.gamma, "aten::gamma"),
+ (math.lgamma, "aten::lgamma"),
+ (math.log, "aten::log"),
+ (math.log10, "aten::log10"),
+ (math.log1p, "aten::log1p"),
+ (math.pow, "aten::pow"),
+ (math.sqrt, "aten::sqrt"),
+ (math.isnan, "aten::isnan"),
+ (math.asinh, "aten::asinh"),
+ (math.atanh, "aten::atanh"),
+ (math.cosh, "aten::cosh"),
+ (math.sinh, "aten::sinh"),
+ (math.tanh, "aten::tanh"),
+ (math.acos, "aten::acos"),
+ (math.asin, "aten::asin"),
+ (math.atan, "aten::atan"),
+ (math.atan2, "aten::atan2"),
+ (math.cos, "aten::cos"),
+ (math.sin, "aten::sin"),
+ (math.tan, "aten::tan"),
+ (math.asinh, "aten::asinh"),
+ (math.atanh, "aten::atanh"),
+ (math.acosh, "aten::acosh"),
+ (math.fmod, "aten::fmod"),
+ (math.modf, "aten::modf"),
+ (math.factorial, "aten::factorial"),
+ (math.frexp, "aten::frexp"),
+ (math.isinf, "aten::isinf"),
+ (math.degrees, "aten::degrees"),
+ (math.radians, "aten::radians"),
+ (cmath.isnan, "aten::isnan"),
+ (cmath.isfinite, "aten::isfinite"),
+ (cmath.isinf, "aten::isinf"),
+ (cmath.phase, "aten::angle"),
+ (cmath.rect, "aten::polar"),
+ (cmath.log, "aten::log"),
+ (cmath.log10, "aten::log10"),
+ (cmath.sqrt, "aten::sqrt"),
+ (cmath.exp, "aten::exp"),
+ (cmath.sin, "aten::sin"),
+ (cmath.tan, "aten::tan"),
+ (cmath.cos, "aten::cos"),
+ (cmath.asin, "aten::asin"),
+ (cmath.acos, "aten::acos"),
+ (cmath.atan, "aten::atan"),
+ (cmath.sinh, "aten::sinh"),
+ (cmath.cosh, "aten::cosh"),
+ (cmath.tanh, "aten::tanh"),
+ (cmath.asinh, "aten::asinh"),
+ (cmath.acosh, "aten::acosh"),
+ (cmath.atanh, "aten::atanh"),
+ (math.ldexp, "aten::ldexp"),
+ (torch._assert, "aten::_assert"),
+ (torch.autograd.grad, "aten::grad"),
+ (torch.autograd.backward, "aten::backward"),
+ (torch._C._infer_size, "aten::_infer_size"),
+ (torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"), # type: ignore[attr-defined]
+ (torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"),
+ (torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"),
+ (torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"),
+ (torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"),
+ (torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"),
+ (torch._C._get_tracing_state, "aten::_get_tracing_state"),
+ (torch._C._get_cpu_capability, "aten::_get_cpu_capability"),
+ (warnings.warn, "aten::warn"),
+ (torch._VF.stft, "aten::stft"), # type: ignore[attr-defined]
+ (torch._VF.istft, "aten::istft"), # type: ignore[attr-defined]
+ (torch._VF.cdist, "aten::cdist"), # type: ignore[attr-defined]
+ (torch._VF.norm, "aten::norm"), # type: ignore[attr-defined]
+ (torch._VF.unique_dim, "aten::unique_dim"),
+ (torch._VF.unique_consecutive, "aten::unique_consecutive"), # type: ignore[attr-defined]
+ (torch._VF.nuclear_norm, "aten::nuclear_norm"),
+ (torch._VF.frobenius_norm, "aten::frobenius_norm"),
+ (torch._VF.tensordot, "aten::tensordot"), # type: ignore[attr-defined]
+]
+
+# Ops in torch.functional are bound to torch.
+# In these cases, we want to resolve the function to its Python implementation
+# instead of looking up a builtin "aten::" schema.
+
+
+def _gen_torch_functional_registered_ops():
+    # Eventually ops should encompass all of torch/functional.py (torch.functional.__all__),
+    # but we are currently only able to compile some of the functions. Additionally,
+    # some functions directly map to their aten:: implementations.
+    # TODO: add support for more ops
+ ops = [
+ "stft",
+ "istft",
+ "lu",
+ "cdist",
+ "norm",
+ "unique",
+ "unique_consecutive",
+ "tensordot",
+ ]
+ return {getattr(torch.functional, name) for name in ops}
+
+
+_functional_registered_ops = _gen_torch_functional_registered_ops()
+
+
+def _is_special_functional_bound_op(fn):
+ return fn in _functional_registered_ops
+
+
+# lazily built to ensure the correct initialization order
+def _get_builtin_table():
+ global _builtin_table
+ if _builtin_table is not None:
+ return _builtin_table
+ _builtin_table = {}
+
+ def register_all(mod):
+ for name in dir(mod):
+ v = getattr(mod, name)
+ if (
+ callable(v)
+ and not _is_special_functional_bound_op(v)
+ and v is not torch.no_grad
+ and v is not torch.autocast
+ ):
+ # Fixup inconsistency in segment_reduce
+ if name == "_segment_reduce":
+ name = name[1:]
+ _builtin_ops.append((v, "aten::" + name))
+
+ for mod in _modules_containing_builtins:
+ register_all(mod)
+
+ _builtin_ops.append((math.gcd, "aten::gcd"))
+ _builtin_ops.append((math.isfinite, "aten::isfinite"))
+ _builtin_ops.append((math.remainder, "aten::mathremainder")) # type: ignore[attr-defined]
+
+ import torch.distributed.autograd as dist_autograd
+
+ if dist_autograd.is_available():
+ _builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
+ _builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))
+
+ # populate the _builtin_table from _builtin_ops
+ for builtin, aten_op in _builtin_ops:
+ _builtin_table[id(builtin)] = aten_op
+
+ return _builtin_table
+
+
+def _register_builtin(fn, op):
+ _get_builtin_table()[id(fn)] = op
+
+
+def _find_builtin(fn):
+ return _get_builtin_table().get(id(fn))
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_check.py b/venv/lib/python3.10/site-packages/torch/jit/_check.py
new file mode 100644
index 0000000000000000000000000000000000000000..790da30e511ca0bc808a02bafb2cb4aef6cb4b83
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_check.py
@@ -0,0 +1,248 @@
+import ast
+import inspect
+import textwrap
+import warnings
+
+import torch
+
+
+class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
+ """Check the ``__init__`` method of a given ``nn.Module``.
+
+ It ensures that all instance-level attributes can be properly initialized.
+
+ Specifically, we do type inference based on attribute values...even
+ if the attribute in question has already been typed using
+ Python3-style annotations or ``torch.jit.annotate``. This means that
+ setting an instance-level attribute to ``[]`` (for ``List``),
+    ``{}`` (for ``Dict``), or ``None`` (for ``Optional``) isn't enough
+ information for us to properly initialize that attribute.
+
+ An object of this class can walk a given ``nn.Module``'s AST and
+ determine if it meets our requirements or not.
+
+ Known limitations
+ 1. We can only check the AST nodes for certain constructs; we can't
+ ``eval`` arbitrary expressions. This means that function calls,
+ class instantiations, and complex expressions that resolve to one of
+ the "empty" values specified above will NOT be flagged as
+ problematic.
+ 2. We match on string literals, so if the user decides to use a
+ non-standard import (e.g. `from typing import List as foo`), we
+ won't catch it.
+
+ Example:
+ .. code-block:: python
+
+ class M(torch.nn.Module):
+ def fn(self):
+ return []
+
+ def __init__(self):
+ super().__init__()
+ self.x: List[int] = []
+
+ def forward(self, x: List[int]):
+ self.x = x
+ return 1
+
+ The above code will pass the ``AttributeTypeIsSupportedChecker``
+ check since we have a function call in ``__init__``. However,
+ it will still fail later with the ``RuntimeError`` "Tried to set
+ nonexistent attribute: x. Did you forget to initialize it in
+ __init__()?".
+
+ Args:
+ nn_module - The instance of ``torch.nn.Module`` whose
+ ``__init__`` method we wish to check
+ """
+
+ def check(self, nn_module: torch.nn.Module) -> None:
+ source_lines = inspect.getsource(nn_module.__class__.__init__)
+
+ # Ignore comments no matter the indentation
+ def is_useless_comment(line):
+ line = line.strip()
+ return line.startswith("#") and not line.startswith("# type:")
+
+ source_lines = "\n".join(
+ [l for l in source_lines.split("\n") if not is_useless_comment(l)]
+ )
+
+ # This AST only contains the `__init__` method of the nn.Module
+ init_ast = ast.parse(textwrap.dedent(source_lines))
+
+ # Get items annotated in the class body
+ self.class_level_annotations = list(nn_module.__annotations__.keys())
+
+ # Flag for later
+ self.visiting_class_level_ann = False
+
+ self.visit(init_ast)
+
+ def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
+ if ann_type == "List":
+ # Assigning `[]` to a `List` type gives you a Node where
+ # value=List(elts=[], ctx=Load())
+ if not isinstance(node, ast.List):
+ return False
+ if node.elts:
+ return False
+ elif ann_type == "Dict":
+ # Assigning `{}` to a `Dict` type gives you a Node where
+ # value=Dict(keys=[], values=[])
+ if not isinstance(node, ast.Dict):
+ return False
+ if node.keys:
+ return False
+ elif ann_type == "Optional":
+ # Assigning `None` to an `Optional` type gives you a
+ # Node where value=Constant(value=None, kind=None)
+ if not isinstance(node, ast.Constant):
+ return False
+ if node.value: # type: ignore[attr-defined]
+ return False
+
+ return True
+
+ def visit_Assign(self, node):
+ """Store assignment state when assigning to a Call Node.
+
+ If we're visiting a Call Node (the right-hand side of an
+ assignment statement), we won't be able to check the variable
+ that we're assigning to (the left-hand side of an assignment).
+        Because of this, we need to store this state in visit_Assign.
+ (Luckily, we only have to do this if we're assigning to a Call
+ Node, i.e. ``torch.jit.annotate``. If we're using normal Python
+ annotations, we'll be visiting an AnnAssign Node, which has its
+ target built in.)
+ """
+ try:
+ if (
+ isinstance(node.value, ast.Call)
+ and node.targets[0].attr in self.class_level_annotations
+ ):
+ self.visiting_class_level_ann = True
+ except AttributeError:
+ return
+ self.generic_visit(node)
+ self.visiting_class_level_ann = False
+
+ def visit_AnnAssign(self, node):
+ """Visit an AnnAssign node in an ``nn.Module``'s ``__init__`` method.
+
+ It checks if it conforms to our attribute annotation rules."""
+ # If we have a local variable
+ try:
+ if node.target.value.id != "self":
+ return
+ except AttributeError:
+ return
+
+ # If we have an attribute that's already been annotated at the
+ # class level
+ if node.target.attr in self.class_level_annotations:
+ return
+
+ # TODO @ansley: add `Union` once landed
+
+ # NB: Even though `Tuple` is a "container", we don't want to
+        # check for it here. `Tuple` functions as a type with an
+        # "infinite" number of subtypes, in the sense that you can have
+        # `Tuple[()]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`,
+ # `Tuple[T2, T1]` and so on, and none of these subtypes can be
+ # used in place of the other. Therefore, assigning an empty
+ # tuple in `__init__` CORRECTLY means that that variable
+ # cannot be reassigned later to a non-empty tuple. Same
+ # deal with `NamedTuple`
+
+ containers = {"List", "Dict", "Optional"}
+
+ # If we're not evaluating one of the specified problem types
+ try:
+ if node.annotation.value.id not in containers:
+ return
+ except AttributeError:
+ # To evaluate a base type (`str`, `int`, etc.), we would
+ # have needed to get the name through `node.annotation.id`
+ # instead of `node.annotation.value.id`. Seems that we're
+ # not evaluating one of our "containers"
+ return
+
+ # Check if the assigned variable is empty
+ ann_type = node.annotation.value.id
+ if not self._is_empty_container(node.value, ann_type):
+ return
+
+ warnings.warn(
+ "The TorchScript type system doesn't support "
+ "instance-level annotations on empty non-base "
+ "types in `__init__`. Instead, either 1) use a "
+ "type annotation in the class body, or 2) wrap "
+ "the type in `torch.jit.Attribute`."
+ )
+
+ def visit_Call(self, node):
+ """Determine if a Call node is 'torch.jit.annotate' in __init__.
+
+ Visit a Call node in an ``nn.Module``'s ``__init__``
+ method and determine if it's ``torch.jit.annotate``. If so,
+ see if it conforms to our attribute annotation rules.
+ """
+ # If we have an attribute that's already been annotated at the
+ # class level
+ if self.visiting_class_level_ann:
+ return
+
+ # If this isn't a call to `torch.jit.annotate`
+ try:
+ if (
+ node.func.value.value.id != "torch"
+ or node.func.value.attr != "jit"
+ or node.func.attr != "annotate"
+ ):
+ self.generic_visit(node)
+ elif (
+ node.func.value.value.id != "jit" or node.func.value.attr != "annotate"
+ ):
+ self.generic_visit(node)
+ except AttributeError:
+ # Looks like we didn't even have the right node structure
+ # to check for `torch.jit.annotate` in the first place
+ self.generic_visit(node)
+
+ # Invariant: we have a `torch.jit.annotate` or a
+ # `torch.annotate` call
+
+ # A Call Node for `torch.jit.annotate` should have an `args`
+ # list of length 2 where args[0] represents the annotation and
+ # args[1] represents the actual value
+ if len(node.args) != 2:
+ return
+
+ if not isinstance(node.args[0], ast.Subscript):
+ return
+
+ # See notes in `visit_AnnAssign` r.e. containers
+
+ containers = {"List", "Dict", "Optional"}
+
+ try:
+ ann_type = node.args[0].value.id # type: ignore[attr-defined]
+ except AttributeError:
+ return
+
+ if ann_type not in containers:
+ return
+
+ # Check if the assigned variable is empty
+ if not self._is_empty_container(node.args[1], ann_type):
+ return
+
+ warnings.warn(
+ "The TorchScript type system doesn't support "
+ "instance-level annotations on empty non-base "
+ "types in `__init__`. Instead, either 1) use a "
+ "type annotation in the class body, or 2) wrap "
+ "the type in `torch.jit.Attribute`."
+ )
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py b/venv/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py
new file mode 100644
index 0000000000000000000000000000000000000000..52056ce46bea76459ac856a3c324ccfc8a310af5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py
@@ -0,0 +1,189 @@
+# Functions for synthesizing magic methods for JIT-compiled dataclasses
+import ast
+import dataclasses
+import inspect
+import os
+from functools import partial
+from typing import Callable, Dict, List
+
+from torch._jit_internal import FAKE_FILENAME_PREFIX, is_optional
+from torch._sources import ParsedDef, SourceContext
+
+
+def _get_fake_filename(cls, method_name):
+ return os.path.join(FAKE_FILENAME_PREFIX, cls.__name__, method_name)
+
+
+def compose_fn(cls, name: str, body_lines: List[str], signature: str) -> ParsedDef:
+ body = "\n".join(f" {b}" for b in body_lines)
+ decl = f"def {name}{signature}:\n{body}"
+
+ # Parse the function declaration
+ try:
+ py_ast = ast.parse(decl)
+ except SyntaxError as e:
+ # This should only happen if there's some unforeseeable change
+ # in the dataclasses module that makes our synthesized code fail
+ raise RuntimeError(
+ f"TorchScript failed to synthesize dataclass method '{name}' for class '{cls.__name__}'. "
+ "Please file a bug report at "
+ ) from e
+ fake_filename = _get_fake_filename(cls, name)
+ # Parse the function
+ return ParsedDef(
+ py_ast,
+ ctx=SourceContext(
+ source=decl, filename=fake_filename, file_lineno=0, leading_whitespace_len=0
+ ),
+ source=decl,
+ filename=fake_filename,
+ file_lineno=0,
+ )
+
+
+def synthesize__init__(cls) -> ParsedDef:
+    # Supporting default factories in the way that people expect would effectively require
+    # us to allow compiling lambda functions, which is not currently supported.
+ if any(
+ field.default_factory is not dataclasses.MISSING
+ for field in dataclasses.fields(cls)
+ ):
+ raise NotImplementedError(
+ "Default factory initializers are not supported in TorchScript dataclasses"
+ )
+
+ # Simply read off the generated __init__ signature from CPython's implementation. It'll be
+ # almost correct except for InitVar annotations, which we need to handle specially.
+ signature = inspect.signature(cls.__init__)
+
+ # Handle InitVars if needed (only works on Python 3.8+, when a `type` attribute was added to InitVar);
+ # see CPython commit here https://github.com/python/cpython/commit/01ee12ba35a333e8a6a25c4153c4a21838e9585c
+ init_vars: List[str] = []
+ params = []
+ for name, param in signature.parameters.items():
+ ann = param.annotation
+
+ if isinstance(ann, dataclasses.InitVar):
+ # The TorchScript interpreter can't handle InitVar annotations, so we unwrap the underlying type here
+ init_vars.append(name)
+ params.append(param.replace(annotation=ann.type)) # type: ignore[attr-defined]
+ else:
+ params.append(param)
+
+ signature = signature.replace(parameters=params)
+
+ body = [
+ # Assign all attributes to self
+ f"self.{field.name} = {field.name}"
+ for field in dataclasses.fields(cls)
+ if field.init and field.name not in init_vars
+ ]
+ # Call user's impl of __post_init__ if it exists
+ if hasattr(cls, "__post_init__"):
+ body.append("self.__post_init__(" + ", ".join(init_vars) + ")")
+
+ return compose_fn(cls, "__init__", body or ["pass"], signature=str(signature))
+
+
+# This is a placeholder at the moment since the TorchScript interpreter doesn't call __repr__
+def synthesize__repr__(cls) -> ParsedDef:
+ return compose_fn(
+ cls,
+ "__repr__",
+ [
+ f"return '{cls.__name__}("
+ + ", ".join(
+ [
+ f"{field.name}=self.{field.name}"
+ for field in dataclasses.fields(cls)
+ if field.repr
+ ]
+ )
+ + ")'"
+ ],
+ signature="(self) -> str",
+ )
+
+
+def synthesize__hash__(cls) -> ParsedDef:
+ return compose_fn(
+ cls,
+ "__hash__",
+ [
+ # This is just a placeholder to prevent compilation from failing; this won't even get called at
+ # all right now because the TorchScript interpreter doesn't call custom __hash__ implementations
+ "raise NotImplementedError('__hash__ is not supported for dataclasses in TorchScript')"
+ ],
+ signature="(self) -> int",
+ )
+
+
+# Implementation for __eq__ and __ne__
+def synthesize_equality(cls, name: str, converse: str) -> ParsedDef:
+ return synthesize_comparison(
+ cls,
+ name,
+ allow_eq=True,
+ raise_on_none=False,
+ inner=[f"if val1 {converse} val2: return False"],
+ )
+
+
+def synthesize_inequality(cls, name: str, op: str, allow_eq: bool) -> ParsedDef:
+ return synthesize_comparison(
+ cls,
+ name,
+ allow_eq,
+ raise_on_none=True,
+ inner=[
+ f"if val1 {op} val2: return True",
+ f"elif val2 {op} val1: return False",
+ ],
+ )
+
+
+def synthesize_comparison(
+ cls, name: str, allow_eq: bool, raise_on_none: bool, inner: List[str]
+) -> ParsedDef:
+ body = []
+ for field in dataclasses.fields(cls):
+ if not field.compare:
+ continue
+
+ body.extend(
+ [
+ f"val1 = self.{field.name}",
+ f"val2 = other.{field.name}",
+ ]
+ )
+ body.extend(
+ inner
+ if not is_optional(field.type)
+ else [
+ # Type refinement for optional fields; we need this to avoid type errors from the interpreter
+ "if val1 is not None and val2 is not None:",
+ *[" " + line for line in inner],
+ "elif (val1 is None) != (val2 is None):",
+ f" raise TypeError('Cannot compare {cls.__name__} with None')"
+ if raise_on_none
+ else " return False",
+ ]
+ )
+
+ body.append(f"return {allow_eq}")
+ return compose_fn(
+ cls, name, body, signature=f"(self, other: {cls.__name__}) -> bool"
+ )
+
+
+DATACLASS_MAGIC_METHODS: Dict[str, Callable] = {
+ "__init__": synthesize__init__,
+ "__repr__": synthesize__repr__,
+ "__hash__": synthesize__hash__,
+ "__eq__": partial(synthesize_equality, name="__eq__", converse="!="),
+ "__ne__": partial(synthesize_equality, name="__ne__", converse="=="),
+ "__lt__": partial(synthesize_inequality, name="__lt__", op="<", allow_eq=False),
+ "__le__": partial(synthesize_inequality, name="__le__", op="<", allow_eq=True),
+ "__gt__": partial(synthesize_inequality, name="__gt__", op=">", allow_eq=False),
+ "__ge__": partial(synthesize_inequality, name="__ge__", op=">", allow_eq=True),
+}
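+
+
+# Illustrative sketch (not part of the original file): the synthesizers above
+# are what let TorchScript compile a plain dataclass. Assuming dataclass
+# support in the installed TorchScript version, usage looks roughly like this:
+if __name__ == "__main__":
+    import dataclasses as _dataclasses
+
+    import torch
+
+    @_dataclasses.dataclass
+    class _Point:
+        x: float
+        y: float
+
+    @torch.jit.script
+    def _use(p: _Point) -> float:
+        # __init__ / __eq__ here are the synthesized methods defined above
+        return p.x + p.y
+
+    print(_use(_Point(1.0, 2.0)))  # 3.0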
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py b/venv/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb4448e2b900f8353554370677ed90054ff2d971
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py
@@ -0,0 +1,11 @@
+import torch
+from torch._ops import OpOverload, OpOverloadPacket
+
+
+def _register_decomposition(op: OpOverload, graph: torch._C.Graph):
+ assert not isinstance(
+ op, OpOverloadPacket
+ ), f"Must pass specific op overload, not overload packet, found {op}"
+ assert isinstance(op, OpOverload)
+
+ torch._C._jit_register_decomposition_for_schema(op._schema, graph)
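+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the original file): a decomposition
+    # graph is typically produced by scripting a Python function, then
+    # registered for a *specific* overload (e.g. aten.relu.default), never
+    # the packet (aten.relu). Whether a given schema/graph pair is accepted
+    # is up to the TorchScript build.
+    def _relu_decomposition(x: torch.Tensor) -> torch.Tensor:
+        return torch.clamp(x, min=0.0)
+
+    _decomp_graph = torch.jit.script(_relu_decomposition).graph
+    _register_decomposition(torch.ops.aten.relu.default, _decomp_graph)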
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_decompositions.py b/venv/lib/python3.10/site-packages/torch/jit/_decompositions.py
new file mode 100644
index 0000000000000000000000000000000000000000..babb70eaf7cb11a937b5feacd40c4104fc3ae8d1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_decompositions.py
@@ -0,0 +1,127 @@
+import inspect
+import warnings
+from typing import Dict, List, Optional, Set
+
+import torch
+from torch import Tensor
+from torch.types import Number
+
+aten = torch.ops.aten
+
+decomposition_table: Dict[str, torch.jit.ScriptFunction] = {}
+function_name_set: Set[str] = set()
+
+
+def check_decomposition_has_type_annotations(f):
+ inspect_empty = inspect._empty # type: ignore[attr-defined]
+ sig = inspect.signature(f)
+ for param in sig.parameters.values():
+ assert (
+ param.annotation != inspect_empty
+ ), f"No signature on param {param.name} for function {f.name}"
+
+ assert (
+ sig.return_annotation != inspect_empty
+ ), f"No return annotation for function {f.name}"
+
+
+def signatures_match(decomposition_sig, torch_op_sig):
+ decomp_params = decomposition_sig.parameters
+ op_params = torch_op_sig.parameters
+
+ if len(decomp_params) != len(op_params):
+ return False
+
+ for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
+        # We can't check for full equality yet because not all fields are correctly
+        # deduced in torch_op_sig (e.g. the default value). We also can't check
+        # 'kind' because kwarg-only values with defaults are not yet supported in TS.
+        inspect_empty = inspect._empty  # type: ignore[attr-defined]
+        for field in ["name", "annotation"]:
+            if field == "name" and decomp_param.name == "self":
+                warnings.warn("PyTorch uses 'input' instead of 'self' on the public API")
+
+ if getattr(decomp_param, field) != getattr(op_param, field):
+ return False
+
+ decomp_default = decomp_param.default
+ op_default = op_param.default
+ # default value not always correctly inferred as being present on torch schema,
+ # but if specified on both they should be equal
+ if decomp_default != inspect_empty and op_default != inspect_empty:
+ if decomp_default != op_default:
+ return False
+
+ return decomposition_sig.return_annotation == torch_op_sig.return_annotation
+
+
+def register_decomposition(aten_op, registry=None):
+ def decomposition_decorator(f):
+ nonlocal registry
+ if registry is None:
+ registry = decomposition_table
+
+ assert isinstance(aten_op, torch._ops.OpOverload)
+
+ # Need unique name for jit function serialization
+ assert (
+ f.__name__ not in function_name_set
+ ), f"Duplicated function name {f.__name__}"
+ function_name_set.add(f.__name__)
+
+ scripted_func = torch.jit.script(f)
+ torch._C._jit_pass_inline(scripted_func.graph)
+
+ for _ in range(2):
+ torch._C._jit_pass_peephole(scripted_func.graph)
+ torch._C._jit_pass_constant_propagation(scripted_func.graph)
+
+ registry[str(aten_op._schema)] = scripted_func
+ return f
+
+ return decomposition_decorator
+
+
+# TODO: replace torch.sigmoid -> aten.sigmoid
+
+
+@register_decomposition(aten.var.correction)
+def var_decomposition(
+ input: Tensor,
+ dim: Optional[List[int]] = None,
+ correction: Optional[Number] = None,
+ keepdim: bool = False,
+) -> Tensor:
+ if dim is None:
+ dim_i: List[int] = []
+ dim = dim_i
+
+ if isinstance(dim, (tuple, list)) and len(dim) == 0:
+ n = input.numel()
+ else:
+ n = 1
+ for dim_i in dim: # type: ignore[assignment]
+ n *= input.shape[dim_i] # type: ignore[call-overload]
+
+ mean = aten.mean(input, dim, True)
+ sub = input - mean
+ sq = sub * sub
+ sum = aten.sum(sq, dim, keepdim)
+
+ if correction is None:
+ denom = float(n - 1)
+ else:
+ if isinstance(correction, int):
+ denom = float(n - correction)
+ elif isinstance(correction, float):
+ denom = float(n) - correction
+ else:
+ raise RuntimeError("correction must be int or float")
+
+ return sum / max(0, denom)
+
+
+@register_decomposition(aten.var.default)
+def var(input: Tensor, unbiased: bool = True) -> Tensor:
+ return var_decomposition(input, correction=(1 if unbiased else 0))
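+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the original file): the decompositions
+    # above remain plain Python functions, so they can be sanity-checked
+    # against eager-mode torch.var before the scripted graphs are used.
+    x = torch.rand(4, 5)
+    torch.testing.assert_close(
+        var_decomposition(x, dim=[1], correction=1, keepdim=True),
+        torch.var(x, dim=1, correction=1, keepdim=True),
+    )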
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_freeze.py b/venv/lib/python3.10/site-packages/torch/jit/_freeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..731f28305628b0f2097ac6d959ff40b3b3af9393
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_freeze.py
@@ -0,0 +1,227 @@
+"""Freezing.
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+
+from typing import List, Optional
+
+import torch
+from torch.jit._script import RecursiveScriptModule, ScriptModule
+
+
+def freeze(
+ mod, preserved_attrs: Optional[List[str]] = None, optimize_numerics: bool = True
+):
+ r"""Freeze ScriptModule, inline submodules, and attributes as constants.
+
+ Freezing a :class:`ScriptModule` will clone it and attempt to inline the cloned
+ module's submodules, parameters, and attributes as constants in the TorchScript IR Graph.
+ By default, `forward` will be preserved, as well as attributes & methods specified in
+ `preserved_attrs`. Additionally, any attribute that is modified within a preserved
+ method will be preserved.
+
+ Freezing currently only accepts ScriptModules that are in eval mode.
+
+    Freezing applies generic optimizations that will speed up your model regardless of machine.
+ To further optimize using server-specific settings, run `optimize_for_inference` after
+ freezing.
+
+ Args:
+ mod (:class:`ScriptModule`): a module to be frozen
+ preserved_attrs (Optional[List[str]]): a list of attributes to preserve in addition to the forward method.
+ Attributes modified in preserved methods will also be preserved.
+        optimize_numerics (bool): If ``True``, a set of optimization passes that do not strictly
+            preserve numerics will be run. Full details of the optimizations can be found in `torch.jit.run_frozen_optimizations`.
+
+ Returns:
+ Frozen :class:`ScriptModule`.
+
+ Example (Freezing a simple module with a Parameter):
+
+    .. testcode::
+
+ import torch
+ class MyModule(torch.nn.Module):
+ def __init__(self, N, M):
+ super().__init__()
+ self.weight = torch.nn.Parameter(torch.rand(N, M))
+ self.linear = torch.nn.Linear(N, M)
+
+ def forward(self, input):
+ output = self.weight.mm(input)
+ output = self.linear(output)
+ return output
+
+ scripted_module = torch.jit.script(MyModule(2, 3).eval())
+ frozen_module = torch.jit.freeze(scripted_module)
+ # parameters have been removed and inlined into the Graph as constants
+ assert len(list(frozen_module.named_parameters())) == 0
+ # See the compiled graph as Python code
+ print(frozen_module.code)
+
+    Example (Freezing a module with preserved attributes):
+
+    .. testcode::
+
+ import torch
+ class MyModule2(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.modified_tensor = torch.tensor(10.)
+ self.version = 1
+
+ def forward(self, input):
+ self.modified_tensor += 1
+ return input + self.modified_tensor
+
+ scripted_module = torch.jit.script(MyModule2().eval())
+ frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["version"])
+ # we've manually preserved `version`, so it still exists on the frozen module and can be modified
+ assert frozen_module.version == 1
+ frozen_module.version = 2
+ # `modified_tensor` is detected as being mutated in the forward, so freezing preserves
+ # it to retain model semantics
+ assert frozen_module(torch.tensor(1)) == torch.tensor(12)
+ # now that we've run it once, the next result will be incremented by one
+ assert frozen_module(torch.tensor(1)) == torch.tensor(13)
+
+ Note:
+ Freezing submodule attributes is also supported:
+ frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["submodule.version"])
+
+ Note:
+ If you're not sure why an attribute is not being inlined as a constant, you can run
+ `dump_alias_db` on frozen_module.forward.graph to see if freezing has detected the
+ attribute is being modified.
+
+ Note:
+        Because freezing makes weights constants and removes the module hierarchy, `to` and other
+        nn.Module methods that manipulate device or dtype no longer work. As a workaround,
+        you can remap devices by specifying `map_location` in `torch.jit.load`; however,
+        device-specific logic may have been baked into the model.
+ """
+ if not isinstance(mod, ScriptModule):
+ raise RuntimeError(
+ "Freezing expects a ScriptModule as input. "
+ "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
+ )
+
+ if mod.training:
+ raise RuntimeError(
+ "Freezing is currently only implemented for modules in eval mode. "
+ "Please call .eval() on your module before freezing."
+ )
+
+ preserved_attrs = preserved_attrs if preserved_attrs is not None else []
+
+ out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
+ RecursiveScriptModule._finalize_scriptmodule(out)
+
+ preserved_methods = [x for x in preserved_attrs if mod._c._has_method(x)]
+ run_frozen_optimizations(out, optimize_numerics, preserved_methods)
+
+ return out
+
+
+def run_frozen_optimizations(
+ mod, optimize_numerics: bool = True, preserved_methods: Optional[List[str]] = None
+):
+ r"""
+ Run a series of optimizations looking for patterns that occur in frozen graphs.
+
+ The current set of optimizations includes:
+ - Dropout Removal
+ - Pretranspose Linear Layers
+ - Concat Linear Layers with same input Tensor
+ - Conv -> Batchnorm folding
+ - Conv -> Add/Sub folding
+ - Conv -> Mul/Div folding
+
+ Args:
+ mod (:class:`ScriptModule`): a frozen module to be optimized
+
+        optimize_numerics (bool): If ``True``, a set of optimization passes that do not strictly
+            preserve numerics will be run. These optimizations preserve the default rtol and atol of `torch.testing.assert_close`
+            when applied to a single transformation; however, in a module where many transformations are applied,
+            the rtol or atol may no longer fall within the default `assert_close` tolerance. Conv -> Batchnorm folding,
+            Conv -> Add/Sub folding, and Conv -> Mul/Div folding may all alter numerics.
+
+ Returns:
+ None
+
+ Note:
+        On rare occasions, this can result in slower execution.
+
+    Example (Freezing a module with Conv->Batchnorm):
+
+    .. code-block:: python
+
+ import torch
+ in_channels, out_channels = 3, 32
+ conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
+ bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
+ mod = torch.nn.Sequential(conv, bn)
+        # set optimize_numerics to False here; by default freezing would already run
+        # the numerics-altering passes via run_frozen_optimizations
+        frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize_numerics=False)
+ # inspect frozen mod
+ assert "batch_norm" in str(frozen_mod.graph)
+ torch.jit.run_frozen_optimizations(frozen_mod)
+ assert "batch_norm" not in str(frozen_mod.graph)
+
+ """
+ if mod._c._has_method("forward"):
+ torch._C._jit_pass_optimize_frozen_graph(mod.graph, optimize_numerics)
+
+ if preserved_methods is None:
+ preserved_methods = []
+
+ for method in preserved_methods:
+ torch._C._jit_pass_optimize_frozen_graph(
+ mod.__getattr__(method).graph, optimize_numerics
+ )
+
+
+def optimize_for_inference(
+ mod: ScriptModule, other_methods: Optional[List[str]] = None
+) -> ScriptModule:
+ """
+ Perform a set of optimization passes to optimize a model for the purposes of inference.
+
+ If the model is not already frozen, optimize_for_inference
+ will invoke `torch.jit.freeze` automatically.
+
+    In addition to generic optimizations that should speed up your model regardless
+    of environment, `optimize_for_inference` will also bake in build-specific settings
+    such as the presence of CUDNN or MKLDNN, and may in the future make transformations
+    which speed things up on one machine but slow things down on another. Accordingly,
+    serializing a module after invoking `optimize_for_inference` is not supported and
+    not guaranteed to work.
+
+    This is still in prototype and may have the potential to slow down your model.
+    The primary use cases targeted so far are vision models on CPU and, to a lesser
+    extent, GPU.
+
+ Example (optimizing a module with Conv->Batchnorm)::
+
+ import torch
+ in_channels, out_channels = 3, 32
+ conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
+ bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
+ mod = torch.nn.Sequential(conv, bn)
+ frozen_mod = torch.jit.optimize_for_inference(torch.jit.script(mod.eval()))
+ assert "batch_norm" not in str(frozen_mod.graph)
+ # if built with MKLDNN, convolution will be run with MKLDNN weights
+ assert "MKLDNN" in frozen_mod.graph
+ """
+ if not isinstance(mod, ScriptModule):
+ raise RuntimeError(
+ "optimize_for_inference expects a ScriptModule as input. "
+ "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
+ )
+
+ if other_methods is None:
+ other_methods = []
+
+ if hasattr(mod, "training"):
+ mod = freeze(mod.eval(), preserved_attrs=other_methods)
+
+ torch._C._jit_pass_optimize_for_inference(mod._c, other_methods)
+
+ return mod
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_fuser.py b/venv/lib/python3.10/site-packages/torch/jit/_fuser.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ca9cb6860106976d3204dd8ee857c2aa02baa4a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_fuser.py
@@ -0,0 +1,160 @@
+import contextlib
+from typing import List, Tuple
+
+import torch
+
+
+@contextlib.contextmanager
+def optimized_execution(should_optimize):
+ """Context manager that controls whether the JIT's executor will run optimizations before executing a function."""
+ stored_flag = torch._C._get_graph_executor_optimize()
+ torch._C._set_graph_executor_optimize(should_optimize)
+ try:
+ yield
+ finally:
+ torch._C._set_graph_executor_optimize(stored_flag)
+
+
+@contextlib.contextmanager
+def fuser(name):
+ """Context manager that facilitates switching between backend fusers.
+
+ Valid names:
+ * ``fuser0`` - enables only legacy fuser
+ * ``fuser1`` - enables only NNC
+ * ``fuser2`` - enables only nvFuser
+ * ``fuser3`` - enables oneDNN Graph
+ """
+ old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
+ old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
+ old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
+ old_nvfuser_state = torch._C._jit_nvfuser_enabled()
+ old_llga_state = torch._C._jit_llga_enabled()
+ if name == "fuser0": # legacy fuser
+ torch._C._jit_override_can_fuse_on_cpu(True)
+ torch._C._jit_override_can_fuse_on_gpu(True)
+ torch._C._jit_set_texpr_fuser_enabled(False)
+ torch._C._jit_set_nvfuser_enabled(False)
+ torch._C._jit_set_llga_enabled(False)
+ elif name == "fuser1": # NNC
+ old_profiling_executor = torch._C._jit_set_profiling_executor(True)
+ old_profiling_mode = torch._C._get_graph_executor_optimize(True)
+ torch._C._jit_override_can_fuse_on_cpu(True)
+ torch._C._jit_override_can_fuse_on_gpu(True)
+ torch._C._jit_set_texpr_fuser_enabled(True)
+ torch._C._jit_set_nvfuser_enabled(False)
+ torch._C._jit_set_llga_enabled(False)
+ elif name == "fuser2": # nvFuser
+ torch._C._jit_override_can_fuse_on_cpu(False)
+ torch._C._jit_override_can_fuse_on_gpu(False)
+ torch._C._jit_set_texpr_fuser_enabled(False)
+ torch._C._jit_set_nvfuser_enabled(True)
+ torch._C._jit_set_llga_enabled(False)
+ elif name == "fuser3": # oneDNN Graph
+ old_profiling_executor = torch._C._jit_set_profiling_executor(True)
+ old_profiling_mode = torch._C._get_graph_executor_optimize(True)
+ torch._C._jit_override_can_fuse_on_cpu(True)
+ torch._C._jit_override_can_fuse_on_gpu(False)
+ torch._C._jit_set_texpr_fuser_enabled(True)
+ torch._C._jit_set_nvfuser_enabled(False)
+ torch._C._jit_set_llga_enabled(True)
+ elif name == "none": # Turn Pytorch fuser off
+ torch._C._jit_override_can_fuse_on_cpu(False)
+ torch._C._jit_override_can_fuse_on_gpu(False)
+ torch._C._jit_set_texpr_fuser_enabled(False)
+ torch._C._jit_set_nvfuser_enabled(False)
+ torch._C._jit_set_llga_enabled(False)
+ else:
+ raise Exception(f"unrecognized fuser option (name: {name})")
+ try:
+ yield
+ finally:
+ if name in ["fuser1", "fuser3"]: # NNC or oneDNN Graph
+ torch._C._jit_set_profiling_executor(old_profiling_executor) # type: ignore[possibly-undefined]
+ torch._C._get_graph_executor_optimize(old_profiling_mode) # type: ignore[possibly-undefined]
+ # recover the previous values
+ torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
+ torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
+ torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
+ torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
+ torch._C._jit_set_llga_enabled(old_llga_state)
+
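+# Illustrative usage sketch (not part of the original file): enable NNC while
+# running a scripted function a few times so the profiling executor can
+# specialize and fuse it.
+#
+#   def f(x: torch.Tensor) -> torch.Tensor:
+#       return x * x + x
+#
+#   scripted = torch.jit.script(f)
+#   with torch.jit.fuser("fuser1"):
+#       for _ in range(3):
+#           scripted(torch.rand(8, 8))
+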
+
+last_executed_optimized_graph = torch._C._last_executed_optimized_graph
+
+
+def _get_differentiable_graph_node(node, diff_node):
+ if node.kind() == "prim::DifferentiableGraph":
+ diff_node.append(node)
+ else:
+ for block in node.blocks():
+ for n in block.nodes():
+ _get_differentiable_graph_node(n, diff_node)
+
+
+def _graph_for(self, *args, **kwargs):
+ return _script_method_graph_for(self, self, *args, **kwargs)
+
+
+def _script_method_graph_for(self, parent, *args, **kwargs):
+ try:
+ dbs = parent.get_debug_state()
+ eps = list(dbs.execution_plans.values())
+ assert len(eps) == 1
+ graph = eps[0].graph.copy()
+
+ # graph_executor_states for differentiable node
+ fw_states = eps[0].code.differentiable_op_executor_states()
+ diff_nodes: List[torch._C.Node] = []
+ for n in graph.nodes():
+ _get_differentiable_graph_node(n, diff_nodes)
+
+ assert len(fw_states) == len(diff_nodes)
+ # swap each differentiable graph with optimized graph in their execution plan
+ for n, state in zip(diff_nodes, fw_states):
+ fw_execution_plans = list(state.execution_plans.values())
+ # we can only update the subgraph when there's a unique execution
+            # plan. Avoid asserting here so we skip the ones that can't be
+            # updated while making a best effort to update the other nodes.
+ if len(fw_execution_plans) == 1:
+ n.g_("Subgraph", fw_execution_plans[0].graph)
+
+ return graph
+ except Exception:
+        # fallback approach: just run the graph and return the recorded
+        # optimized graph
+ self(*args, **kwargs)
+ return last_executed_optimized_graph()
+
+
+def set_fusion_strategy(strategy: List[Tuple[str, int]]):
+ """Set the type and number of specializations that can occur during fusion.
+
+ Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or "DYNAMIC"
+ and depth is an integer.
+
+ Behavior - static vs dynamic:
+ In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
+ based on some initial profiling runs.
+ In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
+ shapes are possible.
+
+ In both cases, we also recompile on new striding behavior, device, or dtype.
+
+ Behavior - fallback functions & depth:
+ When an input doesn't match the format required by the specialized compiled op, it will run
+    a fallback function. Fallback functions are recursively compiled and specialized based
+ on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
+ limit the number of specializations that can be compiled, before giving up on recompiling and
+ falling back to a completely un-fused, un-specialized implementation.
+
+ The list of (type, depth) pairs controls the type of specializations and the number of
+ specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that the first
+ two specializations will use static fusions, the following two specializations will use
+ dynamic fusion, and any inputs that satisfy none of the 4 options will run an
+ unfused implementation.
+
+    NB: in the future, as more fusion backends are added, there may be more granular
+    APIs for specific fusers.
+ """
+ return torch._C._jit_set_fusion_strategy(strategy)
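+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the original file): allow two static
+    # and two dynamic specializations before falling back to unfused execution.
+    set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 2)])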
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_ir_utils.py b/venv/lib/python3.10/site-packages/torch/jit/_ir_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..028247f54011c3d48d1cc24a6b73c2324ccbecf5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_ir_utils.py
@@ -0,0 +1,25 @@
+from typing import Union
+
+import torch
+
+
+class _InsertPoint:
+ def __init__(
+ self,
+ insert_point_graph: torch._C.Graph,
+ insert_point: Union[torch._C.Node, torch._C.Block],
+ ):
+ self.insert_point = insert_point
+ self.g = insert_point_graph
+ self.guard = None
+
+ def __enter__(self):
+ self.prev_insert_point = self.g.insertPoint()
+ self.g.setInsertPoint(self.insert_point)
+
+ def __exit__(self, *args):
+ self.g.setInsertPoint(self.prev_insert_point)
+
+
+def insert_point_guard(self, insert_point: Union[torch._C.Node, torch._C.Block]):
+ return _InsertPoint(self, insert_point)
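+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the original file): temporarily redirect
+    # a graph's insertion point while building IR by hand.
+    def _f(x: torch.Tensor) -> torch.Tensor:
+        return x + x
+
+    graph = torch.jit.script(_f).graph
+    first_node = next(iter(graph.nodes()))
+    with _InsertPoint(graph, first_node):
+        # nodes created here would land at `first_node` rather than at the end
+        pass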
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_logging.py b/venv/lib/python3.10/site-packages/torch/jit/_logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..497c34293d95af01ea59a6afac438ffd082c8164
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_logging.py
@@ -0,0 +1,10 @@
+import torch
+
+add_stat_value = torch.ops.prim.AddStatValue
+
+set_logger = torch._C._logging_set_logger
+LockingLogger = torch._C.LockingLogger
+AggregationType = torch._C.AggregationType
+NoopLogger = torch._C.NoopLogger
+
+time_point = torch.ops.prim.TimePoint
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py b/venv/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b19e8438d4ea51afbd54d056754eefa3b13fcd4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py
@@ -0,0 +1,192 @@
+import inspect
+import pathlib
+import sys
+import typing
+from collections import defaultdict
+from types import CodeType
+from typing import Dict, Iterable, List, Optional
+
+import torch
+
+_IS_MONKEYTYPE_INSTALLED = True
+try:
+ import monkeytype # type: ignore[import]
+ from monkeytype import trace as monkeytype_trace
+ from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
+ from monkeytype.db.base import ( # type: ignore[import]
+ CallTraceStore,
+ CallTraceStoreLogger,
+ CallTraceThunk,
+ )
+ from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
+except ImportError:
+ _IS_MONKEYTYPE_INSTALLED = False
+
+
+# Checks whether a class is defined in `torch.*` modules
+def is_torch_native_class(cls):
+ if not hasattr(cls, "__module__"):
+ return False
+
+ parent_modules = cls.__module__.split(".")
+ if not parent_modules:
+ return False
+
+ root_module = sys.modules.get(parent_modules[0])
+ return root_module is torch
+
+
+def get_type(type):
+ """Convert the given type to a torchScript acceptable format."""
+ if isinstance(type, str):
+ return type
+ elif inspect.getmodule(type) == typing:
+        # If the type was imported from typing (like Tuple, List, or Dict),
+        # strip the `typing.` prefix from its string form, since names like
+        # typing.List are not accepted by TorchScript.
+ type_to_string = str(type)
+ return type_to_string.replace(type.__module__ + ".", "")
+ elif is_torch_native_class(type):
+ # If the type is a subtype of torch module, then TorchScript expects a fully qualified name
+ # for the type which is obtained by combining the module name and type name.
+ return type.__module__ + "." + type.__name__
+ else:
+ # For all other types use the name for the type.
+ return type.__name__
+
+
+def get_optional_of_element_type(types):
+ """Extract element type, return as `Optional[element type]` from consolidated types.
+
+ Helper function to extracts the type of the element to be annotated to Optional
+ from the list of consolidated types and returns `Optional[element type]`.
+ TODO: To remove this check once Union support lands.
+ """
+ elem_type = types[1] if type(None) == types[0] else types[0]
+ elem_type = get_type(elem_type)
+
+ # Optional type is internally converted to Union[type, NoneType], which
+ # is not supported yet in TorchScript. Hence, representing the optional type as string.
+ return "Optional[" + elem_type + "]"
+
+
+def get_qualified_name(func):
+ return func.__qualname__
+
+
+if _IS_MONKEYTYPE_INSTALLED:
+
+ class JitTypeTraceStoreLogger(CallTraceStoreLogger):
+ """A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
+
+ def __init__(self, store: CallTraceStore):
+ super().__init__(store)
+
+ def log(self, trace: CallTrace) -> None:
+ self.traces.append(trace)
+
+ class JitTypeTraceStore(CallTraceStore):
+ def __init__(self):
+ super().__init__()
+            # A dictionary keeping all collected CallTraces:
+            # the key is the fully qualified name of the called function,
+            # the value is the list of all CallTraces for it
+ self.trace_records: Dict[str, list] = defaultdict(list)
+
+ def add(self, traces: Iterable[CallTrace]):
+ for t in traces:
+ qualified_name = get_qualified_name(t.func)
+ self.trace_records[qualified_name].append(t)
+
+ def filter(
+ self,
+ qualified_name: str,
+ qualname_prefix: Optional[str] = None,
+ limit: int = 2000,
+ ) -> List[CallTraceThunk]:
+ return self.trace_records[qualified_name]
+
+ def analyze(self, qualified_name: str) -> Dict:
+ # Analyze the types for the given module
+ # and create a dictionary of all the types
+ # for arguments.
+ records = self.trace_records[qualified_name]
+ all_args = defaultdict(set)
+ for record in records:
+ for arg, arg_type in record.arg_types.items():
+ all_args[arg].add(arg_type)
+ return all_args
+
+ def consolidate_types(self, qualified_name: str) -> Dict:
+ all_args = self.analyze(qualified_name)
+        # If an argument was observed with more than one type,
+        # consolidate the entry to `Any` (or to `Optional[T]`
+        # for a `T`/`None` pair).
+ for arg, types in all_args.items():
+ types = list(types)
+ type_length = len(types)
+ if type_length == 2 and type(None) in types:
+                # TODO: Remove this check once Union support in TorchScript lands.
+ all_args[arg] = get_optional_of_element_type(types)
+ elif type_length > 1:
+ all_args[arg] = "Any"
+ elif type_length == 1:
+ all_args[arg] = get_type(types[0])
+ return all_args
+
+ def get_args_types(self, qualified_name: str) -> Dict:
+ return self.consolidate_types(qualified_name)
+
+ class JitTypeTraceConfig(monkeytype.config.Config):
+ def __init__(self, s: JitTypeTraceStore):
+ super().__init__()
+ self.s = s
+
+ def trace_logger(self) -> JitTypeTraceStoreLogger:
+ """Return a JitCallTraceStoreLogger that logs to the configured trace store."""
+ return JitTypeTraceStoreLogger(self.trace_store())
+
+ def trace_store(self) -> CallTraceStore:
+ return self.s
+
+ def code_filter(self) -> Optional[CodeFilter]:
+ return jit_code_filter
+
+else:
+ # When MonkeyType is not installed, we provide dummy class definitions
+ # for the below classes.
+ class JitTypeTraceStoreLogger: # type: ignore[no-redef]
+ def __init__(self):
+ pass
+
+ class JitTypeTraceStore: # type: ignore[no-redef]
+ def __init__(self):
+ self.trace_records = None
+
+ class JitTypeTraceConfig: # type: ignore[no-redef]
+ def __init__(self):
+ pass
+
+ monkeytype_trace = None # type: ignore[assignment] # noqa: F811
+
+
+def jit_code_filter(code: CodeType) -> bool:
+ """Codefilter for Torchscript to trace forward calls.
+
+ The custom CodeFilter is required while scripting a FX Traced forward calls.
+ FX Traced forward calls have `code.co_filename` start with '<' which is used
+ to exclude tracing of stdlib and site-packages in the default code filter.
+ Since we need all forward calls to be traced, this custom code filter
+ checks for code.co_name to be 'forward' and enables tracing for all such calls.
+ The code filter is similar to default code filter for monkeytype and
+ excludes tracing of stdlib and site-packages.
+ """
+ # Filter code without a source file and exclude this check for 'forward' calls.
+ if code.co_name != "forward" and (
+ not code.co_filename or code.co_filename[0] == "<"
+ ):
+ return False
+
+ filename = pathlib.Path(code.co_filename).resolve()
+ return not any(_startswith(filename, lib_path) for lib_path in LIB_PATHS)
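+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the original file): how traced Python
+    # types map to the TorchScript-friendly annotation strings produced above.
+    print(get_type(typing.List[int]))  # -> "List[int]"
+    print(get_optional_of_element_type([int, type(None)]))  # -> "Optional[int]"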
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_passes/__init__.py b/venv/lib/python3.10/site-packages/torch/jit/_passes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5d80b6794fbe38759098f4b6a3df9e8663d9c0a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/_property_propagation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/_property_propagation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..882fc2d1eb2abb0679cd212721c77790d8de2777
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/jit/_passes/__pycache__/_property_propagation.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_passes/_property_propagation.py b/venv/lib/python3.10/site-packages/torch/jit/_passes/_property_propagation.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ebd21e4bc107a9c7e999f36eef6bdde4bc6ec75
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_passes/_property_propagation.py
@@ -0,0 +1,46 @@
+"""
+Tools to help with tensor property propagation.
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+
+from typing import Any, List
+
+import torch
+from torch import TensorType
+from torch._C import Graph
+
+
+def apply_input_props_using_example(graph: Graph, example_input: List[Any]):
+ """
+    Apply properties to each tensor in the graph inputs
+    using the supplied example.
+ """
+ graph_inputs = list(graph.inputs())
+ if len(graph_inputs) == 0:
+ return
+
+ # Strip self args off for methods
+ in_0 = graph_inputs[0]
+ if isinstance(in_0.type(), torch._C.ClassType) and in_0.debugName() == "self":
+ graph_inputs = graph_inputs[1:]
+
+    if len(graph_inputs) != len(example_input):
+ raise RuntimeError(
+ "Number of inputs in graph does not match number of inputs in the example"
+ )
+
+ for i, (graph_i, example_i) in enumerate(zip(graph_inputs, example_input)):
+ if example_i is None:
+ continue # Skip the type check
+
+ if isinstance(example_i, torch.Tensor) != isinstance(
+ graph_i.type(), TensorType
+ ):
+ raise RuntimeError(
+ f"Input {i} does not match type of example", graph_i, example_i
+ )
+
+ if isinstance(example_i, torch.Tensor):
+ graph_i.setType(TensorType.create_from_tensor(example_i)) # type: ignore[arg-type]
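+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the original file): stamp shape/dtype/
+    # device information from an example input onto a scripted graph's inputs.
+    def _f(x: torch.Tensor) -> torch.Tensor:
+        return x.relu()
+
+    graph = torch.jit.script(_f).graph
+    apply_input_props_using_example(graph, [torch.rand(2, 3)])
+    print(next(iter(graph.inputs())).type())  # now a TensorType with sizes (2, 3)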
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_pickle.py b/venv/lib/python3.10/site-packages/torch/jit/_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cb4a0a93efde1e03673ebb45a919df1f417113e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_pickle.py
@@ -0,0 +1,37 @@
+# These functions are referenced from the pickle archives produced by
+# ScriptModule.save()
+
+
+# These (`build_*`) functions used to be used by `pickler.cpp` to specify
+# the type of the list for certain special types, but now all lists get
+# a type attached and restored via `restore_type_tag` below. The legacy
+# functions should stick around for backwards-compatibility.
+
+
+def build_intlist(data):
+ return data
+
+
+def build_tensorlist(data):
+ return data
+
+
+def build_doublelist(data):
+ return data
+
+
+def build_boollist(data):
+ return data
+
+
+def build_tensor_from_id(data):
+ if isinstance(data, int):
+ # just the id, can't really do anything
+ return data
+
+
+def restore_type_tag(value, type_str):
+ # The type_ptr is used by the jit unpickler to restore the full static type
+ # to container types like list when they are re-loaded, but this doesn't
+ # matter for Python, so just return the plain value
+ return value
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_recursive.py b/venv/lib/python3.10/site-packages/torch/jit/_recursive.py
new file mode 100644
index 0000000000000000000000000000000000000000..7795dd7e518279fb6f7fa5d396a2bdb6bd1d2ee5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_recursive.py
@@ -0,0 +1,1076 @@
+import collections
+import functools
+import inspect
+import sys
+import textwrap
+import types
+import warnings
+from typing import Dict, List, Set, Type
+
+import torch
+
+import torch._jit_internal as _jit_internal
+from torch._sources import fake_range
+from torch.jit._builtins import _find_builtin
+from torch.jit._check import AttributeTypeIsSupportedChecker
+from torch.jit._state import _add_script_class, _get_script_class, _python_cu
+from torch.jit.frontend import (
+ get_class_properties,
+ get_default_args,
+ get_jit_class_def,
+ get_jit_def,
+)
+from torch.nn import Module
+
+
+ScriptMethodStub = collections.namedtuple(
+ "ScriptMethodStub", ("resolution_callback", "def_", "original_method")
+)
+PropertyStub = collections.namedtuple("PropertyStub", ("resolution_callback", "def_"))
+
+
+# TODO: there should be a more principled way of doing this.
+ignored_attributes = [
+ "_version",
+ "_parameters",
+ "_buffers",
+ "_non_persistent_buffers_set",
+ "_backward_hooks",
+ "_backward_pre_hooks",
+ "_forward_hooks",
+ "_forward_hooks_with_kwargs",
+ "_forward_pre_hooks",
+ "_forward_pre_hooks_with_kwargs",
+ "_forward_hooks_always_called",
+ "_state_dict_hooks",
+ "_state_dict_pre_hooks",
+ "_load_state_dict_pre_hooks",
+ "_load_state_dict_post_hooks",
+ "_modules",
+ "_initializing",
+ "dump_patches",
+]
+
+
+def _compile_and_register_class(obj, rcb, qualified_name):
+ script_class = _get_script_class(obj)
+
+ if not script_class:
+ ast = get_jit_class_def(obj, obj.__name__)
+ defaults = torch.jit.frontend.get_default_args_for_class(obj)
+ script_class = torch._C._jit_script_class_compile(
+ qualified_name, ast, defaults, rcb
+ )
+ _add_script_class(obj, script_class)
+
+ return script_class
+
+
+def make_stub(func, name):
+ rcb = _jit_internal.createResolutionCallbackFromClosure(func)
+ ast = get_jit_def(func, name, self_name="RecursiveScriptModule")
+ return ScriptMethodStub(rcb, ast, func)
+
+
+def make_stub_from_method(nn_module, method_name):
+ func = getattr(nn_module, method_name)
+ if isinstance(func, ScriptMethodStub):
+ return func
+ # Make sure the name present in the resulting AST will match the name
+ # requested here. The only time they don't match is if you do something
+ # like:
+ # def _forward(self):
+ # pass
+ # forward = _forward
+ # In this case, the actual function object will have the name `_forward`,
+ # even though we requested a stub for `forward`.
+ return make_stub(func, method_name)
+
+
+def make_stubs_from_exported_methods(mod):
+ stubs = []
+ for name in dir(mod):
+ item = getattr(mod, name, None)
+ if (
+ _jit_internal.get_torchscript_modifier(item)
+ is _jit_internal.FunctionModifiers.EXPORT
+ ):
+ stubs.append(make_stub_from_method(mod, name))
+
+ return stubs
+
+
+def jit_ignored_properties(module):
+ user_annotated_ignored_attributes = getattr(
+ module, "__jit_ignored_attributes__", list()
+ )
+
+ def get_properties_names(module):
+ return {k for k, v in vars(module).items() if isinstance(v, property)}
+
+ properties = get_properties_names(type(module))
+    user_annotated_ignored_properties = set()
+
+    for ignored_attr in user_annotated_ignored_attributes:
+        if ignored_attr in properties:
+            user_annotated_ignored_properties.add(ignored_attr)
+    return user_annotated_ignored_properties
+
+
+# base types that can be constants
+# in addition, tuples and lists of these base types are also considered constants
+# If you edit this list, then you also need to edit the handlers in
+# ConstantValue in jit/script/init.cpp
+_constant_types = (
+ bool,
+ float,
+ int,
+ str,
+ type(None),
+ torch.device,
+ torch.layout,
+ torch.dtype,
+)
+
+
+def _get_valid_constant(attr, v, owner_type):
+ if isinstance(v, _constant_types):
+ return v
+ elif isinstance(v, (tuple, list)):
+ return tuple(_get_valid_constant(attr, x, owner_type) for x in v)
+ constants = ", ".join(torch.typename(typ) for typ in _constant_types)
+ raise TypeError(
+ textwrap.dedent(
+ f"""
+ '{torch.typename(type(v))}' object in attribute '{owner_type}.{attr}' is not a valid constant.
+ Valid constants are:
+ 1. a nn.ModuleList
+ 2. a value of type {{{constants}}}
+ 3. a list or tuple of (2)
+ """
+ )
+ )
+
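+# Illustrative sketch (not part of the original file): attributes become
+# TorchScript constants either by listing them in `__constants__` or via a
+# `Final` annotation, e.g.
+#
+#   class M(torch.nn.Module):
+#       scale: torch.jit.Final[float]
+#
+#       def __init__(self):
+#           super().__init__()
+#           self.scale = 2.0
+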
+
+class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
+ def __init__(self, source, filename, file_lineno, leading_whitespace_len):
+ super().__init__(source, filename, file_lineno, leading_whitespace_len)
+
+
+def get_annotations(obj):
+ if sys.version_info < (3, 10):
+ return getattr(obj, "__annotations__", {})
+ # In Python-3.10+ it is recommended to use inspect.get_annotations
+ # See https://docs.python.org/3.10/howto/annotations.html
+    # But also, in 3.10 annotations from a base class are not inherited
+    # by an unannotated derived class, so they must be manually extracted
+ annotations = inspect.get_annotations(obj)
+ if annotations:
+ return annotations
+
+ def get_cls_annotations(cls):
+ cls_annotations = inspect.get_annotations(cls)
+ if cls_annotations:
+ return cls_annotations
+ for base in cls.__bases__:
+ cls_annotations = get_cls_annotations(base)
+ if cls_annotations:
+ return cls_annotations
+ return {}
+
+ cls = obj if isinstance(obj, type) else type(obj)
+ return get_cls_annotations(cls)
+
+
+def infer_concrete_type_builder(nn_module, share_types=True):
+ """
+ Build a ConcreteModuleTypeBuilder from an nn.Module.
+
+    The resulting ConcreteModuleType doesn't have a JIT type associated with it yet;
+    that must be filled in by the caller.
+ """
+ concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
+ if isinstance(nn_module, (torch.nn.ModuleDict)):
+ concrete_type_builder.set_module_dict()
+ if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
+ concrete_type_builder.set_module_list()
+ if isinstance(nn_module, (torch.nn.ParameterList)):
+ concrete_type_builder.set_parameter_list()
+ if isinstance(nn_module, (torch.nn.ParameterDict)):
+ concrete_type_builder.set_parameter_dict()
+
+ class_annotations = get_annotations(nn_module)
+ if isinstance(nn_module, (torch.ao.quantization.QuantWrapper)):
+ class_annotations = {}
+
+ # Get user-annotated ignored attributes.
+ user_annotated_ignored_attributes = getattr(
+ nn_module, "__jit_ignored_attributes__", list()
+ )
+ concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
+ ignored_properties = jit_ignored_properties(nn_module)
+
+ # try to infer the type from type annotation or from the object itself
+ def infer_type(name, item):
+        # The forward function from Module is special; never use its annotation; we
+        # need to infer the type directly using the JIT. I originally wanted to write
+ # this test as isinstance(class_annotations[name], Callable) but
+ # isinstance on typing things doesn't seem to work: isinstance(list, Callable)
+ # is also true!
+ inferred = False
+ try:
+ if (
+ name in class_annotations
+ and class_annotations[name]
+ != torch.nn.Module.__annotations__["forward"]
+ ):
+ ann_to_type = torch.jit.annotations.ann_to_type(
+ class_annotations[name], fake_range()
+ )
+ attr_type = torch._C.InferredType(ann_to_type)
+ elif isinstance(item, torch.jit.Attribute):
+ ann_to_type = torch.jit.annotations.ann_to_type(item.type, fake_range())
+ attr_type = torch._C.InferredType(ann_to_type)
+ else:
+ attr_type = torch._C._jit_try_infer_type(item)
+ inferred = True
+ except RuntimeError as re:
+ raise RuntimeError(f"Error inferring type for {name}: {item}: {re}") from re
+
+ return attr_type, inferred
+
+ added_names = set()
+
+ for name, item in nn_module._parameters.items():
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ assert item is None or isinstance(item, torch.Tensor)
+ attr_type, _ = infer_type(name, item)
+ # We currently have the invariant in various places in our code
+ # that parameters must be Tensors. However, the nn.Module API also
+ # allows NoneType parameters. These parameters are not returned as
+ # part of `parameters()` and its variants, but are available
+ # through direct attribute access.
+ concrete_type_builder.add_attribute(name, attr_type.type(), True, False)
+ added_names.add(name)
+
+ for name, item in nn_module._buffers.items():
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ assert item is None or isinstance(item, torch.Tensor)
+ attr_type, _ = infer_type(name, item)
+ concrete_type_builder.add_attribute(name, attr_type.type(), False, True)
+ added_names.add(name)
+
+ for name, item in nn_module._modules.items():
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ attr_type, _ = infer_type(name, item)
+ if item is None:
+ # Modules can be None. We don't have direct support for optional
+            # Modules, so we register it as a NoneType attribute instead.
+ concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
+ continue
+ if attr_type.success():
+            # if the type can be inferred, it should be a module interface type
+            assert attr_type.type().is_interface_type()
+ sub_concrete_type = torch._C.ConcreteModuleType.from_jit_type(
+ attr_type.type()
+ )
+ else:
+ # otherwise we get the concrete module type for item and add it to concrete_type
+ sub_concrete_type = get_module_concrete_type(item, share_types)
+ concrete_type_builder.add_module(name, sub_concrete_type)
+
+ added_names.add(name)
+
+ # populate constants_set
+ constants_set = set(getattr(nn_module, "__constants__", ()))
+
+ # Constants annotated via `Final[T]` rather than being added to `__constants__`
+ for name, ann in class_annotations.items():
+ if torch._jit_internal.is_final(ann):
+ constants_set.add(name)
+
+ for name in constants_set:
+ if name in added_names:
+            # TODO: We should really error in this case, but it's BC-breaking, so
+ # we need to warn for at least one release
+ if name in nn_module._modules:
+ hint = "submodule"
+ elif name in nn_module._buffers:
+ hint = "buffer"
+ elif name in nn_module._parameters:
+ hint = "parameter"
+ else:
+ raise AssertionError(
+ "added_names must be submodule, parameter, or buffer"
+ )
+
+ warnings.warn(
+ f"'{name}' was found in ScriptModule constants, "
+ f" but it is a non-constant {hint}. Consider removing it."
+ )
+ continue
+ if not hasattr(nn_module, name):
+            # TODO: We should really error in this case, but it's BC-breaking, so
+ # we need to warn for at least one release
+ warnings.warn(
+ f"'{name}' was found in ScriptModule constants, "
+ "but was not actually set in __init__. "
+ "Consider removing it."
+ )
+ continue
+ value = getattr(nn_module, name)
+ concrete_type_builder.add_constant(
+ name, _get_valid_constant(name, value, type(nn_module).__name__)
+ )
+ added_names.add(name)
+
+ # populate overloads
+ overloads = getattr(nn_module, "__overloads__", {})
+ # update with any annotated overloads
+ overloads.update(
+ get_overload_name_mapping(
+ get_overload_annotations(nn_module, ignored_properties)
+ )
+ )
+ for name, overloaded_names in overloads.items():
+ concrete_type_builder.add_overload(name, overloaded_names)
+
+ for name, value in nn_module.__dict__.items():
+ if name in ignored_attributes or name.startswith("__"):
+ # Python objects have lots of random attributes attached to them;
+ # PyTorch adds a few more. Prevent these from getting compiled.
+ continue
+
+ if name in user_annotated_ignored_attributes:
+ continue
+
+ if name in added_names:
+ # Don't re-add anything we already added
+ continue
+
+ isoverloadpacket = isinstance(value, torch._ops.OpOverloadPacket)
+ if isoverloadpacket:
+ value = value.op
+ # Handle Python function attributes
+ if inspect.isfunction(value):
+ try:
+ scripted_fn = torch.jit.script(value)
+ concrete_type_builder.add_function_attribute(
+ name, torch._C._jit_try_infer_type(scripted_fn).type(), value
+ )
+ except Exception as e:
+ # If we fail to script the function, it isn't a hard error.
+ # Instead, we will add it to the list of attributes we failed
+ # to convert, with the compilation error.
+ hint = (
+ "(This function exists as an attribute on the Python module, "
+ "but we failed to compile it to a TorchScript function. "
+ f"\nThe error stack is reproduced here:\n{e}"
+ )
+ concrete_type_builder.add_failed_attribute(name, hint)
+
+ continue
+
+ # Handle calls to builtin functions (either bespoke builtins from torch.jit._builtins or
+ # a call to an aten function like torch.add)
+ builtin_symbol_name = _find_builtin(value)
+ if builtin_symbol_name:
+ concrete_type_builder.add_builtin_function(name, builtin_symbol_name)
+ continue
+
+ # Handle Script function attributes
+ if isinstance(value, torch.jit.ScriptFunction):
+ concrete_type_builder.add_function_attribute(
+ name, torch._C._jit_try_infer_type(value).type(), value
+ )
+ continue
+
+ # If we got here, this is a regular "data" attribute, add it to the concrete type
+ attr_type, inferred = infer_type(name, value)
+ if attr_type.success():
+ concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
+ else:
+ # TODO: could add more detail here. For example, what the user should do
+ # when the pytype is `list` or `NoneType`
+ inferred_msg = (
+ "Its type was inferred; try adding a type annotation for the attribute."
+ if inferred
+ else ""
+ )
+ additional_info = f"{attr_type.reason()}. {inferred_msg}"
+ hint = (
+ "(This attribute exists on the Python module, "
+ f"but we failed to convert Python type: '{torch.typename(type(value))}' "
+ f"to a TorchScript type. {additional_info})"
+ )
+ concrete_type_builder.add_failed_attribute(name, hint)
+
+ # add hooks to concrete type
+ for hook in nn_module._forward_hooks.values():
+ concrete_type_builder.add_forward_hook(hook)
+ for pre_hook in nn_module._forward_pre_hooks.values():
+ concrete_type_builder.add_forward_pre_hook(pre_hook)
+
+ return concrete_type_builder
+
+
+class ConcreteTypeStore:
+ type_store: Dict[Type[Module], List[torch._C.ConcreteModuleType]]
+ methods_compiled: Set[torch._C.ConcreteModuleType]
+
+ def __init__(self):
+        # Python module type => List[ConcreteModuleType]
+ self.type_store = {}
+ # ConcreteTypes that have had their methods already compiled
+ self.methods_compiled = set()
+
+ def get_or_create_concrete_type(self, nn_module):
+ """Infer a ConcreteType from this `nn.Module` instance. Underlying JIT types are re-used if possible."""
+ concrete_type_builder = infer_concrete_type_builder(nn_module)
+
+ nn_module_type = type(nn_module)
+ if nn_module_type not in self.type_store:
+ self.type_store[nn_module_type] = []
+
+ # Search the type store for an already-available JIT type
+ known_types = self.type_store[nn_module_type]
+ for known_type in known_types:
+ if known_type.equals(concrete_type_builder):
+ return known_type
+
+ # We didn't find anything; generate a new JIT type from this concrete type
+ concrete_type = concrete_type_builder.build()
+ self.type_store[nn_module_type].append(concrete_type)
+ return concrete_type
+
+
+concrete_type_store = ConcreteTypeStore()
+
+
+def create_methods_and_properties_from_stubs(
+ concrete_type, method_stubs, property_stubs
+):
+ method_defs = [m.def_ for m in method_stubs]
+ method_rcbs = [m.resolution_callback for m in method_stubs]
+ method_defaults = [get_default_args(m.original_method) for m in method_stubs]
+
+ property_defs = [p.def_ for p in property_stubs]
+ property_rcbs = [p.resolution_callback for p in property_stubs]
+
+ concrete_type._create_methods_and_properties(
+ property_defs, property_rcbs, method_defs, method_rcbs, method_defaults
+ )
+
+
+def create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs):
+ hook_defs = [h.def_ for h in hook_stubs]
+ hook_rcbs = [h.resolution_callback for h in hook_stubs]
+
+ pre_hook_defs = [h.def_ for h in pre_hook_stubs]
+ pre_hook_rcbs = [h.resolution_callback for h in pre_hook_stubs]
+
+ concrete_type._create_hooks(hook_defs, hook_rcbs, pre_hook_defs, pre_hook_rcbs)
+
+
+def get_module_concrete_type(nn_module, share_types=True):
+ """
+ Get a concrete type for nn_modules.
+
+ If share_types is True, the concrete type is fetched from concrete_type_store.
+ If it is False, a new concrete type is created without first searching concrete_type_store.
+
+ Args:
+ nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+        share_types: Whether to share underlying JIT types between modules (if possible).
+
+ Returns:
+ A concrete type for nn_module.
+ """
+ assert isinstance(nn_module, Module)
+ if isinstance(nn_module, torch.jit.ScriptModule) and hasattr(
+ nn_module, "_concrete_type"
+ ):
+ return nn_module._concrete_type
+
+ if share_types:
+ # Look into the store of cached JIT types
+ concrete_type = concrete_type_store.get_or_create_concrete_type(nn_module)
+ else:
+ # Get a concrete type directly, without trying to re-use an existing JIT
+ # type from the type store.
+ concrete_type_builder = infer_concrete_type_builder(nn_module, share_types)
+ concrete_type_builder.set_poisoned()
+ concrete_type = concrete_type_builder.build()
+
+ return concrete_type
+
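+# Illustrative sketch (not part of the original file): when share_types=True,
+# structurally identical modules are expected to share one concrete type, e.g.
+#
+#   ct_a = get_module_concrete_type(torch.nn.Linear(2, 3))
+#   ct_b = get_module_concrete_type(torch.nn.Linear(2, 3))
+#   assert ct_a is ct_b  # fetched from the shared concrete_type_store
+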
+
+def create_script_class(obj):
+ """
+ Create and return a RecursiveScriptClass instance from a Python object.
+
+    Args:
+ obj: A Python object.
+ """
+ qualified_class_name = _jit_internal._qualified_name(type(obj))
+ rcb = _jit_internal.createResolutionCallbackForClassMethods(type(obj))
+ # Script the type of obj if it hasn't already been scripted.
+ _compile_and_register_class(type(obj), rcb, qualified_class_name)
+ class_ty = _python_cu.get_class(qualified_class_name)
+ # Create an empty torch._C.ScriptObject with the scripted type.
+ cpp_object = torch._C._create_object_with_type(class_ty)
+ # Copy all of the attributes over to the torch._C.ScriptObject.
+ for name, value in obj.__dict__.items():
+ cpp_object.setattr(name, value)
+
+ # Wrap the torch._C.ScriptObject in a RecursiveScriptClass instance.
+ return wrap_cpp_class(cpp_object)
+
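+# Illustrative sketch (not part of the original file):
+#
+#   class Pair:
+#       def __init__(self, a: int, b: int):
+#           self.a = a
+#           self.b = b
+#
+#   scripted_pair = create_script_class(Pair(1, 2))
+#   # scripted_pair wraps a torch._C.ScriptObject carrying attributes `a` and `b`
+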
+
+def create_script_module(nn_module, stubs_fn, share_types=True, is_tracing=False):
+ """
+ Create a new ScriptModule from an nn.Module.
+
+ Args:
+ nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+ stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
+ share_types: Whether to share underlying JIT types between modules (if possible).
+            NOTE: Only set this to False when we cannot guarantee type sharing will work
+ correctly. This only happens today for traced modules, where the same
+ module can produce different traced methods depending on the inputs.
+ is_tracing: Whether this function is called during tracing or scripting. If tracing,
+                    we don't need to run the AttributeTypeIsSupportedChecker because all the unsupported
+                    attributes will be baked in as constants in the traced graph. In addition,
+                    this check significantly slows down tracing when the module is large.
+ """
+ assert not isinstance(nn_module, torch.jit.RecursiveScriptModule)
+ check_module_initialized(nn_module)
+ concrete_type = get_module_concrete_type(nn_module, share_types)
+ if not is_tracing:
+ AttributeTypeIsSupportedChecker().check(nn_module)
+ return create_script_module_impl(nn_module, concrete_type, stubs_fn)
+
+
+def create_script_module_impl(nn_module, concrete_type, stubs_fn):
+ """
+ Convert an nn.Module to a RecursiveScriptModule.
+
+ Args:
+ nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+ concrete_type: The fully initialized ConcreteType of the module.
+ stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
+ """
+ cpp_module = torch._C._create_module_with_type(concrete_type.jit_type)
+ method_stubs = stubs_fn(nn_module)
+ property_stubs = get_property_stubs(nn_module)
+ hook_stubs, pre_hook_stubs = get_hook_stubs(nn_module)
+
+ user_annotated_ignored_attributes = getattr(
+ nn_module, "__jit_ignored_attributes__", list()
+ )
+ ignored_properties = jit_ignored_properties(nn_module)
+
+ def init_fn(script_module):
+ # Initialize the ScriptModule:
+ # 1. Copy the attributes/parameters/buffers from the original `nn_module` to the new ScriptModule.
+ for name in concrete_type.get_attributes().keys():
+ orig_value = getattr(nn_module, name)
+ orig_value = (
+ orig_value.value
+ if isinstance(orig_value, torch.jit.Attribute)
+ else orig_value
+ )
+ cpp_module.setattr(name, orig_value)
+
+ # 2. Copy the submodules from the original `nn_module` to the new ScriptModule,
+ # recursively scripting them.
+ for name, sub_concrete_type in concrete_type.get_modules():
+ orig_value = getattr(nn_module, name)
+ assert isinstance(
+ orig_value, Module
+ ), f"Expected Module but got {type(orig_value)}"
+ module_type = sub_concrete_type.jit_type
+ if isinstance(module_type, torch._C.InterfaceType):
+ # use the interface inference rule to compile the module
+ scripted = interface_script(module_type, orig_value)
+ elif isinstance(orig_value, torch.jit.ScriptModule):
+ scripted = orig_value
+ else:
+ # always reuse the provided stubs_fn to infer the methods to compile
+ scripted = create_script_module_impl(
+ orig_value, sub_concrete_type, stubs_fn
+ )
+
+ cpp_module.setattr(name, scripted)
+ script_module._modules[name] = scripted
+
+ # 3. Copy @ignored/@unused methods and attrs from the original `nn_module` to the new ScriptModule.
+ # This ensures we can access these Python methods on the ScriptModule.
+ for name in dir(nn_module):
+ if name in ignored_properties:
+ continue
+ item = getattr(nn_module, name, None)
+ if inspect.ismethod(item) and _jit_internal.is_ignored_fn(item):
+ unbound_function = getattr(nn_module, name).__func__
+ bound_method = unbound_function.__get__(script_module)
+ setattr(script_module, name, bound_method)
+ elif concrete_type.is_ignored_attribute(name):
+ setattr(script_module, name, item)
+
+ # For convenience, attach the concrete type to the new ScriptModule
+ script_module._concrete_type = concrete_type
+
+ # Actually create the ScriptModule, initializing it with the function we just defined
+ script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
+
+ # Compile methods if necessary
+ if concrete_type not in concrete_type_store.methods_compiled:
+ create_methods_and_properties_from_stubs(
+ concrete_type, method_stubs, property_stubs
+ )
+ # Create hooks after methods to ensure no name collisions between hooks and methods.
+ # If done before, hooks can overshadow methods that aren't exported.
+ create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs)
+ torch._C._run_emit_module_hook(cpp_module)
+ concrete_type_store.methods_compiled.add(concrete_type)
+
+ # Copy the forward hooks and pre-hooks to the new ScriptModule
+ # to allow the hooks to be run from eager as ScriptFunctions
+ for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
+ script_module._forward_pre_hooks[idx] = fn
+ for idx, fn in enumerate(script_module._c._get_forward_hooks()):
+ script_module._forward_hooks[idx] = fn
+
+ # Special handling so methods like __len__ work in script methods on classes derived from containers
+ if (
+ isinstance(
+ nn_module, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)
+ )
+ and "__len__" not in cpp_module._method_names()
+ ):
+ script_module.define(f"def __len__(self):\n return {len(nn_module)}\n")
+ if (
+ isinstance(nn_module, torch.nn.ModuleDict)
+ and "__contains__" not in cpp_module._method_names()
+ ):
+ if len(nn_module.keys()):
+ keys = repr(list(nn_module.keys()))
+ script_module.define(
+ f"def __contains__(self, key: str):\n return key in {keys}\n"
+ )
+ else:
+ script_module.define("def __contains__(self, key: str):\n return False\n")
+
+ # Make the compiled methods available to the Python ScriptModule class.
+ for method_stub in method_stubs:
+ if method_stub.original_method is None:
+ # define()'d methods don't have a Python original_method, so we
+ # don't need to do any Python re-wrapping stuff
+ continue
+
+ name = method_stub.original_method.__name__
+ if name != method_stub.def_.name().name:
+ # TODO: Why skip this? Because @torch.jit._overload_method will
+ # mangle the name of the function.
+ continue
+ script_method = cpp_module._get_method(name)
+
+ # Wrap the original to propagate docstrings and such.
+ # TODO: we don't currently do this for functions that are recursively
+ # compiled, but we should.
+ wrapped_script_method = functools.wraps(method_stub.original_method)(
+ script_method
+ )
+
+ # Add the methods to the script_module directly. This ensures they will
+ # be found first when `name` is looked up (as opposed to the stubs or
+ # nn.Module.forward)
+ script_module.__dict__[name] = wrapped_script_method
+
+ # Make module properties available on the Python ScriptModule class.
+ for property_stub in property_stubs:
+ property_name = property_stub.def_.name().name
+ fget = cpp_module._get_method(property_stub.def_.getter_name().name)
+ # Setter is optional, so it may not exist.
+ setter_name = property_stub.def_.setter_name()
+ fset = cpp_module._get_method(setter_name.name) if setter_name else None
+ script_module.__dict__[property_name] = property(fget, fset) # type: ignore[arg-type]
+
+ # copy over python methods to script module if they aren't defined on the script module
+ # this is currently an internal api used only on module containers
+ for name in dir(nn_module):
+ if name in ignored_properties:
+ continue
+ item = getattr(nn_module, name, None)
+ if (
+ _jit_internal.get_torchscript_modifier(item)
+ is _jit_internal.FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
+ ):
+ add_python_attr_to_scripted_model(script_module, nn_module, name)
+
+ return script_module
+
+
+# We define shims of certain attributes on the RecursiveScriptModule to support
+# magic methods. To check if a script model defines an attribute, we also need
+# to check that the attribute is not the shim
+def script_model_defines_attr(script_model, attr):
+ script_attr = getattr(script_model, attr, None)
+ if script_attr is None:
+ return False
+ default_attr = getattr(torch.jit.RecursiveScriptModule, attr, None)
+ if default_attr is None:
+ return False
+ return script_attr != default_attr
+
+
+def add_python_attr_to_scripted_model(script_model, orig, attr):
+ if hasattr(orig, attr) and script_model_defines_attr(script_model, attr):
+ setattr(script_model, attr, getattr(orig, attr))
+
+
+def get_overload_annotations(mod, jit_ignored_properties):
+ # original function => [(mangled overload name, overload function)]
+ overloads = {}
+
+ for name in dir(type(mod)):
+ if name in jit_ignored_properties:
+ continue
+ item = getattr(mod, name, None)
+ if not callable(item):
+ continue
+
+ # builtin functions like repr() in python 2 do not have __module__ defined
+ if hasattr(item, "__module__") and item.__module__ is not None:
+ method_overloads = _jit_internal._get_overloaded_methods(
+ item, mod.__class__
+ )
+ if method_overloads is None:
+ continue
+
+ if item.__func__ in method_overloads:
+ raise RuntimeError(
+ _jit_internal.get_overload_no_implementation_error_message(
+ "method", item.__func__
+ )
+ )
+
+ names = [name + "__" + str(i) for i in range(len(method_overloads))]
+ overloads[item] = list(zip(names, method_overloads))
+
+ return overloads
+
+
+def get_overload_name_mapping(overload_info):
+ # Same format as __overloads__
+ # original function => [overload names]
+ overload_name_mappings: Dict[str, List[str]] = {}
+ for orig_fn, overloads in overload_info.items():
+ original_name = orig_fn.__name__
+ if original_name not in overload_name_mappings:
+ overload_name_mappings[original_name] = []
+
+ for overload_name, _ in overloads:
+ overload_name_mappings[original_name].append(overload_name)
+ return overload_name_mappings
+
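+# Illustration (sketch only; names are hypothetical): for a module whose `forward`
+# carries two @torch.jit._overload_method declarations, get_overload_annotations
+# yields {forward: [("forward__0", overload_a), ("forward__1", overload_b)]}, and
+# get_overload_name_mapping flattens it to {"forward": ["forward__0", "forward__1"]},
+# the same shape as __overloads__.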
+
+def _check_no_signature(func):
+ signature = torch.jit.annotations.get_signature(
+ func, None, fake_range(), inspect.ismethod(func)
+ )
+ if signature is None:
+ qual_name = _jit_internal._qualified_name(func)
+ raise RuntimeError(
+ f"Must explicitly add type annotations to overloaded functions: {qual_name}"
+ )
+
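+# Requirement sketch (hypothetical method): every @torch.jit._overload_method
+# declaration must be explicitly annotated, e.g.
+#
+#     @torch.jit._overload_method  # noqa: F811
+#     def forward(self, x: int) -> int:
+#         ...
+#
+# An unannotated overload makes get_signature() return None, which raises the
+# RuntimeError above.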
+
+def make_stubs_for_overloads(overload_info):
+ overload_stubs = []
+ for orig_fn, overloads in overload_info.items():
+ orig_ast = get_jit_def(
+ orig_fn, orig_fn.__name__, self_name="RecursiveScriptModule"
+ )
+ for overload_name, overload_fn in overloads:
+ _check_no_signature(overload_fn)
+ over_ast = get_jit_def(
+ overload_fn, overload_fn.__name__, self_name="RecursiveScriptModule"
+ )
+ new_ast = torch._C._replace_overloaded_method_decl(
+ over_ast.decl(), orig_ast, overload_name
+ )
+ _rcb = _jit_internal.createResolutionCallbackFromClosure(orig_fn)
+ overload_stubs.append(ScriptMethodStub(_rcb, new_ast, overload_fn))
+ return overload_stubs
+
+
+def check_module_initialized(mod):
+ assert isinstance(mod, torch.nn.Module)
+ if not hasattr(mod, "_parameters"):
+ raise RuntimeError(
+ f"'{torch.typename(type(mod))}' has not been initialized, did you forget to call 'super()'?"
+ )
+
+ # This is to avoid importing torch.distributed.nn
+ if not hasattr(mod, "remote_parameters"):
+ for name, param in mod._parameters.items():
+ if param is not None and torch.nn.parameter.is_lazy(param):
+ raise RuntimeError(
+ "'{}' has uninitialized parameters {}. Did you forget to run a forward pass?".format(
+ torch.typename(type(mod)), name
+ )
+ )
+ for name, buf in mod._buffers.items():
+ if buf is not None and torch.nn.parameter.is_lazy(buf):
+ raise RuntimeError(
+ "'{}' has uninitialized buffers {}. Did you forget to run a forward pass?".format(
+ torch.typename(type(mod)), name
+ )
+ )
+
+
+def infer_methods_to_compile(nn_module):
+ """Implement the default rules for which methods should act as starting points for compilation.
+
+ (TODO add a link when the rules are published).
+ """
+ check_module_initialized(nn_module)
+ user_annotated_ignored_attributes = getattr(
+ nn_module, "__jit_ignored_attributes__", list()
+ )
+ ignored_properties = jit_ignored_properties(nn_module)
+
+ methods: List[str] = []
+ if hasattr(nn_module, "forward") and not _jit_internal.is_ignored_fn(
+ nn_module.forward
+ ):
+ forward_func = getattr(nn_module.forward, "__func__", None)
+ module_forward = getattr(torch.nn.Module, "forward", None)
+ if forward_func != module_forward:
+ methods = ["forward"]
+
+ exported = []
+ for name in dir(nn_module):
+ if name in ignored_properties:
+ continue
+ item = getattr(nn_module, name, None)
+ if (
+ _jit_internal.get_torchscript_modifier(item)
+ is _jit_internal.FunctionModifiers.EXPORT
+ ):
+ exported.append(name)
+
+ methods = methods + exported
+
+ overload_name_mappings = dict(getattr(nn_module, "__overloads__", {}))
+ overload_info = get_overload_annotations(nn_module, ignored_properties)
+ overload_name_mappings.update(get_overload_name_mapping(overload_info))
+ overload_stubs = make_stubs_for_overloads(overload_info)
+
+ nn_module.__overloads__ = overload_name_mappings
+
+ # we shouldn't directly compile overloaded methods, just their overloads
+ def ignore_overloaded(method_name):
+ return method_name not in overload_name_mappings
+
+ filtered_methods = filter(ignore_overloaded, methods)
+
+ # De-duplicate the methods. We don't use a set to store the methods because it
+ # would introduce non-determinism into the compile order.
+ uniquer: Set[str] = set()
+ uniqued_methods = []
+ for name in filtered_methods:
+ if name in uniquer:
+ continue
+ uniqued_methods.append(name)
+ uniquer.add(name)
+
+ stubs = []
+ for method in uniqued_methods:
+ stubs.append(make_stub_from_method(nn_module, method))
+ return overload_stubs + stubs
+
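+# Illustration of the default rule (hedged sketch; M is hypothetical):
+#
+#     class M(torch.nn.Module):
+#         def forward(self, x):
+#             return self.helper(x)
+#
+#         @torch.jit.export
+#         def extra(self):
+#             return 1
+#
+#         def helper(self, x):
+#             return x
+#
+# infer_methods_to_compile(M()) returns stubs for `forward` (overridden) and
+# `extra` (exported); `helper` is compiled only because `forward` calls it.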
+
+def get_hook_stubs(nn_module):
+ """Return forward hook and pre_hook ScriptModuleStubs."""
+ check_module_initialized(nn_module)
+ hook_map: Dict = {}
+
+ hook_stubs = []
+ for hook in nn_module._forward_hooks.values():
+ if hook.__name__ in hook_map:
+ if id(hook) != id(hook_map[hook.__name__]):
+ raise RuntimeError(
+ f"Hook '{hook.__name__}' on {type(nn_module).__name__} "
+ "has at least two different python definitions."
+ " Please use unique names for all hooks."
+ )
+ else:
+ hook_map[hook.__name__] = hook
+ hook_stubs.append(make_stub(hook, hook.__name__))
+
+ pre_hook_stubs = []
+ for pre_hook in nn_module._forward_pre_hooks.values():
+ if pre_hook.__name__ in hook_map:
+ if id(pre_hook) != id(hook_map[pre_hook.__name__]):
+ raise RuntimeError(
+ f"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} "
+ "has at least two different python definitions."
+ " Please use unique names for all hooks."
+ )
+ else:
+ hook_map[pre_hook.__name__] = pre_hook
+ pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__))
+
+ return hook_stubs, pre_hook_stubs
+
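+# Illustration (hedged; `log_output` is hypothetical): hooks registered eagerly, e.g.
+#
+#     def log_output(module, inputs, output):
+#         return output
+#
+#     m.register_forward_hook(log_output)
+#
+# are stubbed here so they compile with the module; two different functions that
+# share one __name__ trigger the RuntimeError above.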
+
+def get_property_stubs(nn_module):
+ """Create property stubs for the properties of the module by creating method stubs for the getter and setter."""
+ module_ty = type(nn_module)
+ properties_asts = get_class_properties(module_ty, self_name="RecursiveScriptModule")
+ rcbs = {}
+
+ for name in dir(module_ty):
+ item = getattr(module_ty, name, None)
+ if isinstance(item, property):
+ if not item.fget:
+ raise RuntimeError(
+ f"Property {name} of {nn_module.__name__} must have a getter"
+ )
+
+ rcbs[name] = _jit_internal.createResolutionCallbackFromClosure(item.fget)
+
+ stubs = [PropertyStub(rcbs[ast.name().name], ast) for ast in properties_asts]
+ return stubs
+
+
+def interface_script(mod_interface, nn_module):
+ """
+ Make a ScriptModule from an nn.Module, using the interface methods rule for determining which methods to compile.
+
+ Args:
+ mod_interface: the interface type that the module has
+ nn_module: The original Python nn.Module that we are creating a ScriptModule for.
+ """
+ if isinstance(nn_module, torch.jit.ScriptModule):
+ return nn_module
+
+ check_module_initialized(nn_module)
+
+ def infer_interface_methods_to_compile(nn_module):
+ """Rule to infer the methods from the interface type.
+
+ It is used to know which methods need to act as starting points for compilation.
+ """
+ stubs = []
+ for method in mod_interface.getMethodNames():
+ stubs.append(make_stub_from_method(nn_module, method))
+ return stubs
+
+ return create_script_module(nn_module, infer_interface_methods_to_compile)
+
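+# Usage sketch (hedged): interface_script is reached when a submodule attribute is
+# annotated with a @torch.jit.interface type, e.g. (hypothetical interface)
+#
+#     @torch.jit.interface
+#     class ConvInterface(torch.nn.Module):
+#         def forward(self, x: torch.Tensor) -> torch.Tensor:
+#             pass
+#
+# Only the interface's method names (here, `forward`) are used as compilation
+# entry points for the concrete module assigned to that attribute.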
+
+def try_compile_fn(fn, loc):
+ if _jit_internal.is_ignored_fn(fn):
+ # Don't do anything for @ignore'd functions
+ return None
+
+ if isinstance(fn, torch.nn.Module):
+ # Since modules are callable, pybind recognizes them as functions; don't
+ # do anything for them here
+ return None
+
+ if not inspect.isfunction(fn) and not inspect.ismethod(fn):
+ raise RuntimeError(
+ f"`{fn}` is not a function. Recursive scripting only supports "
+ "Python functions or methods currently.\n"
+ f"Consider manually annotating `{fn}` with @torch.jit.script."
+ )
+
+ # The object returned by __prepare_scriptable__ might have a different closure.
+ # Resolve it here to get the right resolution callback.
+ fn = fn.__prepare_scriptable__() if hasattr(fn, "__prepare_scriptable__") else fn # type: ignore[operator]
+
+ # We don't have the actual scope where the function was defined, but we can
+ # extract the necessary info from the closed over variables on the function
+ # object
+ rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
+ return torch.jit.script(fn, _rcb=rcb)
+
+
+def wrap_cpp_class(cpp_class):
+ """Wrap this torch._C.Object in a Python RecursiveScriptClass."""
+ return torch.jit.RecursiveScriptClass(cpp_class)
+
+
+def wrap_cpp_module(cpp_module):
+ """Wrap this torch._C.ScriptModule in a Python ScriptModule, recursively for all submodules."""
+
+ def init_fn(script_module):
+ for name, cpp_module in torch._C.ModuleDict(script_module._c).items():
+ setattr(script_module, name, wrap_cpp_module(cpp_module))
+ script_module._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
+ script_module._c._type()
+ )
+
+ for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
+ script_module._forward_pre_hooks[idx] = fn
+ for idx, fn in enumerate(script_module._c._get_forward_hooks()):
+ script_module._forward_hooks[idx] = fn
+
+ return torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
+
+
+def compile_unbound_method(concrete_type, fn):
+ if _jit_internal.is_ignored_fn(fn):
+ return None
+ stub = make_stub(fn, fn.__name__)
+ with torch._jit_internal._disable_emit_hooks():
+ # We don't want to call the hooks here since the graph that is calling
+ # this function is not yet complete
+ create_methods_and_properties_from_stubs(concrete_type, (stub,), ())
+ return stub
+
+
+def lazy_bind(concrete_type, unbound_method):
+ """
+ Return a function that lazily binds `unbound_method` to a provided Module IValue, then invokes the method.
+
+ We do this so that any Python shenanigans that
+ will poison type sharing are impossible at compile time.
+ """
+
+ def lazy_binding_method(cpp_module, *args):
+ def init_fn(script_module):
+ orig_class = concrete_type.py_class
+
+ # Copy @ignored/@unused methods from the original module to the new one.
+ # This ensures they are available during execution.
+ for name in dir(orig_class):
+ item = getattr(orig_class, name, None)
+ if _jit_internal.is_ignored_fn(item):
+ setattr(script_module, name, item)
+
+ # Copy constants over so they are available during execution.
+ for name, value in concrete_type.get_constants().items():
+ setattr(script_module, name, value)
+
+ script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
+ method = types.MethodType(unbound_method, script_module)
+ return method(*args)
+
+ # make the lazy binding method "look like" the original method
+ lazy_binding_method.original_fn = unbound_method # type: ignore[attr-defined]
+ lazy_binding_method.__name__ = unbound_method.__name__
+ torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method)
+
+ return lazy_binding_method
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_script.py b/venv/lib/python3.10/site-packages/torch/jit/_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e29c43d45309c9ddfd035e6eac84b3da127c698
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_script.py
@@ -0,0 +1,1690 @@
+"""TorchScript.
+
+This module contains functionality to support the JIT's scripting frontend, notably:
+ - torch.jit.script
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+import collections
+import copy
+import enum
+import functools
+import inspect
+import pickle
+import warnings
+from typing import Any, Callable, Dict, List, Set, Tuple, Union
+
+import torch
+import torch._jit_internal as _jit_internal
+from torch._classes import classes
+from torch._jit_internal import _qualified_name
+from torch.jit._builtins import _register_builtin
+from torch.jit._fuser import _graph_for, _script_method_graph_for
+
+from torch.jit._monkeytype_config import (
+ JitTypeTraceConfig,
+ JitTypeTraceStore,
+ monkeytype_trace,
+)
+from torch.jit._recursive import (
+ _compile_and_register_class,
+ infer_methods_to_compile,
+ ScriptMethodStub,
+ wrap_cpp_module,
+)
+from torch.jit._state import (
+ _enabled,
+ _set_jit_function_cache,
+ _set_jit_overload_cache,
+ _try_get_jit_cached_function,
+ _try_get_jit_cached_overloads,
+)
+from torch.jit.frontend import get_default_args, get_jit_class_def, get_jit_def
+from torch.nn import Module
+from torch.overrides import (
+ has_torch_function,
+ has_torch_function_unary,
+ has_torch_function_variadic,
+)
+from torch.package import PackageExporter, PackageImporter
+from torch.utils import set_module
+from ._serialization import validate_map_location
+
+type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
+
+torch._C.ScriptMethod.graph_for = _script_method_graph_for # type: ignore[attr-defined]
+torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
+ScriptFunction = torch._C.ScriptFunction
+ScriptFunction.__doc__ = """
+Functionally equivalent to a :class:`ScriptModule`, but represents a single
+function and does not have any attributes or Parameters.
+"""
+set_module(ScriptFunction, "torch.jit")
+
+
+# Throws an error if a jit function is pickled.
+# Helps to avoid Python crashes for Python versions 3.9.5+ when protocol 0 or 1 is given as an argument.
+def _reduce(cls):
+ raise pickle.PickleError("ScriptFunction cannot be pickled")
+
+
+ScriptFunction.__reduce__ = _reduce # type: ignore[assignment]
+
+
+if _enabled:
+ Attribute = collections.namedtuple("Attribute", ["value", "type"])
+else:
+
+ def Attribute(value, type): # type: ignore[no-redef]
+ return value
+
+
+Attribute.__doc__ = """
+ This method is a pass-through function that returns `value`, mostly
+ used to indicate to the TorchScript compiler that the left-hand side
+ expression is a class instance attribute with type of `type`. Note that
+ `torch.jit.Attribute` should only be used in `__init__` method of `jit.ScriptModule`
+ subclasses.
+
+ Though TorchScript can infer correct types for most Python expressions, there are some cases where
+ type inference can be wrong, including:
+
+ - Empty containers like `[]` and `{}`, which TorchScript assumes to be containers of `Tensor`
+ - `Optional` types like `Optional[T]`: when assigned a valid value of type `T`, TorchScript
+ would assume it is of type `T` rather than `Optional[T]`
+
+ In eager mode, it is simply a pass-through function that returns `value`
+ without other implications.
+
+ Example:
+
+ .. testcode::
+
+ import torch
+ from typing import Dict
+
+ class AttributeModule(torch.jit.ScriptModule):
+ def __init__(self):
+ super().__init__()
+ self.foo = torch.jit.Attribute(0.1, float)
+
+ # we should be able to use self.foo as a float here
+ assert 0.0 < self.foo
+
+ self.names_ages = torch.jit.Attribute({}, Dict[str, int])
+ self.names_ages["someone"] = 20
+ assert isinstance(self.names_ages["someone"], int)
+
+ m = AttributeModule()
+ # m will contain two attributes
+ # 1. foo of type float
+ # 2. names_ages of type Dict[str, int]
+
+ .. testcleanup::
+
+ del AttributeModule
+ del m
+
+ Note: it's now preferred to use type annotations instead of `torch.jit.Attribute`:
+
+ .. testcode::
+
+ import torch
+ from typing import Dict
+
+ class AttributeModule(torch.nn.Module):
+ names: Dict[str, int]
+
+ def __init__(self):
+ super().__init__()
+ self.names = {}
+
+ m = AttributeModule()
+
+ .. testcleanup::
+
+ del AttributeModule
+ del m
+
+ Args:
+ value: An initial value to be assigned to attribute.
+ type: A Python type
+
+ Returns:
+ Returns `value`
+"""
+
+
+def _get_type_trace_db():
+ # This is a private API. Use of this for external purposes is discouraged.
+ return type_trace_db
+
+
+# Gets a function from the name of a method on a type
+def _get_function_from_type(cls, name):
+ return getattr(cls, name, None)
+
+
+# ScriptClasses must be new-style classes because we construct them using their
+# __new__ method.
+def _is_new_style_class(cls):
+ if hasattr(cls, "__class__"):
+ return "__dict__" in dir(cls) or hasattr(cls, "__slots__")
+
+
+# These OrderedDictWrapper classes replace the actual OrderedDicts in
+# module with versions that get/set properties inside of Module.
+# This allows us to reuse most of nn.Module while still storing the
+# data in C++.
+# Each OrderedDict needs to support:
+# x not in view
+# x in view
+# view[name] = ...
+# view.values()
+# del view[name]
+# view.items()
+# view.keys()
+# len(view)
+
+
+class OrderedDictWrapper:
+ def __init__(self, _c):
+ self._c = _c
+
+ def keys(self):
+ return [k for k, v in self.items()]
+
+ def values(self):
+ return [v for k, v in self.items()]
+
+ def __len__(self):
+ return len(self.values())
+
+ def __delitem__(self, k):
+ raise RuntimeError("cannot delete methods or parameters of a script module")
+
+ def items(self):
+ return self._c.items()
+
+ def __setitem__(self, k, v):
+ if k not in self:
+ raise RuntimeError(
+ f"Can't add a new parameter after ScriptModule construction. Tried to add '{k}"
+ )
+ self._c.setattr(k, v)
+
+ def __contains__(self, k):
+ return self._c.contains(k)
+
+ def __getitem__(self, k):
+ if k not in self:
+ raise KeyError(k)
+ return self._c.getattr(k)
+
+
+class OrderedModuleDict(OrderedDictWrapper):
+ def __init__(self, module, python_dict):
+ super().__init__(torch._C.ModuleDict(module))
+ # contains _both_ script modules and non-script python-only modules
+
+ # because script modules are subclassed in python and the
+ # C++ Module class will not hold references to them,
+ # to ensure that you always get the same python value here
+ # we store it in the python dict as well
+ self._python_modules = python_dict
+
+ def items(self):
+ r = self._python_modules.items()
+ return r
+
+ def __contains__(self, k):
+ return k in self._python_modules
+
+ def __setitem__(self, k, v):
+ # Cases where a sub-module can be re-assigned after ScriptModule construction:
+ # 1. If the attr is a module interface type, it's guaranteed that the module is
+ # not inlined in the graph, so it's safe to swap a new ScriptModule in.
+ # 2. If the new value is a ScriptModule with the same JIT type, the IR won't change
+ # and it's legit to swap a new module in.
+ # In these two cases we allow swapping in a new scripted module and update the
+ # corresponding python module dict to keep them in sync.
+ # Note: the value to be swapped in has to be a ScriptModule instead of an nn.Module,
+ # otherwise it's illegal and we throw an error.
+ if isinstance(v, ScriptModule):
+ self._c.setattr(k, v)
+ self._python_modules[k] = v
+ else:
+ raise RuntimeError(
+ "Cannot re-assign modules in a ScriptModule with non-scripted "
+ f"module, tried to replace existing module '{k}': {v}"
+ )
+
+ def __getitem__(self, k):
+ return self._python_modules[k]
+
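+# Illustration of the re-assignment rule above (hedged; module names hypothetical):
+#
+#     sm = torch.jit.script(MyModule())        # MyModule has a submodule `sub`
+#     sm.sub = torch.jit.script(OtherSub())    # OK if the JIT/interface type matches
+#     sm.sub = OtherSub()                      # RuntimeError: not a ScriptModule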
+
+# For each user-defined class that subclasses ScriptModule, this meta-class:
+# (1) finds all the methods annotated with @script_method in a ScriptModule and
+# removes them from the class attributes
+# (2) puts a wrapper around the class's __init__ method to recursively compile
+# all of the script_methods with the module after the original __init__ has
+# run. This has to occur after the user-defined __init__ so that submodules and
+# parameters are initialized _before_ the script compiler resolves references to
+# `self.param` or `self.module`.
+class ScriptMeta(type):
+ def __init__(cls, name, bases, attrs): # noqa: B902
+ # Aggregate all the ScriptMethods and constants from superclasses
+ cls._methods: Dict[str, Any] = {}
+ cls._constants_set = set(getattr(cls, "__constants__", ()))
+ for base in reversed(bases):
+ for k, v in getattr(base, "_methods", {}).items():
+ cls._methods[k] = v
+ base_constants: Set = getattr(base, "_constants_set", set())
+ cls._constants_set = cls._constants_set.union(base_constants)
+
+ # find all the script methods of the current class
+ for k, v in sorted(attrs.items()):
+ if isinstance(v, ScriptMethodStub):
+ delattr(cls, k)
+ cls._methods[v.original_method.__name__] = v
+
+ if getattr(cls, "_disable_script_meta", False):
+ # We leave built-in ScriptModule types alone, since this metaclass
+ # is only for compiling user classes that inherit from
+ # ScriptModule.
+ return super().__init__(name, bases, attrs)
+
+ original_init = getattr(cls, "__init__", lambda self: None)
+
+ @functools.wraps(original_init)
+ def init_then_script(self, *args, **kwargs):
+ num_methods = len(cls._methods)
+ original_init(self, *args, **kwargs)
+ added_methods_in_init = len(cls._methods) > num_methods
+
+ if type(self) == cls:
+
+ def make_stubs(module):
+ cls = type(module)
+ if hasattr(cls, "_methods"):
+ return [v for k, v in sorted(cls._methods.items())]
+ else:
+ return infer_methods_to_compile(module)
+
+ self.__dict__[
+ "_actual_script_module"
+ ] = torch.jit._recursive.create_script_module(
+ self, make_stubs, share_types=not added_methods_in_init
+ )
+
+ # Delete the Python attributes that now shadow the ScriptModule
+ # ones, so that __getattr__ and __setattr__ will properly find
+ # the scripted versions.
+ concrete_type = self._actual_script_module._concrete_type
+ for name in concrete_type.get_attributes():
+ delattr(self, name)
+ for name, _ in concrete_type.get_modules():
+ delattr(self, name)
+ for name in ("_parameters", "_buffers", "_modules"):
+ delattr(self, name)
+
+ cls.__init__ = init_then_script # type: ignore[misc]
+ super().__init__(name, bases, attrs)
+
+
+class _CachedForward:
+ def __get__(self, obj, cls):
+ return self.__getattr__("forward") # type: ignore[attr-defined]
+
+
+class ScriptWarning(Warning):
+ pass
+
+
+def script_method(fn):
+ if not _enabled:
+ return fn
+ # NOTE: we need to traverse two frames here because the meta-class frame
+ # for ScriptModule will be present, as opposed to invoking @script on a
+ # function or invoking define() on a CompilationUnit.
+ # The stack will look like:
+ #
+ # 0. createResolutionCallback()
+ # 1. script_method()
+ # 2. ScriptModule metaclass frame
+ # 3. Surrounding scope
+ #
+ # createResolutionCallback internally adds 1 to get us to the scope of this
+ # function (the calling function). Adding 2 gets us to the proper surrounding scope.
+ _rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
+ ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
+ return ScriptMethodStub(_rcb, ast, fn)
+
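+# Usage sketch (hedged): @script_method is applied inside a ScriptModule subclass;
+# ScriptMeta collects the resulting stub and compiles it after __init__ runs:
+#
+#     class MyModule(torch.jit.ScriptModule):
+#         @torch.jit.script_method
+#         def forward(self, x):
+#             return x + 1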
+
+class ConstMap:
+ def __init__(self, const_mapping):
+ self.const_mapping = const_mapping
+
+ def __getattr__(self, attr):
+ return self.const_mapping[attr]
+
+
+def unpackage_script_module(
+ importer: PackageImporter, script_module_id: str
+) -> torch.nn.Module:
+ """
+ Called by ``torch.package.PackageImporter``'s Pickler's ``persistent_load`` function.
+
+ Performs the work of loading and returning a ScriptModule from a ``torch.package`` archive.
+ """
+ if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader):
+ raise RuntimeError(
+ "Loading ScriptObjects from a PackageImporter created from a "
+ "directory is not supported. Use a package archive file instead."
+ )
+ cu = torch._C.CompilationUnit()
+ cpp_module = torch._C._import_ir_module_from_package(
+ cu,
+ importer.zip_reader,
+ importer.storage_context,
+ validate_map_location(importer.last_map_location),
+ script_module_id,
+ )
+ return wrap_cpp_module(cpp_module)
+
+
+if _enabled:
+ _magic_methods = [
+ "__iter__",
+ "__len__",
+ "__neg__",
+ "__mul__",
+ "__contains__",
+ "__add__",
+ "__sub__",
+ "__pow__",
+ "__truediv__",
+ "__mod__",
+ "__ne__",
+ "__eq__",
+ "__lt__",
+ "__gt__",
+ "__le__",
+ "__ge__",
+ "__and__",
+ "__or__",
+ "__xor__",
+ "__getitem__",
+ "__setitem__",
+ "__call__",
+ "__int__",
+ "__float__",
+ "__bool__",
+ "__str__",
+ "__enter__",
+ "__exit__",
+ ]
+
+ class RecursiveScriptClass:
+ """Wrapper for a TorchScript class instance for use in Python.
+
+ An analogue of RecursiveScriptModule for regular objects that are not modules.
+ This class is a wrapper around a torch._C.ScriptObject that represents an instance
+ of a TorchScript class and allows it to be used in Python.
+
+ Attributes:
+ _c [torch._C.ScriptObject]: The C++ object to which attribute lookups and method
+ calls are forwarded.
+ _props [Dict[str, property]]: A dictionary of properties fetched from self._c and
+ exposed on this wrapper.
+ """
+
+ def __init__(self, cpp_class):
+ super().__init__()
+ self.__dict__["_initializing"] = True
+ self._c = cpp_class
+
+ # Add wrapped object's properties to this class instance.
+ self._props = {
+ prop.name: property(prop.getter, prop.setter)
+ for prop in self._c._properties()
+ }
+
+ self.__dict__["_initializing"] = False
+
+ def __getattr__(self, attr):
+ if self.__dict__.get("_initializing"):
+ return super().__getattr__(attr) # type: ignore[misc]
+
+ if attr in self._props:
+ return self._props[attr].fget() # type: ignore[call-arg, misc]
+
+ return getattr(self._c, attr)
+
+ def __setattr__(self, attr, value):
+ if self.__dict__.get("_initializing"):
+ return super().__setattr__(attr, value)
+
+ if attr in self._props:
+ return self._props[attr].fset(value) # type: ignore[call-arg, misc]
+
+ setattr(self._c, attr, value)
+
+ # Delegate calls to magic methods like __len__ to the C++ module backing the
+ # RecursiveScriptClass.
+ def forward_magic_method(self, method_name, *args, **kwargs):
+ if not self._c._has_method(method_name):
+ raise TypeError()
+
+ self_method = self.__getattr__(method_name)
+ return self_method(*args, **kwargs)
+
+ def __getstate__(self):
+ raise pickle.PickleError("ScriptClasses cannot be pickled")
+
+ def __iadd__(self, other):
+ if self._c._has_method("__iadd__"):
+ return self.forward_magic_method("__iadd__", other)
+ else:
+ return self.forward_magic_method("__add__", other)
+
+ for method_name in _magic_methods:
+
+ def make_method_template(name):
+ # Bind `name` eagerly; a closure over the loop variable would late-bind
+ # and forward every magic method to the last name in _magic_methods.
+ def method_template(self, *args, **kwargs):
+ return self.forward_magic_method(name, *args, **kwargs)
+
+ return method_template
+
+ setattr(RecursiveScriptClass, method_name, make_method_template(method_name))
+
+ # this is a Python 'non-data descriptor' that causes the first access
+ # to ScriptModule's forward to look up the forward method and stash
+ # it in the object's dict. Due to the standard rules for attribute lookup,
+ # subsequent lookups will just directly return the previously looked up method.
+ # This is necessary because nn.Module defines forward as a method. If we
+ # did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
+ # which always throws an exception.
+
+ class ScriptModule(Module, metaclass=ScriptMeta):
+ r"""Wrapper for C++ torch::jit::Module with methods, attributes, and parameters.
+
+ A wrapper around C++ ``torch::jit::Module``. ``ScriptModule``\s
+ contain methods, attributes, parameters, and
+ constants. These can be accessed the same way as on a normal ``nn.Module``.
+ """
+
+ __jit_unused_properties__ = [
+ "code",
+ "code_with_constants",
+ "graph",
+ "inlined_graph",
+ "original_name",
+ ]
+
+ def __init__(self):
+ super().__init__()
+
+ forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment]
+
+ def __getattr__(self, attr):
+ if "_actual_script_module" not in self.__dict__:
+ return super().__getattr__(attr)
+ return getattr(self._actual_script_module, attr)
+
+ def __setattr__(self, attr, value):
+ if "_actual_script_module" not in self.__dict__:
+ # Unwrap torch.jit.Attribute into a regular setattr + record
+ # the provided type in __annotations__.
+ #
+ # This ensures that if we use the attr again in `__init__`, it
+ # will look like the actual value, not an instance of Attribute.
+ if isinstance(value, Attribute):
+ # NB: Ensure that we set __annotations__ on the specific
+ # class in question, and not on a superclass (which would
+ # be wrong wrong wrong!).
+ # See also https://github.com/pytorch/pytorch/issues/39463
+ if "__annotations__" not in self.__class__.__dict__:
+ self.__class__.__annotations__ = {}
+ self.__annotations__[attr] = value.type
+ value = value.value
+ return super().__setattr__(attr, value)
+
+ setattr(self._actual_script_module, attr, value)
+
+ def define(self, src):
+ if "_actual_script_module" in self.__dict__:
+ # If we have completed initialization, just defer to the
+ # backing RecursiveScriptModule to eagerly compile the provided
+ # source.
+ return self._actual_script_module.define(src)
+
+ # Otherwise, we are still in the object's __init__.
+ # In that case, add `src` as a stub to be compiled.
+ #
+ # We use frames_up=1 to get to the proper surrounding scope. The stack
+ # will look like:
+ # 0. createResolutionCallback
+ # 1. define()
+ # 2. surrounding scope.
+ #
+ # createResolutionCallback internally adds 1 to get us to our frame, then
+ # we add 1 to get to the proper surrounding scope.
+ rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
+ ast = torch._C._parse_source_def(src)
+ self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)
+
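+ # Usage sketch for define() (hedged): when called from a subclass's __init__,
+ # the source is stored as a stub and compiled once __init__ finishes, e.g.
+ #
+ #     self.define("def double(self, x):\n    return 2 * x\n")
+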
+ def _replicate_for_data_parallel(self):
+ return self._actual_script_module._replicate_for_data_parallel()
+
+ def __reduce_package__(self, exporter: PackageExporter):
+ """Save a ScriptModule inside of a ``torch.package`` archive.
+
+ Called by ``torch.package.PackageExporter``'s Pickler's ``persistent_id`` when
+ saving TorchScript objects. Performs the act of saving a ScriptModule inside of
+ a ``torch.package`` archive.
+
+ Returns the method to load the ScriptModule from a ``torch.package.PackageImporter``'s
+ Pickler's ``persistent_load`` function.
+ """
+ script_module_id = exporter.get_unique_id()
+ exporter.script_module_serializer.serialize(self._c, int(script_module_id))
+ return (unpackage_script_module, (script_module_id,))
+
+ class RecursiveScriptModule(ScriptModule):
+ # XXX: RecursiveScriptModule inherits from ScriptModule for the sole
+ # reason that it retains the existing isinstance(ScriptModule)
+ # behavior.
+ r"""Retain the existing isinstance(ScriptModule) behavior.
+
+ The core data structure in TorchScript is the ``ScriptModule``. It is an
+ analogue of torch's ``nn.Module`` and represents an entire model as a tree of
+ submodules. Like normal modules, each individual module in a ``ScriptModule`` can
+ have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
+ as Python functions, but in ``ScriptModule``\s methods are implemented as
+ TorchScript functions, a statically-typed subset of Python that contains all
+ of PyTorch's built-in Tensor operations. This difference allows your
+ ``ScriptModule``\s code to run without the need for a Python interpreter.
+
+ ``ScriptModule``\s should not be created manually, instead use
+ either :func:`tracing <torch.jit.trace>` or :func:`scripting <torch.jit.script>`.
+ Tracing and scripting can be applied incrementally and :ref:`composed as necessary `.
+
+ * Tracing records the tensor operations as executed with a set of example inputs and uses these
+ operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing,
+ but values other than Tensors and control flow aren't captured in the graph.
+
+ * Scripting inspects the Python code of the model
+ and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow.
+ Many, but not all features of Python are supported by the compiler, so changes to the source code may be necessary.
+ """
+
+ _disable_script_meta = True
+
+ def __init__(self, cpp_module):
+ self.__dict__["_initializing"] = True
+ self._c = cpp_module
+ super().__init__()
+ # Delete the 'training' attribute set up by `Module.__init__`. It
+ # will get set on the underlying cpp module, so we delete it here
+ # to avoid this version shadowing the cpp module version.
+ delattr(self, "training")
+
+ @staticmethod
+ def _construct(cpp_module, init_fn):
+ """
+ Construct a RecursiveScriptModule that's ready for use.
+
+ PyTorch code should use this to construct a RecursiveScriptModule instead
+ of calling `__init__` directly, as it makes sure the
+ object is properly finalized (and in the future, we may take
+ control of how the RecursiveScriptModule instance is created).
+
+ Args:
+ cpp_module: The C++ Module that will hold the actual state of
+ this RecursiveScriptModule instance.
+ init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
+ """
+ script_module = RecursiveScriptModule(cpp_module)
+ init_fn(script_module)
+
+ # Finalize the ScriptModule: replace the nn.Module state with our
+ # custom implementations and flip the _initializing bit.
+ RecursiveScriptModule._finalize_scriptmodule(script_module)
+ return script_module
+
+ @staticmethod
+ def _finalize_scriptmodule(script_module):
+ script_module._parameters = OrderedDictWrapper(
+ torch._C.ParameterDict(script_module._c)
+ )
+ script_module._buffers = OrderedDictWrapper(
+ torch._C.BufferDict(script_module._c)
+ )
+ script_module._modules = OrderedModuleDict(
+ script_module._c, script_module._modules
+ )
+ script_module._initializing = False
+
+ def _reconstruct(self, cpp_module):
+ """
+ Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
+
+ Args:
+ cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
+ """
+ self.__init__(cpp_module) # type: ignore[misc]
+
+ # Copy the concrete type from the C++ module to this ScriptModule.
+ self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
+ self._c._type()
+ )
+
+ # Copy submodules from the C++ module to this ScriptModule.
+ modules = {}
+ for name, cpp_module in torch._C.ModuleDict(self._c).items():
+ modules[name] = wrap_cpp_module(cpp_module)
+ self._modules = OrderedModuleDict(self._c, modules) # type: ignore[assignment]
+
+ # Copy parameters and buffers.
+ self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c)) # type: ignore[assignment]
+ self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c)) # type: ignore[assignment]
+
+ # Get rid of the functions from the old C++ module.
+ self.__dict__ = {
+ k: v
+ for k, v in self.__dict__.items()
+ if not isinstance(v, torch._C.ScriptMethod)
+ }
+ self.__dict__["_initializing"] = False
+
+ @property
+ def graph(self):
+ r"""Return a string representation of the internal graph for the ``forward`` method.
+
+ See :ref:`interpreting-graphs` for details.
+ """
+ return self._c._get_method("forward").graph
+
+ @property
+ def inlined_graph(self):
+ r"""
+ Return a string representation of the internal graph for the ``forward`` method.
+
+ This graph will be preprocessed to inline all function and method calls.
+ See :ref:`interpreting-graphs` for details.
+ """
+ return self.forward.inlined_graph # type: ignore[attr-defined]
+
+ @property
+ def code(self):
+ r"""
+ Return a pretty-printed representation (as valid Python syntax) of the internal graph for the ``forward`` method.
+
+ See :ref:`inspecting-code` for details.
+ """
+ return self.forward.code # type: ignore[attr-defined]
+
+ @property
+ def code_with_constants(self):
+ r"""Return a tuple.
+
+ Returns a tuple of:
+
+ [0] a pretty-printed representation (as valid Python syntax) of
+ the internal graph for the ``forward`` method. See `code`.
+ [1] a ConstMap following the CONSTANT.cN format of the output in [0].
+ The indices in the [0] output are keys to the underlying constant's values.
+
+ See :ref:`inspecting-code` for details.
+ """
+ r = self.forward.code_with_constants # type: ignore[attr-defined]
+ return (r[0], ConstMap(r[1]))
+
+ def save(self, f, **kwargs):
+ r"""Save with a file-like object.
+
+ save(f, _extra_files={})
+
+ See :func:`torch.jit.save <torch.jit.save>`, which accepts a file-like object.
+ This method converts ``f`` to a string, treating it as a path.
+ DO NOT confuse these two functions when it comes to the 'f' parameter functionality.
+ """
+ return self._c.save(str(f), **kwargs)
+
+ def _save_for_lite_interpreter(self, *args, **kwargs):
+ r"""Add (or update) the bytecode session to the script model.
+
+ _save_for_lite_interpreter(f)
+
+ The updated model is used by the
+ lite interpreter for mobile applications.
+
+ Args:
+ f: a string containing a file name.
+ _extra_files: Map from filename to contents which will be stored as part of 'f'.
+
+ """
+ return self._c._save_for_mobile(*args, **kwargs)
+
+ def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs):
+ return self._c._save_to_buffer_for_mobile(*args, **kwargs)
+
+ def save_to_buffer(self, *args, **kwargs):
+ return self._c.save_to_buffer(*args, **kwargs)
+
+ def get_debug_state(self, *args, **kwargs):
+ return self._c.get_debug_state()
+
+ def extra_repr(self):
+ return f"original_name={self.original_name}"
+
+ def graph_for(self, *args, **kwargs):
+ return self.forward.graph_for(self, *args, **kwargs) # type: ignore[attr-defined]
+
+ @property
+ def original_name(self):
+ if type(self) == str(self._c._type().name()):
+ return ""
+ return str(self._c._type().name())
+
+ def define(self, src):
+ # We use frames_up=1 to get to the proper surrounding scope. The stack
+ # will look like:
+ # 0. createResolutionCallback
+ # 1. define()
+ # 2. surrounding scope.
+ #
+ # createResolutionCallback internally adds 1 to get us to our frame, then
+ # we add 1 to get to the proper surrounding scope.
+ rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
+ self._c._define(self._concrete_type, src, rcb)
+
+ def __getattr__(self, attr):
+ if "_initializing" not in self.__dict__:
+ raise RuntimeError(
+ "ScriptModule has not been initialized, did you forget to call super's init?"
+ )
+
+ if self._initializing:
+ return super().__getattr__(attr)
+
+ # _modules check is before hasattr since modules are included as attributes in _c,
+ # but we want to get the python wrapper from _modules instead of the raw _c object.
+ if attr in self._modules:
+ return self._modules[attr]
+ elif self._c.hasattr(attr):
+ return self._c.getattr(attr)
+ elif self._c._has_method(attr):
+ script_method = self._c._get_method(attr)
+ # cache method so future calls do not go through __getattr__
+ # to improve invocation performance
+ self.__dict__[attr] = script_method
+ return script_method
+
+ return super().__getattr__(attr)
+
+ def __setattr__(self, attr, value):
+ if self._initializing:
+ return super().__setattr__(attr, value)
+
+ if attr in self._modules:
+ self._modules[attr] = value
+ elif self._c.hasattr(attr):
+ self._c.setattr(attr, value)
+ elif (
+ hasattr(self, "_concrete_type")
+ and attr in self._concrete_type.get_constants().keys()
+ ):
+ # TODO: we don't have _concrete_type set after load(), and in general we lose constant information.
+ # We should encode constants as class type attributes (or something) so it persists across save/load.
+ raise AttributeError(
+ f"Cannot mutate TorchScript constant value: '{attr}'. Value: '{value}'"
+ )
+ else:
+ # We allow setting Python attributes on the ScriptModule, for
+ # when people want to stash some convenience info on it.
+ # TODO: it's possible that the following is confusing:
+ # s = torch.jit.script(...)
+ # s.python_attr = ...
+ # s.save() <--- this doesn't have `python_attr`
+ # It's fairly trivial to save enough info to warn in this case.
+ return super().__setattr__(attr, value)
+
+ def __copy__(self):
+ return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c))
+
+ def __deepcopy__(self, memo):
+ return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo))
+
+ # Python magic methods do method lookups on an object's class type, instead of looking up
+ # the methods defined on the class instance. In order to continue to expose the magic methods
+ # of builtin-containers (ModuleList, Sequential, ModuleDict) to Python, we
+ # define magic methods here as a shim to the correct attribute.
+ def forward_magic_method(self, method_name, *args, **kwargs):
+ self_method = getattr(self, method_name)
+ if getattr(self_method, "__func__", None) == getattr(
+ RecursiveScriptModule, method_name
+ ):
+ raise NotImplementedError()
+ return self_method(*args, **kwargs)
+
+ def __iter__(self):
+ return self.forward_magic_method("__iter__")
+
+ def __getitem__(self, idx):
+ return self.forward_magic_method("__getitem__", idx)
+
+ def __len__(self):
+ return self.forward_magic_method("__len__")
+
+ def __contains__(self, key):
+ return self.forward_magic_method("__contains__", key)
+
+ # dir is defined by the base nn.Module, so instead of throwing if
+ # it is not overridden, we call into the nn.Module __dir__ method
+ def __dir__(self):
+ self_method = self.__dir__
+ if (
+ self_method.__func__ # type: ignore[attr-defined]
+ == _get_function_from_type(RecursiveScriptModule, "__dir__")
+ ):
+ return super().__dir__()
+ return self_method()
+
+ # To resolve bool(value), Python checks whether __bool__ is defined, then whether
+ # __iter__ is defined, and otherwise returns True for class instances. Since __iter__()
+ # on this class throws if it isn't overridden, we define __bool__ to preserve the default behavior
+ def __bool__(self):
+ self_method = self.__bool__
+ if (
+ self_method.__func__ # type: ignore[attr-defined]
+ == _get_function_from_type(RecursiveScriptModule, "__bool__")
+ ):
+ return True
+ return self_method()
+
+ def _replicate_for_data_parallel(self):
+ # we have to initialize ScriptModule properly so that
+ # it works with pybind11
+ def init_fn(script_module):
+ # Don't do anything here, we'll initialize the ScriptModule below
+ return
+
+ return RecursiveScriptModule._construct(
+ self._c._replicate_for_data_parallel(), init_fn
+ )
+
+ # Need to copy all RecursiveScriptModule methods to ScriptModule.
+ #
+ # This is because `super().foo()` does not use
+ # `__getattr__` to look up `foo`. So we need to make each method available on
+ # the ScriptModule manually.
+ for name, item in RecursiveScriptModule.__dict__.items():
+ if not callable(item) and not isinstance(item, property):
+ continue
+ if name.startswith("__") or hasattr(ScriptModule, name):
+ continue
+ # We can copy over the implementation wholesale because besides the
+ # `super()` thing above, ScriptModule behaves exactly like
+ # RecursiveScriptModule
+ setattr(ScriptModule, name, item)
+
+ def _get_methods(cls):
+ import inspect
+
+ # In Python 3 unbound methods are functions, but in Python 2 they are methods
+ return inspect.getmembers(
+ cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)
+ )
+
+ _compiled_methods_allowlist = {
+ "forward",
+ "register_buffer",
+ "register_parameter",
+ "register_module",
+ "add_module",
+ "_apply",
+ "apply",
+ "cuda",
+ "cpu",
+ "to",
+ "type",
+ "float",
+ "double",
+ "half",
+ "state_dict",
+ "_save_to_state_dict",
+ "load_state_dict",
+ "_load_from_state_dict",
+ "_named_members",
+ "parameters",
+ "named_parameters",
+ "buffers",
+ "named_buffers",
+ "children",
+ "named_children",
+ "modules",
+ "named_modules",
+ "zero_grad",
+ "share_memory",
+ "_get_name",
+ "extra_repr",
+ "_slow_forward",
+ "_tracing_name",
+ "eval",
+ "train",
+ "get_extra_state",
+ "set_extra_state",
+ }
+
+ def _make_fail(name):
+ def fail(self, *args, **kwargs):
+ raise RuntimeError(name + " is not supported on ScriptModules")
+
+ return fail
+
+ for name, method in _get_methods(torch.nn.Module):
+ if name.startswith("__") or name.endswith("_call_impl"):
+ continue
+ if (
+ name not in RecursiveScriptModule.__dict__
+ and name not in _compiled_methods_allowlist
+ ):
+ setattr(RecursiveScriptModule, method.__name__, _make_fail(name))
+
+
+else:
+ # TODO MAKE SURE THAT DISABLING WORKS
+ class RecursiveScriptClass: # type: ignore[no-redef]
+ pass
+
+ class ScriptModule(torch.nn.Module): # type: ignore[no-redef]
+ def __init__(self, arg=None):
+ super().__init__()
+
+ class RecursiveScriptModule(ScriptModule): # type: ignore[no-redef]
+ def __init__(self, arg=None):
+ super().__init__()
+
+
+def call_prepare_scriptable_func_impl(obj, memo):
+ if not isinstance(obj, torch.nn.Module):
+ return obj
+
+ obj_id = id(obj)
+
+ # If obj_id is in memo, obj has already been prepared or is being
+ # prepared in another call up the stack.
+ if obj_id in memo:
+ return memo[id(obj)]
+
+ obj = obj.__prepare_scriptable__() if hasattr(obj, "__prepare_scriptable__") else obj # type: ignore[operator]
+ # Record obj in memo to avoid infinite recursion in the case of cycles in the module
+ # hierarchy when recursing below.
+ memo[obj_id] = obj
+
+ new_obj_dict = {}
+
+ for name, sub_module in obj.__dict__.items():
+ if name == "_modules":
+ for k, v in sub_module.items():
+ sub_module[k] = call_prepare_scriptable_func_impl(v, memo)
+ new_obj_dict[name] = sub_module
+ elif isinstance(sub_module, torch.nn.Module) and not isinstance(
+ sub_module, ScriptModule
+ ):
+ new_obj_dict[name] = call_prepare_scriptable_func_impl(sub_module, memo)
+ else:
+ new_obj_dict[name] = sub_module
+
+ for k, v in new_obj_dict.items():
+ # use the loop variable `k` here, not the leftover `name` from the loop above
+ obj.__dict__[k] = v
+
+ return obj
+
+
+def call_prepare_scriptable_func(obj):
+ memo: Dict[int, torch.nn.Module] = {}
+ return call_prepare_scriptable_func_impl(obj, memo)
+
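+# Protocol sketch (hedged; class names hypothetical): a module can opt into a
+# scripting-friendly replacement of itself by defining __prepare_scriptable__,
+# which the traversal above invokes recursively over the module hierarchy:
+#
+#     class Wrapper(torch.nn.Module):
+#         def __prepare_scriptable__(self):
+#             return self.scriptable_equivalent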
+
+def create_script_dict(obj):
+ """
+ Create a ``torch._C.ScriptDict`` instance with the data from ``obj``.
+
+ Args:
+ obj (dict): The Python dictionary that is used to initialize the ``ScriptDict``
+ returned by this function.
+
+ Returns:
+ An instance of ``torch._C.ScriptDict`` that has the same data as ``obj``
+ and can be passed between Python and TorchScript with reference semantics and
+ zero copy overhead.
+ """
+ return torch._C.ScriptDict(obj) # type: ignore[attr-defined]
+
+
+def create_script_list(obj, type_hint=None):
+ """
+ Create a ``torch._C.ScriptList`` instance with the data from ``obj``.
+
+ Args:
+ obj (list): The Python list that is used to initialize the ``ScriptList``
+ returned by this function.
+ Returns:
+ An instance of ``torch._C.ScriptList`` that has the same data as ``obj``
+ and can be passed between Python and TorchScript with reference semantics and
+ zero copy overhead.
+ """
+ return torch._C.ScriptList(obj) # type: ignore[attr-defined]
+
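+# Usage sketch (hedged): these are the paths torch.jit.script takes for plain
+# containers, e.g.
+#
+#     d = torch.jit.script({"a": torch.ones(2)})   # torch._C.ScriptDict
+#     l = torch.jit.script([1, 2, 3])              # torch._C.ScriptList
+#
+# Both share their data with TorchScript by reference, so mutations made inside
+# a scripted function are visible from Python.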
+
+def script(
+ obj,
+ optimize=None,
+ _frames_up=0,
+ _rcb=None,
+ example_inputs: Union[List[Tuple], Dict[Callable, List[Tuple]], None] = None,
+):
+ r"""Script the function.
+
+ Scripting a function or ``nn.Module`` will inspect the source code, compile
+ it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
+ :class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
+ features in Python work, but we provide enough functionality to compute on
+ tensors and do control-dependent operations. For a complete guide, see the
+ :ref:`language-reference`.
+
+ Scripting a dictionary or list copies the data inside it into a TorchScript instance that can be
+ subsequently passed by reference between Python and TorchScript with zero copy overhead.
+
+ ``torch.jit.script`` can be used as a function for modules, functions, dictionaries and lists
+ and as a decorator ``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
+
+ Args:
+ obj (Callable, class, or nn.Module): The ``nn.Module``, function, class type,
+ dictionary, or list to compile.
+ example_inputs (Union[List[Tuple], Dict[Callable, List[Tuple]], None]): Provide example inputs
+ to annotate the arguments for a function or ``nn.Module``.
+
+ Returns:
+ If ``obj`` is ``nn.Module``, ``script`` returns
+ a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
+ have the same set of sub-modules and parameters as the
+ original ``nn.Module``. If ``obj`` is a standalone function,
+ a :class:`ScriptFunction` will be returned. If ``obj`` is a ``dict``, then
+ ``script`` returns an instance of `torch._C.ScriptDict`. If ``obj`` is a ``list``,
+ then ``script`` returns an instance of `torch._C.ScriptList`.
+
+ **Scripting a function**
+ The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
+ by compiling the body of the function.
+
+ Example (scripting a function):
+
+ .. testcode::
+
+ import torch
+
+ @torch.jit.script
+ def foo(x, y):
+ if x.max() > y.max():
+ r = x
+ else:
+ r = y
+ return r
+
+ print(type(foo)) # torch.jit.ScriptFunction
+
+ # See the compiled graph as Python code
+ print(foo.code)
+
+ # Call the function using the TorchScript interpreter
+ foo(torch.ones(2, 2), torch.ones(2, 2))
+
+ .. testoutput::
+ :hide:
+
+ ...
+
+ **Scripting a function using example_inputs**
+ Example inputs can be used to annotate the arguments of a function.
+
+ Example (annotating a function before scripting):
+
+ .. testcode::
+
+ import torch
+
+ def test_sum(a, b):
+ return a + b
+
+ # Annotate the arguments to be int
+ scripted_fn = torch.jit.script(test_sum, example_inputs=[(3, 4)])
+
+ print(type(scripted_fn)) # torch.jit.ScriptFunction
+
+ # See the compiled graph as Python code
+ print(scripted_fn.code)
+
+ # Call the function using the TorchScript interpreter
+ scripted_fn(20, 100)
+
+ .. testoutput::
+ :hide:
+
+ ...
+
+ **Scripting an nn.Module**
+ Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
+ compile any methods, submodules, and functions called by ``forward``. If a ``nn.Module`` only uses
+ features supported in TorchScript, no changes to the original module code should be necessary. ``script``
+ will construct a :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
+ the original module.
+
+ Example (scripting a simple module with a Parameter):
+
+ .. testcode::
+
+ import torch
+
+ class MyModule(torch.nn.Module):
+ def __init__(self, N, M):
+ super().__init__()
+ # This parameter will be copied to the new ScriptModule
+ self.weight = torch.nn.Parameter(torch.rand(N, M))
+
+ # When this submodule is used, it will be compiled
+ self.linear = torch.nn.Linear(N, M)
+
+ def forward(self, input):
+ output = self.weight.mv(input)
+
+ # This calls the `forward` method of the `nn.Linear` module, which will
+ # cause the `self.linear` submodule to be compiled to a `ScriptModule` here
+ output = self.linear(output)
+ return output
+
+ scripted_module = torch.jit.script(MyModule(2, 3))
+
+ Example (scripting a module with traced submodules):
+
+ .. testcode::
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class MyModule(nn.Module):
+ def __init__(self):
+ super().__init__()
+ # torch.jit.trace produces ScriptModules for conv1 and conv2
+ self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
+ self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
+
+ def forward(self, input):
+ input = F.relu(self.conv1(input))
+ input = F.relu(self.conv2(input))
+ return input
+
+ scripted_module = torch.jit.script(MyModule())
+
+ To compile a method other than ``forward`` (and recursively compile anything it calls), add
+ the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
+ use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
+
+ Example (an exported and ignored method in a module)::
+
+ import torch
+ import torch.nn as nn
+
+ class MyModule(nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ @torch.jit.export
+ def some_entry_point(self, input):
+ return input + 10
+
+ @torch.jit.ignore
+ def python_only_fn(self, input):
+ # This function won't be compiled, so any
+ # Python APIs can be used
+ import pdb
+ pdb.set_trace()
+
+ def forward(self, input):
+ if self.training:
+ self.python_only_fn(input)
+ return input * 99
+
+ scripted_module = torch.jit.script(MyModule())
+ print(scripted_module.some_entry_point(torch.randn(2, 2)))
+ print(scripted_module(torch.randn(2, 2)))
+
+ Example (Annotating forward of nn.Module using example_inputs)::
+
+ import torch
+ import torch.nn as nn
+ from typing import List, NamedTuple
+
+ class MyModule(NamedTuple):
+ result: List[int]
+
+ class TestNNModule(torch.nn.Module):
+ def forward(self, a) -> MyModule:
+ result = MyModule(result=a)
+ return result
+
+ pdt_model = TestNNModule()
+
+ # Runs the pdt_model in eager mode with the inputs provided and annotates the arguments of forward
+ scripted_model = torch.jit.script(pdt_model, example_inputs={pdt_model: [([10, 20, ], ), ], })
+
+ # Run the scripted_model with actual inputs
+ print(scripted_model([20]))
+ """
+ global type_trace_db
+ if not _enabled:
+ return obj
+
+ if optimize is not None:
+ warnings.warn(
+ "`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
+ )
+
+ # No-op for modules, functions, class instances that are already scripted
+ if isinstance(obj, RecursiveScriptClass):
+ return obj
+ if isinstance(obj, ScriptModule):
+ return obj
+ if isinstance(obj, ScriptFunction):
+ return obj
+
+ if example_inputs:
+ # If MonkeyType is installed, enable profile directed type annotation
+ # Check if example_inputs are defined and generate call traces
+ # for the method by running the eager-mode version of the method with
+ # the provided example inputs. This logs all the traces in type_trace_db
+ type_trace_db = JitTypeTraceStore()
+ if monkeytype_trace:
+ monkeytype_config = JitTypeTraceConfig(type_trace_db)
+ with monkeytype_trace(monkeytype_config):
+ if isinstance(example_inputs, Dict):
+ # If the obj is an nn.Module or a class, then each method is
+ # executed with the arguments provided in the example inputs.
+ # example inputs here will be of type Dict(class.method, (arguments))
+ # This is used to infer type annotations for those methods
+ # which are not called directly under the hood of monkeytype.
+ for module, example_input in example_inputs.items():
+ for example in example_input:
+ module(*example)
+ elif isinstance(example_inputs, List):
+ for examples in example_inputs:
+ obj(*examples)
+ else:
+ raise ValueError(
+ "Error: Unable to infer types. Please format the inputs to type `List[Tuple]`"
+ " or `Dict[Callable, List[Tuple]]` to be run with MonkeyType."
+ )
+ else:
+ warnings.warn(
+ "Warning: monkeytype is not installed. Please install https://github.com/Instagram/MonkeyType "
+ "to enable Profile-Directed Typing in TorchScript. Refer to "
+ "https://github.com/Instagram/MonkeyType/blob/master/README.rst to install MonkeyType. "
+ )
+
+ if isinstance(obj, torch.nn.Module):
+ obj = call_prepare_scriptable_func(obj)
+ return torch.jit._recursive.create_script_module(
+ obj, torch.jit._recursive.infer_methods_to_compile
+ )
+ else:
+ obj = obj.__prepare_scriptable__() if hasattr(obj, "__prepare_scriptable__") else obj # type: ignore[operator]
+
+ if isinstance(obj, dict):
+ return create_script_dict(obj)
+ if isinstance(obj, list):
+ return create_script_list(obj)
+
+ if inspect.isclass(obj):
+ qualified_name = _qualified_name(obj)
+ # If this type is an `nn.Module` subclass, the user probably meant to
+ # pass an instance instead of the class itself
+ if issubclass(obj, torch.nn.Module):
+ raise RuntimeError(
+ f"Type '{obj}' cannot be compiled since it inherits from nn.Module, pass an instance instead"
+ )
+
+ # Enums are automatically usable in TorchScript; explicitly scripting them
+ # is not necessary, but not harmful either.
+ if issubclass(obj, enum.Enum):
+ return obj
+
+ if not _is_new_style_class(obj):
+ raise RuntimeError(
+ "TorchScript classes must be new-style classes. "
+ "Please inherit from 'object'."
+ )
+ if len(obj.mro()) > 2:
+ raise RuntimeError(
+ "TorchScript classes does not support inheritance yet. "
+ "Please directly inherit from 'object'."
+ )
+ if _rcb is None:
+ _rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
+ _compile_and_register_class(obj, _rcb, qualified_name)
+ return obj
+ elif inspect.isfunction(obj) or inspect.ismethod(obj):
+ qualified_name = _qualified_name(obj)
+ # this is a decorated fn, and we need the underlying fn and its rcb
+ if hasattr(obj, "__script_if_tracing_wrapper"):
+ obj = obj.__original_fn # type: ignore[union-attr]
+ _rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
+
+ # some functions are explicitly marked as not supported in script mode
+ if hasattr(obj, "__script_unsupported"):
+ raise RuntimeError("TorchScript error: " + obj.__script_unsupported)
+
+ _check_directly_compile_overloaded(obj)
+ maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
+ if maybe_already_compiled_fn:
+ return maybe_already_compiled_fn
+ ast = get_jit_def(obj, obj.__name__)
+ if _rcb is None:
+ _rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
+ fn = torch._C._jit_script_compile(
+ qualified_name, ast, _rcb, get_default_args(obj)
+ )
+ # Forward docstrings
+ fn.__doc__ = obj.__doc__
+ # Allow torch.compile() to inline
+ fn._torchdynamo_inline = obj # type: ignore[attr-defined]
+ _set_jit_function_cache(obj, fn)
+ return fn
+ else:
+ return torch.jit._recursive.create_script_class(obj)
+
+
+# overloads are registered in _jit_internal and compiled here so that _overload
+# can be used in nn/functional.py without an import cycle
+
+
+def _check_overload_defaults(impl_defaults, overload_defaults, loc):
+ for name, overload_value in overload_defaults.items():
+ if name not in impl_defaults or impl_defaults[name] != overload_value:
+ raise torch.jit.frontend.FrontendError(
+ loc,
+ "Default parameters on overloads do not affect the runtime so they "
+ "must equal to the default parameter on the implementation function. Found on "
+ f"parameter {name}",
+ )
+
+
+def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
+ overload_decl = get_jit_def(overload_fn, overload_fn.__name__).decl()
+ overload_signature = torch.jit.annotations.get_signature(
+ overload_fn, None, None, inspect.ismethod(overload_fn)
+ )
+ impl_ast = get_jit_def(impl_fn, impl_fn.__name__)
+ overload_defaults = get_default_args(overload_fn)
+ implementation_defaults = get_default_args(impl_fn)
+ _rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
+ _check_overload_defaults(
+ implementation_defaults, overload_defaults, overload_decl.range()
+ )
+ fn = torch._C._jit_script_compile_overload(
+ qual_name,
+ overload_decl,
+ impl_ast,
+ _rcb,
+ implementation_defaults,
+ overload_signature,
+ )
+ return fn
+
+
+def _get_overloads(obj):
+ # check for cached compiled fns
+ existing_compiled_fns = _try_get_jit_cached_overloads(obj)
+ qual_name = _qualified_name(obj)
+ uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
+ if uncompiled_overloads is None:
+ return existing_compiled_fns
+
+ if obj in uncompiled_overloads:
+ raise RuntimeError(
+ _jit_internal.get_overload_no_implementation_error_message("function", obj)
+ )
+
+ compiled_fns = []
+ for overload_fn in uncompiled_overloads:
+ compiled_fns.append(
+ _compile_function_with_overload(overload_fn, qual_name, obj)
+ )
+
+ if existing_compiled_fns:
+ compiled_fns = existing_compiled_fns + compiled_fns
+
+ # cache compilation, remove information stored to do compilation
+ _set_jit_overload_cache(obj, compiled_fns)
+ _jit_internal._clear_fn_overloads(qual_name)
+ return compiled_fns
+
+
+def _check_directly_compile_overloaded(obj):
+ qual_name = _qualified_name(obj)
+ if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
+ raise RuntimeError(
+ f"Function {qual_name} cannot be directly compiled because it"
+ " is overloaded. It must be used in a context of a function"
+ " where its inputs can determine which overload to call."
+ )
+
+
+def interface(obj):
+ r"""Decorate to annotate classes or modules of different types.
+
+ This decorator can be used to define an interface that can be used to annotate
+ classes or modules of different types. This can be used for to annotate a submodule
+ or attribute class that could have different types that implement the same
+ interface, or which could be swapped at runtime; or to store a list of modules or
+ classes of varying types.
+
+ It is sometimes used to implement "Callables" - functions or modules that implement
+ an interface but whose implementations differ and which can be swapped out.
+
+ Example:
+ .. testcode::
+
+ import torch
+ from typing import List
+
+ @torch.jit.interface
+ class InterfaceType:
+ def run(self, x: torch.Tensor) -> torch.Tensor:
+ pass
+
+ # implements InterfaceType
+ @torch.jit.script
+ class Impl1:
+ def run(self, x: torch.Tensor) -> torch.Tensor:
+ return x.relu()
+
+ class Impl2(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.val = torch.rand(())
+
+ @torch.jit.export
+ def run(self, x: torch.Tensor) -> torch.Tensor:
+ return x + self.val
+
+ def user_fn(impls: List[InterfaceType], idx: int, val: torch.Tensor) -> torch.Tensor:
+ return impls[idx].run(val)
+
+ user_fn_jit = torch.jit.script(user_fn)
+
+ impls = [Impl1(), torch.jit.script(Impl2())]
+ val = torch.rand(4, 4)
+ user_fn_jit(impls, 0, val)
+ user_fn_jit(impls, 1, val)
+ """
+ if not inspect.isclass(obj):
+ raise RuntimeError("interface must be applied to a class")
+ if not _is_new_style_class(obj):
+ raise RuntimeError("TorchScript interfaces must inherit from 'object'")
+
+ # Expected MRO is:
+ # User module
+ # torch.nn.modules.module.Module
+ # object
+ is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3
+
+ if not is_module_interface and len(obj.mro()) > 2:
+ raise RuntimeError(
+ "TorchScript interface does not support inheritance yet. "
+ "Please directly inherit from 'object' or 'nn.Module'."
+ )
+
+ qualified_name = _qualified_name(obj)
+ rcb = _jit_internal.createResolutionCallbackFromFrame(1)
+ # if this type is a `nn.Module` subclass, generate a module interface type
+ # instead of a class interface type; a module interface type only compiles
+ # the user provided methods as part of the interface
+ ast = get_jit_class_def(obj, obj.__name__)
+ mangled_classname = torch._C._jit_script_interface_compile(
+ qualified_name, ast, rcb, is_module_interface
+ )
+ obj.__torch_script_interface__ = mangled_classname
+ return obj
+
+
+def _recursive_compile_class(obj, loc):
+ _qual_name = _qualified_name(obj)
+ # We're starting a new compilation, so update the error call stack in
+ # case it fails
+ error_stack = torch._C.CallStack(_qual_name, loc)
+ rcb = _jit_internal.createResolutionCallbackForClassMethods(obj)
+ return _compile_and_register_class(obj, rcb, _qual_name)
+
+
+CompilationUnit = torch._C.CompilationUnit
+set_module(CompilationUnit, "torch.jit")
+
+
+def pad(s: str, padding: int, offset: int = 0, char: str = " "):
+ # Left-pad `s` with `char` to a total width of `padding`, plus `offset` extra
+ # characters; if `s` is already longer than `padding`, the full amount is still prepended.
+ if padding >= len(s):
+ padding -= len(s)
+ return "".join([char for _ in range(padding + offset)]) + s
+
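+# Illustrative behavior of `pad` (hand-worked examples):
+#
+#   >>> pad("abc", 6)
+#   '   abc'
+#   >>> pad("", 5, 0, "=")
+#   '====='
+#
+# Note that when `padding` is smaller than `len(s)`, the full `padding + offset`
+# characters are still prepended.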
+
+class _ScriptProfileColumn:
+ def __init__(self, header: str, alignment: int = 4, offset: int = 0):
+ self.header = header
+ self.alignment = alignment
+ self.offset = offset
+ self.rows: Dict[int, Any] = {}
+
+ def add_row(self, lineno: int, value: Any):
+ self.rows[lineno] = value
+
+ def materialize(self):
+ max_length = len(self.header)
+ rows: List[Tuple[int, str]] = []
+ for key, value in self.rows.items():
+ cell = str(value)
+ rows.append((key, cell))
+ max_length = max(len(cell), max_length)
+
+ if self.alignment > 0:
+ padding = max_length + self.alignment
+ padding -= padding % self.alignment
+ else:
+ padding = 0
+
+ rows = [(key, pad(cell, padding, self.offset)) for key, cell in rows]
+ return pad(self.header, padding, self.offset), rows
+
+
+class _ScriptProfileTable:
+ def __init__(self, cols: List[_ScriptProfileColumn], source_range: List[int]):
+ self.cols = cols
+ self.source_range = source_range
+
+ def dump_string(self):
+ outputs: List[str] = []
+ cells: List[Tuple[str, Dict[int, str]]] = []
+ header_buffer = ""
+ for col in self.cols:
+ header, rows = col.materialize()
+ header_buffer += header
+ cells.append((header, dict(rows)))
+
+ outputs.append(header_buffer)
+ outputs.append(pad("", len(header_buffer), 0, "="))
+ for line in self.source_range:
+ row_buffer = ""
+ for header, rows in cells:
+ cell = rows.get(line)
+ if cell is None:
+ row_buffer += pad("", len(header))
+ else:
+ row_buffer += cell
+ outputs.append(row_buffer)
+ return "\n".join(outputs)
+
+
+class _ScriptProfile:
+ def __init__(self):
+ self.profile = classes.profiling._ScriptProfile()
+
+ def enable(self):
+ self.profile.enable()
+
+ def disable(self):
+ self.profile.disable()
+
+ def dump_string(self) -> str:
+ outputs: List[str] = []
+ for source_stats in self.profile._dump_stats():
+ source_ref = source_stats.source()
+ source_lines = source_ref.text().splitlines()
+ dedent = min([len(line) - len(line.lstrip(" ")) for line in source_lines])
+ source_lines = [line[dedent:] for line in source_lines]
+
+ start_line = source_ref.starting_lineno()
+ end_line = start_line + len(source_lines)
+ source_range = range(start_line, end_line)
+ lineno = _ScriptProfileColumn("Line #")
+ hits = _ScriptProfileColumn("Hits")
+ time_ns = _ScriptProfileColumn("Time (ns)")
+ line_contents = _ScriptProfileColumn("Line Contents", 0, 1)
+ stats = source_stats.line_map()
+ for line in source_range:
+ lineno.add_row(line, line)
+ line_contents.add_row(line, source_lines[line - start_line])
+ stat = stats.get(line)
+ if stat is not None:
+ hits.add_row(line, stat.count())
+ time_ns.add_row(line, stat.duration_ns())
+
+ table = _ScriptProfileTable(
+ [lineno, hits, time_ns, line_contents], list(source_range)
+ )
+ outputs.append(table.dump_string())
+ return "\n\n".join(outputs)
+
+ def dump(self):
+ print(self.dump_string())
+
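+# Usage sketch (illustrative; `scripted_fn` is a hypothetical scripted function):
+#
+#   profile = _ScriptProfile()
+#   profile.enable()
+#   scripted_fn(inputs)   # run some TorchScript code while profiling is on
+#   profile.disable()
+#   profile.dump()        # prints per-line hit counts and times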
+
+def _unwrap_optional(x):
+ assert x is not None, "Unwrapping null optional"
+ return x
+
+
+_register_builtin(_unwrap_optional, "aten::_unwrap_optional")
+_register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
+_register_builtin(has_torch_function, "aten::has_torch_function")
+_register_builtin(has_torch_function_unary, "aten::has_torch_function")
+_register_builtin(has_torch_function_variadic, "aten::has_torch_function")
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_serialization.py b/venv/lib/python3.10/site-packages/torch/jit/_serialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..00b9254a263c2b0cf1dafcb94be034a46edf58bc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_serialization.py
@@ -0,0 +1,266 @@
+"""Serialization.
+
+This module contains functionality for serializing TorchScript modules, notably:
+ * torch.jit.save
+ * torch.jit.load
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+import os
+
+import torch
+from torch.jit._recursive import wrap_cpp_module
+from torch.serialization import validate_cuda_device
+
+
+def save(m, f, _extra_files=None):
+ r"""
+ Save an offline version of this module for use in a separate process.
+
+ The saved module serializes all of the methods, submodules, parameters, and
+ attributes of this module. It can be loaded into the C++ API using
+ ``torch::jit::load(filename)`` or into the Python API with
+ :func:`torch.jit.load <torch.jit.load>`.
+
+ To be able to save a module, it must not make any calls to native Python
+ functions. This means that all submodules must be subclasses of
+ :class:`ScriptModule` as well.
+
+ .. DANGER::
+ All modules, no matter their device, are always loaded onto the CPU
+ during loading. This is different from :func:`torch.load`'s semantics
+ and may change in the future.
+
+ Args:
+ m: A :class:`ScriptModule` to save.
+ f: A file-like object (has to implement write and flush) or a string
+ containing a file name.
+ _extra_files: Map from filename to contents which will be stored as part of `f`.
+
+ .. note::
+ torch.jit.save attempts to preserve the behavior of some operators
+ across versions. For example, dividing two integer tensors in
+ PyTorch 1.5 performed floor division, and if the module
+ containing that code is saved in PyTorch 1.5 and loaded in PyTorch 1.6
+ its division behavior will be preserved. The same module saved in
+ PyTorch 1.6 will fail to load in PyTorch 1.5, however, since the
+ behavior of division changed in 1.6, and 1.5 does not know how to
+ replicate the 1.6 behavior.
+
+ Example:
+ .. testcode::
+
+ import torch
+ import io
+
+ class MyModule(torch.nn.Module):
+ def forward(self, x):
+ return x + 10
+
+ m = torch.jit.script(MyModule())
+
+ # Save to file
+ torch.jit.save(m, 'scriptmodule.pt')
+ # This line is equivalent to the previous
+ m.save("scriptmodule.pt")
+
+ # Save to io.BytesIO buffer
+ buffer = io.BytesIO()
+ torch.jit.save(m, buffer)
+
+ # Save with extra files
+ extra_files = {'foo.txt': b'bar'}
+ torch.jit.save(m, 'scriptmodule.pt', _extra_files=extra_files)
+ """
+ if _extra_files is None:
+ _extra_files = {}
+ if isinstance(f, (str, os.PathLike)):
+ m.save(f, _extra_files=_extra_files)
+ else:
+ ret = m.save_to_buffer(_extra_files=_extra_files)
+ f.write(ret)
+
+
+def load(f, map_location=None, _extra_files=None, _restore_shapes=False):
+ r"""
+ Load a :class:`ScriptModule` or :class:`ScriptFunction` previously saved with :func:`torch.jit.save <torch.jit.save>`.
+
+ All previously saved modules, no matter their device, are first loaded onto CPU,
+ and then are moved to the devices they were saved from. If this fails (e.g.
+ because the runtime system doesn't have certain devices), an exception is
+ raised.
+
+ Args:
+ f: a file-like object (has to implement read, readline, tell, and seek),
+ or a string containing a file name
+ map_location (string or torch.device): A simplified version of
+ ``map_location`` in `torch.load` used to dynamically remap
+ storages to an alternative set of devices.
+ _extra_files (dictionary of filename to content): The extra
+ filenames given in the map would be loaded and their content
+ would be stored in the provided map.
+ _restore_shapes (bool): Whether or not to retrace the module on load using stored inputs
+
+ Returns:
+ A :class:`ScriptModule` object.
+
+ Example:
+ .. testcode::
+
+ import torch
+ import io
+
+ torch.jit.load('scriptmodule.pt')
+
+ # Load ScriptModule from io.BytesIO object
+ with open('scriptmodule.pt', 'rb') as f:
+ buffer = io.BytesIO(f.read())
+
+ # Load all tensors to the original device
+ torch.jit.load(buffer)
+
+ # Load all tensors onto CPU, using a device
+ buffer.seek(0)
+ torch.jit.load(buffer, map_location=torch.device('cpu'))
+
+ # Load all tensors onto CPU, using a string
+ buffer.seek(0)
+ torch.jit.load(buffer, map_location='cpu')
+
+ # Load with extra files.
+ extra_files = {'foo.txt': ''} # values will be replaced with data
+ torch.jit.load('scriptmodule.pt', _extra_files=extra_files)
+ print(extra_files['foo.txt'])
+
+ .. testoutput::
+ :hide:
+
+ ...
+
+ .. testcleanup::
+
+ import os
+ os.remove("scriptmodule.pt")
+ """
+ if isinstance(f, (str, os.PathLike)):
+ if not os.path.exists(f): # type: ignore[type-var]
+ raise ValueError(f"The provided filename {f} does not exist") # type: ignore[str-bytes-safe]
+ if os.path.isdir(f):
+ raise ValueError(f"The provided filename {f} is a directory") # type: ignore[str-bytes-safe]
+
+ map_location = validate_map_location(map_location)
+ if _extra_files is None:
+ _extra_files = {}
+
+ cu = torch._C.CompilationUnit()
+ if isinstance(f, (str, os.PathLike)):
+ cpp_module = torch._C.import_ir_module(cu, os.fspath(f), map_location, _extra_files, _restore_shapes) # type: ignore[call-arg]
+ else:
+ cpp_module = torch._C.import_ir_module_from_buffer(
+ cu, f.read(), map_location, _extra_files, _restore_shapes
+ ) # type: ignore[call-arg]
+
+ # TODO: Pretty sure this approach loses ConstSequential status and such
+ return wrap_cpp_module(cpp_module)
+
+
+def validate_map_location(map_location=None):
+ if isinstance(map_location, str):
+ map_location = torch.device(map_location)
+ elif not (map_location is None or isinstance(map_location, torch.device)):
+ raise ValueError(
+ "map_location should be either None, string or torch.device, "
+ "but got type: " + str(type(map_location))
+ )
+
+ if str(map_location).startswith("cuda"):
+ validate_cuda_device(map_location)
+
+ return map_location
+
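+# Illustrative behavior (hand-worked; no CUDA validation is triggered here
+# since none of the accepted inputs names a CUDA device):
+#
+#   >>> validate_map_location(None)    # returns None
+#   >>> validate_map_location("cpu")   # returns torch.device("cpu")
+#   >>> validate_map_location(0)       # raises ValueError: ints are not accepted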
+
+def jit_module_from_flatbuffer(f):
+ if isinstance(f, (str, os.PathLike)):
+ f = os.fspath(f)
+ return wrap_cpp_module(torch._C._load_jit_module_from_file(f))
+ else:
+ return wrap_cpp_module(torch._C._load_jit_module_from_bytes(f.read()))
+
+
+def save_jit_module_to_flatbuffer(m, f, _extra_files=None):
+ r"""
+ Save an offline version of this module for use in a separate process.
+
+ The saved module serializes all of the methods, submodules, parameters, and
+ attributes of this module. It can be loaded into the C++ API using
+ ``torch::jit::load_jit_module_from_file(filename)`` or into the Python API with
+ :func:`torch.jit.jit_module_from_flatbuffer`.
+
+ To be able to save a module, it must not make any calls to native Python
+ functions. This means that all submodules must be subclasses of
+ :class:`ScriptModule` as well.
+
+ .. DANGER::
+ All modules, no matter their device, are always loaded onto the CPU
+ during loading. This is different from :func:`torch.load`'s semantics
+ and may change in the future.
+
+ Args:
+ m: A :class:`ScriptModule` to save.
+ f: A string containing a file path.
+ _extra_files: Map from filename to contents which will be stored as part of `f`.
+
+
+ Example:
+ .. testcode::
+
+ import torch
+ import io
+
+ class MyModule(torch.nn.Module):
+ def forward(self, x):
+ return x + 10
+
+ m = torch.jit.script(MyModule())
+
+ # Save to file
+ torch.jit.save_jit_module_to_flatbuffer(m, 'scriptmodule.ff')
+ """
+ extra_files = _extra_files
+ if extra_files is None:
+ extra_files = {}
+
+ if isinstance(f, (str, os.PathLike)):
+ f = os.fspath(f)
+ torch._C._save_jit_module(m._c, f, extra_files)
+ else:
+ s = torch._C._save_jit_module_to_bytes(m._c, extra_files)
+ f.write(s)
+
+
+def get_flatbuffer_module_info(path_or_file):
+ r"""Get some information regarding a model file in flatbuffer format.
+
+ Args:
+ path_or_file: Either str, Path or a file-like object (BytesIO OK).
+ If it's str or Path, we will read the file referenced by that
+ path as bytes.
+
+ Returns:
+ A dict with metadata on what that file contains, currently looks like
+ this:
+ {
+ 'bytecode_version': 4, # int
+ 'operator_version': 4, # int
+ 'function_names': {
+ '__torch__.___torch_mangle_0.Foo.forward'}, # set
+ 'type_names': set(), # set
+ 'opname_to_num_args': {'aten::linear': 3} # Dict[str, int]
+ }
+ """
+ if isinstance(path_or_file, (str, os.PathLike)):
+ with open(path_or_file, "rb") as f:
+ all_bytes = f.read()
+ else:
+ all_bytes = path_or_file.read()
+ return torch._C._get_module_info_from_flatbuffer(all_bytes)
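+
+
+# Usage sketch (illustrative; 'scriptmodule.ff' is a hypothetical path created
+# with save_jit_module_to_flatbuffer above):
+#
+#   import torch
+#
+#   m = torch.jit.script(torch.nn.Linear(3, 3))
+#   save_jit_module_to_flatbuffer(m, 'scriptmodule.ff')
+#   info = get_flatbuffer_module_info('scriptmodule.ff')
+#   print(info['bytecode_version'], info['operator_version'])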
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_shape_functions.py b/venv/lib/python3.10/site-packages/torch/jit/_shape_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..51515039866da2f5401b4edeb66a2d35ad24f94b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_shape_functions.py
@@ -0,0 +1,1459 @@
+import math
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+number = Union[int, float]
+# flake8: noqa
+
+###
+# There are generated files that depend on this file
+# To re-generate, please run from the root of the repo:
+# python torchgen/shape_functions/gen_jit_shape_functions.py
+
+# How to test:
+# After regenerating files, compile PyTorch.
+# Then run: ./build/bin/test_jit --gtest_filter=TestShapeGraphLinting.Basic
+# If you have enabled opinfo testing for the op, also run:
+# python test/test_ops_jit.py TestJitCPU.test_variant_consistency_jit_[FAILING_OP]_cpu_float32
+# to reproduce errors from opinfo tests.
+
+# Example PR: https://github.com/pytorch/pytorch/pull/80860/files
+####
+
+import torch
+
+
+def broadcast(a: List[int], b: List[int]):
+ dimsA = len(a)
+ dimsB = len(b)
+ ndim = max(dimsA, dimsB)
+ expandedSizes: List[int] = []
+
+ for i in range(ndim):
+ offset = ndim - 1 - i
+ dimA = dimsA - 1 - offset
+ dimB = dimsB - 1 - offset
+ sizeA = a[dimA] if (dimA >= 0) else 1
+ sizeB = b[dimB] if (dimB >= 0) else 1
+
+ if sizeA != sizeB and sizeA != 1 and sizeB != 1:
+ # TODO: only assertion error is bound in C++ compilation right now
+ raise AssertionError(
+ f"The size of tensor a {sizeA} must match the size of tensor b ({sizeB}) at non-singleton dimension {i}"
+ )
+
+ expandedSizes.append(sizeB if sizeA == 1 else sizeA)
+
+ return expandedSizes
+
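+# Hand-worked examples of the broadcasting rule implemented above (shapes are
+# right-aligned, and a size of 1 expands to match the other tensor):
+#
+#   >>> broadcast([3, 1, 5], [4, 5])
+#   [3, 4, 5]
+#   >>> broadcast([2, 3], [3])
+#   [2, 3]
+#   >>> broadcast([2], [3])   # raises AssertionError: 2 vs 3 at a non-singleton dim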
+
+def broadcast_three(a: List[int], b: List[int], c: List[int]):
+ return broadcast(broadcast(a, b), c)
+
+
+def broadcast_one_three(a: List[int], b: Any, c: List[int]):
+ return broadcast(a, c)
+
+
+def adaptive_avg_pool2d(self: List[int], out: List[int]):
+ assert len(out) == 2
+ assert len(self) == 3 or len(self) == 4
+ for i in range(1, len(self)):
+ assert self[i] != 0
+
+ shape: List[int] = []
+ for i in range(0, len(self) - 2):
+ shape.append(self[i])
+ for elem in out:
+ shape.append(elem)
+ return shape
+
+
+def _copy(self: List[int]):
+ out: List[int] = []
+ for elem in self:
+ out.append(elem)
+ return out
+
+
+def unary(self: List[int]):
+ return _copy(self)
+
+
+def broadcast_inplace(a: List[int], b: List[int]):
+ dimsA = len(a)
+ dimsB = len(b)
+ if dimsB > dimsA:
+ raise AssertionError(
+ f"The dims of tensor b ({dimsB}) must be less than or equal tothe dims of tensor a ({dimsA}) "
+ )
+ for dimA in range(dimsA):
+ dimB = dimsB - dimsA + dimA
+ sizeA = a[dimA]
+ sizeB = b[dimB] if (dimB >= 0) else 1
+ if sizeA != sizeB and sizeB != 1:
+ # TODO: only assertion error is bound in C++ compilation right now
+ raise AssertionError(
+ f"The size of tensor a ({sizeA}) must match the size of tensor b "
+ f"({sizeB}) at non-singleton dimension {dimA}"
+ )
+ return _copy(a)
+
+
+def expand(self: List[int], sizes: List[int]):
+ assert len(sizes) >= len(self)
+ ndim = len(sizes)
+ tensor_dim = len(self)
+ if ndim == 0:
+ return _copy(sizes)
+ out: List[int] = []
+ for i in range(ndim):
+ offset = ndim - 1 - i
+ dim = tensor_dim - 1 - offset
+ size = self[dim] if dim >= 0 else 1
+ targetSize = sizes[i]
+ if targetSize == -1:
+ assert dim >= 0
+ targetSize = size
+ if size != targetSize:
+ assert size == 1
+ size = targetSize
+ out.append(size)
+ return out
+
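+# Hand-worked examples for `expand` (singleton dims stretch to the target size;
+# -1 keeps the existing size):
+#
+#   >>> expand([3, 1], [2, 3, 4])
+#   [2, 3, 4]
+#   >>> expand([3, 1], [-1, 4])
+#   [3, 4]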
+
+def expand_one_unused(self: List[int], sizes: List[int], inp0: Any):
+ return expand(self, sizes)
+
+
+def infer_size_impl(shape: List[int], numel: int) -> List[int]:
+ newsize = 1
+ infer_dim: Optional[int] = None
+ for dim in range(len(shape)):
+ if shape[dim] == -1:
+ if infer_dim is not None:
+ raise AssertionError("only one dimension can be inferred")
+ infer_dim = dim
+ elif shape[dim] >= 0:
+ newsize *= shape[dim]
+ else:
+ raise AssertionError("invalid shape dimensions")
+ if not (
+ numel == newsize
+ or (infer_dim is not None and newsize > 0 and numel % newsize == 0)
+ ):
+ raise AssertionError("invalid shape")
+ out = _copy(shape)
+ if infer_dim is not None:
+ out[infer_dim] = numel // newsize
+ return out
+
+
+def numel(sizes: List[int]):
+ numel = 1
+ for elem in sizes:
+ numel *= elem
+ return numel
+
+
+def view(self: List[int], sizes: List[int]):
+ return infer_size_impl(sizes, numel(self))
+
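+# Hand-worked examples for `view` (a single -1 is inferred from the element count):
+#
+#   >>> view([2, 3, 4], [6, -1])
+#   [6, 4]
+#   >>> view([2, 3], [7])   # raises AssertionError: 6 elements cannot be viewed as [7]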
+
+def view_one_unused(self: List[int], sizes: List[int], *, implicit: bool = False):
+ return view(self, sizes)
+
+
+def sum_mean_dim(
+ self: List[int], opt_dims: Optional[List[int]], keep_dim: bool, dt: Any
+):
+ out: List[int] = []
+ if opt_dims is None or len(opt_dims) == 0:
+ dims: List[int] = list(range(len(self)))
+ else:
+ dims = opt_dims
+
+ for idx in range(len(self)):
+ is_mean_dim: bool = False
+ for reduce_dim in dims:
+ if idx == maybe_wrap_dim(reduce_dim, len(self)):
+ is_mean_dim = True
+ if is_mean_dim:
+ if keep_dim:
+ out.append(1)
+ else:
+ out.append(self[idx])
+ return out
+
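+# Hand-worked examples for `sum_mean_dim` (reduced dims are dropped, or kept as
+# size 1 when keep_dim is True; `dt` mirrors the unused dtype argument):
+#
+#   >>> sum_mean_dim([2, 3, 4], [1], False, None)
+#   [2, 4]
+#   >>> sum_mean_dim([2, 3, 4], [1], True, None)
+#   [2, 1, 4]
+#   >>> sum_mean_dim([2, 3, 4], None, False, None)   # reduce over all dims
+#   []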
+
+def max_dim(self: List[int], dim: int, keep_dim: bool):
+ out = sum_mean_dim(self, [dim], keep_dim, None)
+ return out, out
+
+
+# note: python already rounds down towards negative infinity on integer division, special arithmetic not needed
+def div_rtn(x: int, y: int):
+ return x // y
+
+
+def pooling_output_shape_pad_lr(
+ inputSize: int,
+ kernelSize: int,
+ pad_l: int,
+ pad_r: int,
+ stride: int,
+ dilation: int,
+ ceil_mode: bool,
+):
+ outputSize = (
+ div_rtn(
+ inputSize
+ + pad_l
+ + pad_r
+ - dilation * (kernelSize - 1)
+ - 1
+ + (stride - 1 if ceil_mode else 0),
+ stride,
+ )
+ + 1
+ )
+ if ceil_mode:
+ if (outputSize - 1) * stride >= inputSize + pad_l:
+ outputSize = outputSize - 1
+ return outputSize
+
+
+def pooling_output_shape(
+ inputSize: int,
+ kernelSize: int,
+ pad_l: int,
+ stride: int,
+ dilation: int,
+ ceil_mode: bool,
+):
+ assert stride != 0, "stride should not be zero"
+ return pooling_output_shape_pad_lr(
+ inputSize, kernelSize, pad_l, pad_l, stride, dilation, ceil_mode
+ )
+
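+# A worked example of the pooling arithmetic above: for input size 16, kernel 5,
+# padding 1, stride 2, dilation 1, no ceil mode:
+#
+#   floor((16 + 2*1 - 1*(5-1) - 1) / 2) + 1 = floor(13 / 2) + 1 = 7
+#
+#   >>> pooling_output_shape(16, 5, 1, 2, 1, False)
+#   7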
+
+def pool2d_shape_check(
+ input: List[int],
+ kH: int,
+ kW: int,
+ dH: int,
+ dW: int,
+ padH: int,
+ padW: int,
+ dilationH: int,
+ dilationW: int,
+ nInputPlane: int,
+ inputHeight: int,
+ inputWidth: int,
+ outputHeight: int,
+ outputWidth: int,
+):
+ ndim = len(input)
+ nOutputPlane = nInputPlane
+
+ assert kW > 0 and kH > 0
+ assert dW > 0 and dH > 0
+ assert dilationH > 0 and dilationW > 0
+
+ valid_dims = input[1] != 0 and input[2] != 0
+ assert (
+ (ndim == 3 and input[0] != 0 and valid_dims)
+ or (ndim == 4 and valid_dims and input[3] != 0)
+ )
+
+ assert kW // 2 >= padW and kH // 2 >= padH
+ assert outputWidth >= 1 and outputHeight >= 1
+
+
+def max_pool2d(
+ input: List[int],
+ kernel_size: List[int],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ ceil_mode: bool,
+):
+ assert (
+ len(kernel_size) == 1 or len(kernel_size) == 2
+ ), "max_pool2d: kernel_size must either be a single int, or a tuple of two ints"
+ kH = kernel_size[0]
+ kW = kH if len(kernel_size) == 1 else kernel_size[1]
+
+ assert (
+ len(stride) == 0 or len(stride) == 1 or len(stride) == 2
+ ), "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints"
+ dH = kH if len(stride) == 0 else stride[0]
+ if len(stride) == 0:
+ dW = kW
+ elif len(stride) == 1:
+ dW = dH
+ else:
+ dW = stride[1]
+
+ assert (
+ len(padding) == 1 or len(padding) == 2
+ ), "max_pool2d: padding must either be a single int, or a tuple of two ints"
+ padH = padding[0]
+ padW = padH if len(padding) == 1 else padding[1]
+
+ assert (
+ len(dilation) == 1 or len(dilation) == 2
+ ), "max_pool2d: dilation must be either a single int, or a tuple of two ints"
+ dilationH = dilation[0]
+ dilationW = dilationH if len(dilation) == 1 else dilation[1]
+
+ assert len(input) == 3 or len(input) == 4
+
+ nbatch = input[-4] if len(input) == 4 else 1
+ nInputPlane = input[-3]
+ inputHeight = input[-2]
+ inputWidth = input[-1]
+
+ outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
+ outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)
+
+ pool2d_shape_check(
+ input,
+ kH,
+ kW,
+ dH,
+ dW,
+ padH,
+ padW,
+ dilationH,
+ dilationW,
+ nInputPlane,
+ inputHeight,
+ inputWidth,
+ outputHeight,
+ outputWidth,
+ )
+
+ if len(input) == 3:
+ return [nInputPlane, outputHeight, outputWidth]
+ else:
+ return [nbatch, nInputPlane, outputHeight, outputWidth]
+
+
+def max_pool2d_with_indices(
+ input: List[int],
+ kernel_size: List[int],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ ceil_mode: bool,
+):
+ out = max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
+ return (out, out)
+
+
+def upsample_nearest2d(
+ input: List[int],
+ output_size: Optional[List[int]],
+ scale_factors: Optional[List[float]],
+):
+ out: List[int] = []
+ out.append(input[0])
+ out.append(input[1])
+
+ if scale_factors is None and output_size is None:
+ assert 0, "Either output_size or scale_factors must be presented"
+
+ if output_size is not None:
+ assert (
+ scale_factors is None
+ ), "Must specify exactly one of output_size and scale_factors"
+ assert len(output_size) == 2
+ out.append(output_size[0])
+ out.append(output_size[1])
+
+ if scale_factors is not None:
+ assert (
+ output_size is None
+ ), "Must specify exactly one of output_size and scale_factors"
+ assert len(scale_factors) == 2
+ out.append(int(input[2] * scale_factors[0]))
+ out.append(int(input[3] * scale_factors[1]))
+
+ return out
+
+
+def mm(self: List[int], mat2: List[int]):
+ assert len(self) == 2, "self must be a matrix"
+ assert len(mat2) == 2, "mat2 must be a matrix"
+
+ assert self[1] == mat2[0]
+ return [self[0], mat2[1]]
+
+
+def dot(self: List[int], tensor: List[int]):
+ assert len(self) == 1 and len(tensor) == 1
+ assert self[0] == tensor[0]
+ out: List[int] = []
+ return out
+
+
+def mv(self: List[int], vec: List[int]):
+ assert len(self) == 2 and len(vec) == 1
+ assert self[1] == vec[0]
+ # TODO: return self
+ return [self[0]]
+
+
+def unsqueeze(li: List[int], dim: int):
+ dim = maybe_wrap_dim(dim, len(li) + 1)
+ out = _copy(li)
+ out.insert(dim, 1)
+ return out
+
+
+def squeeze_nodim(li: List[int]):
+ out: List[int] = []
+ for i in range(len(li)):
+ if li[i] != 1:
+ out.append(li[i])
+ return out
+
+
+def squeeze(li: List[int], dim: int):
+ out: List[int] = []
+ wrapped_dim = maybe_wrap_dim(dim, len(li))
+ for i in range(len(li)):
+ if i == wrapped_dim:
+ if li[i] != 1:
+ out.append(li[i])
+ else:
+ out.append(li[i])
+ return out
+
+
+def squeeze_dims(li: List[int], dims: List[int]):
+ if len(dims) == 0:
+ return li
+ wrapped_dims = _copy(dims)
+ for i in range(len(dims)):
+ wrapped_dims[i] = maybe_wrap_dim(wrapped_dims[i], len(li))
+ result: List[int] = []
+ for i in range(len(li)):
+ if li[i] == 1:
+ if i not in wrapped_dims:
+ result.append(li[i])
+ else:
+ result.append(li[i])
+ return result
+
+
+def index_select(self: List[int], dim: int, index: List[int]):
+ dim = maybe_wrap_dim(dim, len(self))
+ numel = multiply_integers(index)
+ assert len(index) <= 1
+ assert dim == 0 or dim < len(self)
+ result_size: List[int] = []
+ for i in range(len(self)):
+ if dim == i:
+ result_size.append(numel)
+ else:
+ result_size.append(self[i])
+ return result_size
+
+
+def embedding(
+ weight: List[int],
+ indices: List[int],
+ padding_idx: int = -1,
+ scale_grad_by_freq: bool = False,
+ sparse: bool = False,
+):
+ assert len(weight) == 2
+ if len(indices) == 1:
+ return index_select(weight, 0, indices)
+ size = _copy(indices)
+ size.append(weight[1])
+ return size
+
+
+def max_int():
+ return 9223372036854775807
+
+
+def slice(
+ self: List[int], dim: int, start: Optional[int], end: Optional[int], step: int
+):
+ ndim = len(self)
+ assert ndim != 0
+ dim = maybe_wrap_dim(dim, ndim)
+ start_val = start if start is not None else 0
+ end_val = end if end is not None else max_int()
+ assert step > 0
+ if start_val == max_int():
+ start_val = 0
+ if start_val < 0:
+ start_val += self[dim]
+ if end_val < 0:
+ end_val += self[dim]
+ if start_val < 0:
+ start_val = 0
+ elif start_val > self[dim]:
+ start_val = self[dim]
+ if end_val < start_val:
+ end_val = start_val
+ elif end_val >= self[dim]:
+ end_val = self[dim]
+ slice_len = end_val - start_val
+ out = _copy(self)
+ out[dim] = (slice_len + step - 1) // step
+ return out
+
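+# Hand-worked examples for `slice` (negative indices wrap, out-of-range ends are
+# clamped, and the slice length is rounded up for the step):
+#
+#   >>> slice([10, 4], 0, 2, 8, 3)    # elements 2 and 5
+#   [2, 4]
+#   >>> slice([10], 0, -3, None, 1)   # start wraps to 7
+#   [3]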
+
+def check_cat_no_zero_dim(tensors: List[List[int]]):
+ for tensor in tensors:
+ assert len(tensor) > 0
+
+
+def legacy_cat_wrap_dim(dim: int, tensor_sizes: List[List[int]]):
+ out_dim: Optional[int] = None
+ for size in tensor_sizes:
+ if not (len(size) == 1 and size[0] == 0):
+ if out_dim is None:
+ out_dim = maybe_wrap_dim(dim, len(size))
+ if out_dim is None:
+ out_dim = dim
+ return out_dim
+
+
+def should_skip(tensor: List[int]):
+ return numel(tensor) == 0 and len(tensor) == 1
+
+
+def check_cat_shape_except_dim(
+ first: List[int], second: List[int], dimension: int, index: int
+):
+ first_dims = len(first)
+ second_dims = len(second)
+ assert first_dims == second_dims, "Tensors must have same number of dimensions"
+ for dim in range(0, first_dims):
+ if dim != dimension:
+ assert (
+ first[dim] == second[dim]
+ ), "Sizes of tensors must match except in dimension"
+
+
+def cat(tensors: List[List[int]], dim: int):
+ check_cat_no_zero_dim(tensors)
+ dim = legacy_cat_wrap_dim(dim, tensors)
+ assert len(tensors) > 0
+ not_skipped_tensor: Optional[List[int]] = None
+ for tensor in tensors:
+ if not should_skip(tensor):
+ not_skipped_tensor = tensor
+ if not_skipped_tensor is None:
+ return [0]
+
+ cat_dim_size = 0
+
+ for i in range(len(tensors)):
+ tensor = tensors[i]
+ if not should_skip(tensor):
+ check_cat_shape_except_dim(not_skipped_tensor, tensor, dim, i)
+ cat_dim_size = cat_dim_size + tensor[dim]
+
+ result_size = _copy(not_skipped_tensor)
+ result_size[dim] = cat_dim_size
+ return result_size
+
+
+def stack(tensors: List[List[int]], dim: int):
+ unsqueezed_tensors: List[List[int]] = []
+ for tensor in tensors:
+ unsqueezed = unsqueeze(tensor, dim)
+ unsqueezed_tensors.append(unsqueezed)
+ return cat(unsqueezed_tensors, dim)
+
+
+def select(self: List[int], dim: int, index: int):
+ ndim = len(self)
+ assert ndim != 0
+ dim = maybe_wrap_dim(dim, ndim)
+ size = self[dim]
+ assert not (index < -size or index >= size)
+ if index < 0:
+ index += size
+ out: List[int] = []
+ for i in range(ndim):
+ if i != dim:
+ out.append(self[i])
+ return out
+
+
+def matmul(tensor1: List[int], tensor2: List[int]):
+ dim_tensor1 = len(tensor1)
+ dim_tensor2 = len(tensor2)
+ if dim_tensor1 == 1 and dim_tensor2 == 1:
+ return dot(tensor1, tensor2)
+ elif dim_tensor1 == 2 and dim_tensor2 == 1:
+ return mv(tensor1, tensor2)
+ elif dim_tensor1 == 1 and dim_tensor2 == 2:
+ return squeeze(mm(unsqueeze(tensor1, 0), tensor2), 0)
+ elif dim_tensor1 == 2 and dim_tensor2 == 2:
+ return mm(tensor1, tensor2)
+ elif dim_tensor1 >= 1 and dim_tensor2 >= 1:
+ # We are multiplying b1 x n x m1 by b2 x m2 x p (where b1 and b2 can be lists);
+ # we track m1 vs m2 separately even though they must match for nicer error messages
+ n = tensor1[-2] if dim_tensor1 > 1 else 1
+ m1 = tensor1[-1]
+ batch_tensor1: List[int] = []
+ # TODO: handling of slice
+ for i in range(dim_tensor1 - 2):
+ batch_tensor1.append(tensor1[i])
+ m2 = tensor2[-2] if dim_tensor2 > 1 else tensor2[-1]
+ p = tensor2[-1]
+ batch_tensor2: List[int] = []
+ # TODO: handling of slice
+ for i in range(dim_tensor2 - 2):
+ batch_tensor2.append(tensor2[i])
+
+ # expand the batch portion (i.e. cut off matrix dimensions and expand rest)
+ expand_batch_portion = broadcast(batch_tensor1, batch_tensor2)
+
+ # todo: copy ?
+ output_shape = expand_batch_portion
+ if dim_tensor1 > 1:
+ output_shape.append(n)
+
+ if dim_tensor2 > 1:
+ output_shape.append(p)
+
+ return output_shape
+ else:
+ assert False, "both arguments to matmul need to be at least 1D"
+
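+# Hand-worked examples for `matmul` covering the 1-D promotion and batched cases:
+#
+#   >>> matmul([4], [4, 5])         # vector @ matrix: promoted to [1, 4], then squeezed
+#   [5]
+#   >>> matmul([10, 3, 4], [4, 5])  # batched: batch [10] broadcast against []
+#   [10, 3, 5]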
+
+def t(self: List[int]):
+ assert len(self) <= 2
+ self_len = len(self)
+ if self_len == 0:
+ out: List[int] = []
+ return out
+ elif self_len == 1:
+ return [self[0]]
+ else:
+ return [self[1], self[0]]
+
+
+def transpose(self: List[int], dim0: int, dim1: int):
+ ndims = len(self)
+ dim0 = maybe_wrap_dim(dim0, ndims)
+ dim1 = maybe_wrap_dim(dim1, ndims)
+ if dim0 == dim1:
+ return _copy(self)
+ out: List[int] = []
+ for i in range(ndims):
+ if i == dim0:
+ out.append(self[dim1])
+ elif i == dim1:
+ out.append(self[dim0])
+ else:
+ out.append(self[i])
+ return out
+
+
+def linear(input: List[int], weight: List[int], bias: Optional[List[int]]):
+ out = matmul(input, t(weight))
+ if bias is not None:
+ assert broadcast(bias, out) == out
+ return out
+
+
+def addmm(self: List[int], mat1: List[int], mat2: List[int], beta: Any, alpha: Any):
+ return broadcast(self, mm(mat1, mat2))
+
+
+def check_non_negative(array: List[int]) -> bool:
+ # TODO: look into rewriting with early return and getting loop unrolling to fire
+ # Note: despite the name, this returns True when a negative value is found;
+ # callers assert `not check_non_negative(...)`.
+ has_negative = False
+ for val in array:
+ if val < 0:
+ has_negative = True
+ return has_negative
+
+
+def check_shape_forward(
+ input: List[int],
+ weight_sizes: List[int],
+ bias: Optional[List[int]],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ groups: int,
+):
+ k = len(input)
+ weight_dim = len(weight_sizes)
+
+ # TODO: assertions could be expanded with the error messages
+ assert not check_non_negative(padding)
+ assert not check_non_negative(stride)
+
+ assert weight_dim == k
+ assert weight_sizes[0] >= groups
+ assert (weight_sizes[0] % groups) == 0
+ # only handling not transposed
+ assert input[1] == weight_sizes[1] * groups
+ assert bias is None or (len(bias) == 1 and bias[0] == weight_sizes[0])
+
+ for i in range(2, k):
+ assert (input[i] + 2 * padding[i - 2]) >= (
+ dilation[i - 2] * (weight_sizes[i] - 1) + 1
+ )
+
+ # this is not handling transposed convolution yet
+
+
+def conv_output_size(
+ input_size: List[int],
+ weight_size: List[int],
+ bias: Optional[List[int]],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ groups: int,
+):
+ check_shape_forward(
+ input_size, weight_size, bias, stride, padding, dilation, groups
+ )
+
+ has_dilation = len(dilation) > 0
+ dim = len(input_size)
+ output_size: List[int] = []
+ input_batch_size_dim = 0
+ weight_output_channels_dim = 0
+ output_size.append(input_size[input_batch_size_dim])
+ output_size.append(weight_size[weight_output_channels_dim])
+
+ for d in range(2, dim):
+ dilation_ = dilation[d - 2] if has_dilation else 1
+ kernel = dilation_ * (weight_size[d] - 1) + 1
+ output_size.append(
+ (input_size[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1
+ )
+ return output_size
+
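+# A worked example of the convolution arithmetic: a batch of one 32x32 RGB image
+# with 16 5x5 filters, padding 2, stride 1, dilation 1 keeps the spatial size:
+#
+#   (32 + 2*2 - (1*(5-1) + 1)) // 1 + 1 = 32
+#
+#   >>> conv_output_size([1, 3, 32, 32], [16, 3, 5, 5], None, [1, 1], [2, 2], [1, 1], 1)
+#   [1, 16, 32, 32]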
+
+def conv1d(
+ input: List[int],
+ weight: List[int],
+ bias: Optional[List[int]],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ groups: int,
+):
+ assert len(weight) == 3
+ assert len(input) == 3
+ return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
+
+
+def conv2d(
+ input: List[int],
+ weight: List[int],
+ bias: Optional[List[int]],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ groups: int,
+):
+ assert len(weight) == 4
+ assert len(input) == 4
+ return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
+
+
+def conv_backwards(
+ grad_output: List[int],
+ input: List[int],
+ weight: List[int],
+ biases: Optional[List[int]],
+):
+ # Bias gradient is always generated regardless of whether biases is supplied
+ return _copy(input), _copy(weight), [grad_output[1]]
+
+
+def conv_transpose2d_input(
+ input: List[int],
+ weight: List[int],
+ bias: Optional[List[int]] = None,
+ stride: Optional[List[int]] = None,
+ padding: Optional[List[int]] = None,
+ output_padding: Optional[List[int]] = None,
+ groups: int = 1,
+ dilation: Optional[List[int]] = None,
+) -> List[int]:
+ if stride is None:
+ stride = [1, 1]
+ if padding is None:
+ padding = [0, 0]
+ if output_padding is None:
+ output_padding = [0, 0]
+ if dilation is None:
+ dilation = [1, 1]
+ has_dilation = len(dilation) > 0
+ dim = len(input)
+ output_size: List[int] = []
+ input_batch_size_dim = 0
+ weight_output_channels_dim = 1
+ output_size.append(input[input_batch_size_dim])
+ output_size.append(weight[weight_output_channels_dim] * groups)
+
+ for d in range(2, dim):
+ dilation_ = dilation[d - 2] if has_dilation else 1
+ kernel = dilation_ * (weight[d] - 1)
+ output_size.append(
+ (input[d] - 1) * stride[d - 2]
+ - 2 * padding[d - 2]
+ + kernel
+ + output_padding[d - 2]
+ + 1
+ )
+ return output_size
+
+
+def conv_forwards(
+ input: List[int],
+ weight: List[int],
+ bias: Optional[List[int]],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ transposed: bool,
+ output_padding: List[int],
+ groups: int,
+) -> List[int]:
+ has_dilation = len(dilation) > 0
+ has_output_padding = len(output_padding) > 0
+ dim = len(input)
+ output_size: List[int] = []
+ input_batch_size_dim = 0
+ weight_output_channels_dim = 1 if transposed else 0
+ output_size.append(input[input_batch_size_dim])
+ if transposed:
+ output_size.append(weight[weight_output_channels_dim] * groups)
+ else:
+ output_size.append(weight[weight_output_channels_dim])
+
+ for d in range(2, dim):
+ dilation_ = dilation[d - 2] if has_dilation else 1
+ output_padding_ = output_padding[d - 2] if has_output_padding else 0
+ if transposed:
+ kernel = dilation_ * (weight[d] - 1)
+ output_size.append(
+ (input[d] - 1) * stride[d - 2]
+ - 2 * padding[d - 2]
+ + kernel
+ + output_padding_
+ + 1
+ )
+ else:
+ kernel = dilation_ * (weight[d] - 1) + 1
+ output_size.append(
+ (input[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1
+ )
+ return output_size
+
+
+def _conv_forwards(
+ input: List[int],
+ weight: List[int],
+ bias: Optional[List[int]],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ transposed: bool,
+ output_padding: List[int],
+ groups: int,
+ benchmark: bool,
+ deterministic: bool,
+ cudnn_enabled: bool,
+ allow_tf32: bool,
+) -> List[int]:
+ return conv_forwards(
+ input,
+ weight,
+ bias,
+ stride,
+ padding,
+ dilation,
+ transposed,
+ output_padding,
+ groups,
+ )
+
+
+def batch_norm(
+ input: List[int],
+ weight: Optional[List[int]],
+ bias: Optional[List[int]],
+ running_mean: Optional[List[int]],
+ running_var: Optional[List[int]],
+ training: bool,
+ momentum: float,
+ eps: float,
+ cudnn_enabled: bool,
+):
+ out: List[int] = []
+ for elem in input:
+ out.append(elem)
+ return out
+
+
+def conv3d(
+ input: List[int],
+ weight: List[int],
+ bias: Optional[List[int]],
+ stride: List[int],
+ padding: List[int],
+ dilation: List[int],
+ groups: int,
+):
+ assert len(weight) == 5
+ assert len(input) == 5
+ return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
+
+
+def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
+ if dim_post_expr <= 0:
+ assert wrap_scalar
+ dim_post_expr = 1
+ min = -dim_post_expr
+ max = dim_post_expr - 1
+ assert not (dim < min or dim > max)
+ if dim < 0:
+ dim += dim_post_expr
+ return dim
+
+
+def zero_dim_tensor(input: Any):
+ out: List[int] = []
+ return out
+
+
+def multiply_integers(li: List[int]):
+ out = 1
+ for elem in li:
+ out = out * elem
+ return out
+
+
+def arange_end(end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any):
+ assert end >= 0
+ return [int(math.ceil(end))]
+
+
+def arange_start(
+ start: number, end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any
+):
+ assert end >= 0
+ assert end >= start
+ return [int(math.ceil(end - start))]
+
+
+def arange_start_step(
+ start: number, end: number, step: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any
+):
+ assert step != 0
+ if step < 0:
+ assert start >= end
+ else:
+ assert end >= start
+ return [int(math.ceil((end - start) / step))]
+
+
+def permute(input: List[int], dims: List[int]):
+ assert len(input) == len(dims)
+ ndim = len(dims)
+ seen_dims: List[int] = []
+ newSizes: List[int] = []
+ for i in range(ndim):
+ dim = maybe_wrap_dim(dims[i], ndim)
+ seen_dims.append(dim)
+ newSizes.append(input[dim])
+ for i in range(1, ndim):
+ for j in range(i):
+ assert seen_dims[i] != seen_dims[j]
+ return newSizes
+
+
+def movedim(self: List[int], source: List[int], destination: List[int]) -> List[int]:
+ self_dim = len(self)
+ if self_dim <= 1:
+ return self
+ normalized_src: List[int] = []
+ normalized_dst: List[int] = []
+ for i in range(len(source)):
+ normalized_src.append(maybe_wrap_dim(source[i], self_dim))
+ normalized_dst.append(maybe_wrap_dim(destination[i], self_dim))
+ order = [-1 for i in range(self_dim)]
+ src_dims = [i for i in range(self_dim)]
+ dst_dims = [i for i in range(self_dim)]
+
+ for i in range(len(source)):
+ order[normalized_dst[i]] = normalized_src[i]
+ src_dims[normalized_src[i]] = -1
+ dst_dims[normalized_dst[i]] = -1
+
+ source_dims: List[int] = []
+ destination_dims: List[int] = []
+ for ele in src_dims:
+ if ele != -1:
+ source_dims.append(ele)
+ for ele in dst_dims:
+ if ele != -1:
+ destination_dims.append(ele)
+
+ rest_dim = self_dim - len(source)
+ for i in range(rest_dim):
+ order[destination_dims[i]] = source_dims[i]
+ return permute(self, order)
+
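+# A hand-worked example for `movedim`: moving dim 0 of a [2, 3, 4] tensor to
+# position 2 yields the permutation [1, 2, 0]:
+#
+#   >>> movedim([2, 3, 4], [0], [2])
+#   [3, 4, 2]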
+
+def flatten(input: List[int], start_dim: int, end_dim: int):
+ start_dim = maybe_wrap_dim(start_dim, len(input))
+ end_dim = maybe_wrap_dim(end_dim, len(input))
+ assert start_dim <= end_dim
+ if len(input) == 0:
+ return [1]
+ if start_dim == end_dim:
+ # TODO: return self
+ out: List[int] = []
+ for elem in input:
+ out.append(elem)
+ return out
+ slice_numel = 1
+ for i in range(start_dim, end_dim + 1):
+ slice_numel *= input[i]
+ # TODO: use slicing when slice optimization has landed
+ # slice_numel = multiply_integers(input[start_dim:end_dim - start_dim + 1])
+ shape: List[int] = []
+ for i in range(start_dim):
+ shape.append(input[i])
+ shape.append(slice_numel)
+ for i in range(end_dim + 1, len(input)):
+ shape.append(input[i])
+ return shape
+
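+# Hand-worked examples for `flatten` (dims start_dim..end_dim collapse into one):
+#
+#   >>> flatten([2, 3, 4, 5], 1, 2)
+#   [2, 12, 5]
+#   >>> flatten([2, 3], 0, -1)
+#   [6]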
+
+def nonzero_lower_bound(input: List[int]):
+ return [0, len(input)]
+
+
+def nonzero_upper_bound(input: List[int]):
+ return [numel(input), len(input)]
+
+
+def _reduce_along_dim(self: List[int], dim: int, keepdim: bool):
+ dim = maybe_wrap_dim(dim, len(self))
+ out: List[int] = []
+ for i, self_dim in enumerate(self):
+ if i == dim:
+ if keepdim:
+ out.append(1)
+ else:
+ out.append(self_dim)
+ return out
+
+
+def argmax(
+ self: List[int], dim: Optional[int] = None, keepdim: bool = False
+) -> List[int]:
+ if dim is None:
+ return []
+ return _reduce_along_dim(self, dim, keepdim)
+
+
+def bmm(self: List[int], mat2: List[int]) -> List[int]:
+ assert len(self) == 3, "bmm only supports 3D tensors"
+ assert len(mat2) == 3, "bmm only supports 3D tensors"
+ assert self[0] == mat2[0], "mismatching batch dimension"
+ assert self[2] == mat2[1], "mismatching contracting dimension"
+ return [self[0], self[1], mat2[2]]
+
+
+def _shape_as_tensor(self: List[int]) -> List[int]:
+ return [len(self)]
+
+
+def topk(self: List[int], k: int, dim: int = -1) -> Tuple[List[int], List[int]]:
+ if len(self) == 0:
+ result: List[int] = []
+ else:
+ assert (
+ k <= self[dim]
+ ), f"k ({k}) is too big for dimension {dim} of size {self[dim]}"
+ result = _copy(self)
+ result[dim] = k
+ return result, result
+
+
+def nll_loss_forward(
+ self: List[int], target: List[int], weight: Optional[List[int]], reduction: int
+) -> Tuple[List[int], List[int]]:
+ # This is taken shamelessly from the meta function in LossNLL.cpp
+ self_dim = len(self)
+ target_dim = len(target)
+ assert 0 < self_dim <= 2
+ assert target_dim <= 1
+ no_batch_dim = self_dim == 1 and target_dim == 0
+ assert no_batch_dim or (self[0] == target[0])
+ n_classes = self[-1]
+ scalar_shape: List[int] = []
+ assert weight is None or (len(weight) == 1 and weight[0] == n_classes)
+ if reduction == 0 and self_dim == 2:
+ reduction_shape = [self[0]]
+ else:
+ reduction_shape = scalar_shape
+ return reduction_shape, scalar_shape
+
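+# Hand-worked examples for `nll_loss_forward` with a [8, 10] input (batch of 8,
+# 10 classes): reduction=0 ('none') keeps a per-sample loss shape, other
+# reductions return a scalar shape; the second element is the total_weight shape:
+#
+#   >>> nll_loss_forward([8, 10], [8], None, 0)
+#   ([8], [])
+#   >>> nll_loss_forward([8, 10], [8], None, 1)
+#   ([], [])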
+
+def native_layer_norm(
+ input: List[int], normalized_shape: List[int]
+) -> Tuple[List[int], List[int], List[int]]:
+ reduction_shape: List[int] = []
+ num_unreduced_dimensions = len(input) - len(normalized_shape)
+ assert num_unreduced_dimensions >= 0
+ for i in range(num_unreduced_dimensions):
+ reduction_shape.append(input[i])
+ for i in range(num_unreduced_dimensions, len(input)):
+ reduction_shape.append(1)
+ return _copy(input), reduction_shape, reduction_shape
+
+
+def native_batch_norm(
+ input: List[int],
+ weight: Optional[List[int]],
+ bias: Optional[List[int]],
+ running_mean: Optional[List[int]],
+ running_var: Optional[List[int]],
+ training: bool,
+) -> Tuple[List[int], List[int], List[int]]:
+ if training:
+ _size = [input[1]]
+ else:
+ _size = [0]
+ return _copy(input), _size, _size
+
+
+def cross_entropy_loss(
+ self: List[int],
+ target: List[int],
+ weight: Optional[List[int]] = None,
+ reduction: int = 1,
+ ignore_index: int = -100,
+ label_smoothing: float = 0.0,
+) -> List[int]:
+ result_shape = nll_loss_forward(self, target, weight, reduction)[0]
+ return result_shape
+
+
+"""
+Currently deferring the enabling of this, as part of the proposal to suspend
+adding ops.
+There are currently cases in the test suite where this is being called
+in the SSA opinfo tests with unexpected values (e.g. a list of two ints, see the first
+opinfo test). The behavior of index is significantly dependent on the inputs.
+
+This could be an error with how we are matching up shape functions, or that this
+function needs to just implement everything.
+
+def index_Tensor(self: List[int], indices: List[Optional[List[int]]]) -> List[int]:
+ assert len(indices) <= len(self), "More indices than dimensions to index"
+ broadcasted_shape: List[int] = []
+ for index_tensor_shape in indices:
+ if index_tensor_shape is not None:
+ broadcasted_shape = broadcast(broadcasted_shape, index_tensor_shape)
+ return broadcasted_shape
+"""
+
+ScriptFn = torch._C.ScriptFunction
+shape_compute_graph_mapping: Dict[str, ScriptFn] = {}
+bounded_compute_graph_mapping: Dict[str, Tuple[ScriptFn, ScriptFn]] = {}
+script_func_map: Dict[Callable, ScriptFn] = {}
+
+
+def process_func(func: Callable):
+ if func not in script_func_map:
+ scripted_func = torch.jit.script(func)
+
+ torch._C._jit_pass_inline(scripted_func.graph)
+
+ for _ in range(2):
+ torch._C._jit_pass_peephole(scripted_func.graph)
+ torch._C._jit_pass_constant_propagation(scripted_func.graph)
+
+ script_func_map[func] = scripted_func
+ return script_func_map[func]
+
+
+def add_shape_compute_mapping(operator_schema: str, func: Callable):
+ global shape_compute_graph_mapping
+
+ shape_compute_graph_mapping[operator_schema] = process_func(func)
+
+
+def add_bounded_compute_mapping(
+ operator_schema: str, lower_bound_func: Callable, upper_bound_func: Callable
+):
+ # Adds a shape compute function for both upper and lower bounds
+ fns = (process_func(lower_bound_func), process_func(upper_bound_func))
+ bounded_compute_graph_mapping[operator_schema] = fns
+
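+# Registration sketch (hypothetical namespace/op, for illustration only): a shape
+# function takes the input shapes as List[int] and returns the output shape, and
+# is registered against the full operator schema string:
+#
+#   def my_unary_shape(self: List[int]) -> List[int]:
+#       return _copy(self)
+#
+#   add_shape_compute_mapping("myns::my_unary(Tensor self) -> Tensor", my_unary_shape)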
+
+add_shape_compute_mapping(
+ "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)",
+ unary,
+)
+add_shape_compute_mapping(
+ "aten::rsub.Tensor(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", unary
+)
+add_shape_compute_mapping(
+ "aten::dropout(Tensor input, float p, bool train) -> Tensor", unary
+)
+add_shape_compute_mapping(
+ "aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor",
+ adaptive_avg_pool2d,
+)
+add_shape_compute_mapping(
+ "prim::NumToTensor.Scalar(Scalar a) -> Tensor", zero_dim_tensor
+)
+add_shape_compute_mapping("prim::NumToTensor.bool(bool a) -> Tensor", zero_dim_tensor)
+add_shape_compute_mapping(
+ "aten::zeros(int[] size, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)",
+ unary,
+)
+add_shape_compute_mapping(
+ "aten::to.dtype(Tensor(a) self, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor(a))",
+ unary,
+)
+add_shape_compute_mapping(
+ "aten::arange(Scalar end, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)",
+ arange_end,
+)
+add_shape_compute_mapping(
+ "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor",
+ arange_start,
+)
+add_shape_compute_mapping(
+ "aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor",
+ arange_start_step,
+)
+add_shape_compute_mapping("aten::squeeze(Tensor(a) self) -> Tensor(a)", squeeze_nodim)
+add_shape_compute_mapping(
+ "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", squeeze
+)
+add_shape_compute_mapping(
+ "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)", squeeze_dims
+)
+add_shape_compute_mapping(
+ "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", unsqueeze
+)
+add_shape_compute_mapping(
+ "aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a)",
+ slice,
+)
+add_shape_compute_mapping(
+ "aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)", select
+)
+add_shape_compute_mapping(
+ "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", index_select
+)
+add_shape_compute_mapping(
+ "aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, "
+ "float eps=1e-05, bool cudnn_enable=True) -> Tensor",
+ unary,
+)
+add_shape_compute_mapping(
+ "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", unary
+)
+add_shape_compute_mapping(
+ "aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor",
+ unary,
+)
+add_shape_compute_mapping(
+ "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)",
+ unary,
+)
+add_shape_compute_mapping(
+ "aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor",
+ embedding,
+)
+add_shape_compute_mapping("aten::mm(Tensor self, Tensor mat2) -> Tensor", mm)
+add_shape_compute_mapping("aten::dot(Tensor self, Tensor tensor) -> Tensor", dot)
+add_shape_compute_mapping("aten::mv(Tensor self, Tensor vec) -> Tensor", mv)
+add_shape_compute_mapping("aten::matmul(Tensor self, Tensor other) -> Tensor", matmul)
+add_shape_compute_mapping(
+ "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", linear
+)
+add_shape_compute_mapping(
+ "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor",
+ max_pool2d,
+)
+add_shape_compute_mapping(
+ "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)",
+ max_pool2d_with_indices,
+)
+add_shape_compute_mapping("aten::t(Tensor(a) self) -> Tensor(a)", t)
+add_shape_compute_mapping(
+ "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", transpose
+)
+add_shape_compute_mapping(
+ "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor",
+ conv1d,
+)
+add_shape_compute_mapping(
+ "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor",
+ conv2d,
+)
+add_shape_compute_mapping(
+ "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor",
+ batch_norm,
+)
+add_shape_compute_mapping(
+ "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor",
+ conv3d,
+)
+add_shape_compute_mapping(
+ "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)",
+ conv_backwards,
+)
+add_shape_compute_mapping(
+ "aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor",
+ conv_forwards,
+)
+add_shape_compute_mapping(
+ "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
+ _conv_forwards,
+)
+add_shape_compute_mapping(
+ "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor",
+ conv_transpose2d_input,
+)
+add_shape_compute_mapping(
+ "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)",
+ flatten,
+)
+add_shape_compute_mapping("aten::cat(Tensor[] tensors, int dim=0) -> Tensor", cat)
+add_shape_compute_mapping("aten::stack(Tensor[] tensors, int dim=0) -> Tensor", stack)
+add_shape_compute_mapping(
+ "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", permute
+)
+add_shape_compute_mapping(
+ "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)",
+ movedim,
+)
+add_shape_compute_mapping("aten::view(Tensor(a) self, int[] size) -> Tensor(a)", view)
+add_shape_compute_mapping(
+ "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", expand
+)
+add_shape_compute_mapping(
+ "aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)",
+ expand_one_unused,
+)
+add_shape_compute_mapping(
+ "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor",
+ sum_mean_dim,
+)
+add_shape_compute_mapping(
+ "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor",
+ sum_mean_dim,
+)
+add_shape_compute_mapping(
+ "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)",
+ max_dim,
+)
+add_shape_compute_mapping(
+ "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor
+)
+add_shape_compute_mapping(
+ "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor
+)
+add_shape_compute_mapping(
+ "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor",
+ addmm,
+)
+add_shape_compute_mapping(
+ "aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> (Tensor)",
+ upsample_nearest2d,
+)
+add_shape_compute_mapping(
+ "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor",
+ unary,
+)
+add_shape_compute_mapping(
+ "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor",
+ unary,
+)
+add_shape_compute_mapping("aten::dequantize(Tensor self) -> Tensor", unary)
+add_shape_compute_mapping(
+ "quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc",
+ broadcast,
+)
+add_shape_compute_mapping(
+ "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", argmax
+)
+add_shape_compute_mapping("aten::bmm(Tensor self, Tensor mat2) -> Tensor", bmm)
+add_shape_compute_mapping(
+ "aten::_shape_as_tensor(Tensor self) -> Tensor", _shape_as_tensor
+)
+add_shape_compute_mapping(
+ "aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)",
+ topk,
+)
+add_shape_compute_mapping(
+ "aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)",
+ nll_loss_forward,
+)
+add_shape_compute_mapping(
+ "aten::native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)",
+ native_layer_norm,
+)
+add_shape_compute_mapping(
+ "aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
+ native_batch_norm,
+)
+add_shape_compute_mapping(
+ "aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
+ native_batch_norm,
+)
+add_shape_compute_mapping(
+ "aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
+ native_batch_norm,
+)
+add_shape_compute_mapping(
+ "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor",
+ cross_entropy_loss,
+)
+# add_shape_compute_mapping("aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", index_Tensor)
+
+# TODO: migrate over all of symbolic_shape_registry_util.cpp
+# These are duplicated here so that the functions will be serialized
+add_shape_compute_mapping(
+ "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor",
+ broadcast_three,
+)
+add_shape_compute_mapping(
+ "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor",
+ broadcast_one_three,
+)
+add_shape_compute_mapping(
+ "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)",
+ broadcast_inplace,
+)
+
+# quantized_conv_prepack TODO
+
+# Shape Compute Fn with upper and lower bounds
+add_bounded_compute_mapping(
+ "aten::nonzero(Tensor self) -> (Tensor)", nonzero_lower_bound, nonzero_upper_bound
+)
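+
+# A hedged sketch of how an additional op could be registered through the same
+# entry point. The "myops::gelu" schema and `copy_shape` below are hypothetical;
+# shape functions in this registry take and return shapes as List[int] and must
+# be scriptable:
+#
+#     def copy_shape(self: List[int]) -> List[int]:
+#         out: List[int] = []
+#         for elem in self:
+#             out.append(elem)  # element-wise op: output shape == input shape
+#         return out
+#
+#     add_shape_compute_mapping("myops::gelu(Tensor self) -> Tensor", copy_shape)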
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_state.py b/venv/lib/python3.10/site-packages/torch/jit/_state.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d75415ef80e9b517594be6c6ce58220647865b6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_state.py
@@ -0,0 +1,126 @@
+"""JIT-related state.
+
+This module stores various pieces of Python-global state relating to the JIT.
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+import os
+import weakref
+from typing import Any, Dict, Type
+
+import torch
+
+
+class EnabledProxy:
+ """Stores whether the JIT is enabled or not.
+
+    This is just a wrapper for a bool, so that we get reference semantics.
+ """
+
+ def __init__(self):
+ self.enabled = self.parse_env(
+ "PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
+ )
+
+ def parse_env(self, name, default, true_message, false_message):
+ value = os.environ.get(name)
+ if value is None:
+ return default
+ if value.lower() in {"1", "true", "yes"}:
+ return True
+ elif value.lower() in {"0", "false", "no"}:
+ return False
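+        # "1v"/"0v" behave like "1"/"0" but also print the verbose message.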
+ if value == "1v":
+ print(true_message)
+ return True
+ elif value == "0v":
+ print(false_message)
+ return False
+ raise ValueError(f"Unknown setting of {name}. Try using 0 or 1.")
+
+ def __bool__(self):
+ return self.enabled
+
+
+_enabled = EnabledProxy()
+
+
+def disable():
+ _enabled.enabled = False
+
+
+def enable():
+ _enabled.enabled = True
+
+
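+# A usage sketch (assuming direct access to this private module is acceptable):
+# `disable()`/`enable()` flip the same flag that the PYTORCH_JIT environment
+# variable seeds at import time, and `torch.jit.script` consults it.
+#
+#     import torch.jit._state as _state
+#     _state.disable()
+#     assert not bool(_state._enabled)  # scripting now returns callables as-is
+#     _state.enable()
+
+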
+# The Python CompilationUnit. All functions and modules defined in Python will
+# live in here. It's defined in Python because doing so in C++ creates static
+# destruction order issues.
+_python_cu = torch._C.CompilationUnit()
+
+
+# python class => ScriptClass mapping
+_script_classes: Dict[Type[Any], Type[Any]] = {}
+_name_to_pyclass: Dict[str, Type[Any]] = {}
+
+
+def _add_script_class(python_class, script_class):
+ _script_classes[python_class] = script_class
+ _name_to_pyclass[script_class.qualified_name()] = python_class
+
+
+def _get_script_class(python_class):
+ override = getattr(python_class, "_jit_override_qualname", None)
+ if override is not None:
+ python_class = _get_python_class(override)
+ return _script_classes.get(python_class, None)
+
+
+def _get_python_class(qualified_name):
+ return _name_to_pyclass.get(qualified_name, None)
+
+
+def _clear_class_state():
+ _script_classes.clear()
+ _name_to_pyclass.clear()
+
+
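+# Illustrative flow (a sketch; `Point` is hypothetical): scripting a class
+# records it in both maps, so later lookups by Python type and by qualified
+# name stay in sync.
+#
+#     @torch.jit.script
+#     class Point:
+#         def __init__(self, x: int):
+#             self.x = x
+#
+#     # afterwards, roughly:
+#     #   _get_script_class(Point) -> the compiled ScriptClass
+#     #   _get_python_class(<Point's qualified name>) -> Point
+
+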
+# Caching: we currently cache compilation of free functions and overloaded functions.
+# To cache free functions we hold a weak ref to the function object and
+# map to the compiled fn's qualified name.
+# To cache overloaded functions we hold a weak ref to the function obj and
+# map to all of its overloaded compiled fns.
+# In the future we could consider caching more types of objects so that
+# aliasing is preserved across separate compilations of the same object.
+
+_jit_caching_layer: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
+_jit_function_overload_caching: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
+
+
+def _try_get_jit_cached_overloads(key):
+ qual_names = _jit_function_overload_caching.get(key, None)
+ if qual_names:
+ return [_python_cu.find_function(qual_name) for qual_name in qual_names]
+ else:
+ return None
+
+
+def _set_jit_overload_cache(key, compiled_fns):
+ _jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns]
+
+
+def _try_get_jit_cached_function(key):
+ if getattr(key, "__disable_jit_function_caching__", False) is True:
+ return None
+ qual_name = _jit_caching_layer.get(key, None)
+ if qual_name:
+ return _python_cu.find_function(qual_name)
+ else:
+ return None
+
+
+def _set_jit_function_cache(key, value):
+ # only free functions currently supported
+ assert isinstance(value, torch.jit.ScriptFunction)
+ _jit_caching_layer[key] = value.qualified_name
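+
+
+# A rough round-trip sketch (names hypothetical); the compiled function lives
+# in _python_cu, so the cache only needs to remember its qualified name:
+#
+#     def free_fn(x):
+#         return x + 1
+#
+#     compiled = torch.jit.script(free_fn)
+#     _set_jit_function_cache(free_fn, compiled)
+#     assert _try_get_jit_cached_function(free_fn) is not None
+#
+# Keys are held weakly, so dropping the last reference to `free_fn` evicts
+# the entry automatically.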
diff --git a/venv/lib/python3.10/site-packages/torch/jit/_trace.py b/venv/lib/python3.10/site-packages/torch/jit/_trace.py
new file mode 100644
index 0000000000000000000000000000000000000000..23fe78201f10ce14bba0cec5e333294a8fff9038
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/_trace.py
@@ -0,0 +1,1313 @@
+"""Tracing.
+
+This module contains functionality to support the JIT's tracing frontend, notably:
+ * torch.jit.trace
+ * torch.jit.trace_module
+
+This is not intended to be imported directly; please use the exposed
+functionalities in `torch.jit`.
+"""
+import contextlib
+import copy
+import functools
+import inspect
+import os
+import re
+import warnings
+from typing import Any, Callable, Dict, List, Optional, Set, TypeVar
+
+from typing_extensions import ParamSpec
+
+import torch
+from torch._jit_internal import (
+ _qualified_name,
+ get_callable_argument_names,
+ is_scripting,
+)
+from torch.autograd import function
+from torch.jit._script import _CachedForward, script, ScriptModule
+
+from torch.jit._state import _enabled, _python_cu
+from torch.nn import Module
+
+from torch.testing._comparison import default_tolerances
+
+_flatten = torch._C._jit_flatten
+_unflatten = torch._C._jit_unflatten
+
+R = TypeVar("R", covariant=True) # return type (always covariant)
+P = ParamSpec("P")
+
+
+def _create_interpreter_name_lookup_fn(frames_up=1):
+ def _get_interpreter_name_for_var(var):
+ frame = inspect.currentframe()
+ if not frame:
+ raise RuntimeError("failed to inspect frame")
+
+ i = 0
+ while i < frames_up + 1:
+ frame = frame.f_back
+ if not frame:
+ raise RuntimeError("failed to get frame")
+ i += 1
+
+ f_locals = frame.f_locals
+ f_globals = frame.f_globals
+
+ for k, v in f_locals.items():
+ if isinstance(v, torch.Tensor) and var is v:
+ return k if k != "self" else ""
+ return ""
+
+ return _get_interpreter_name_for_var
+
+
+def _unique_state_dict(module, keep_vars=False):
+    # Since Parameter.detach() always creates a new torch.Tensor instance,
+    # id(v) is not stable across state_dict() calls. So we always fetch the
+    # Parameter or Buffer objects themselves (keep_vars=True), deduplicate on
+    # their identities, and only detach afterwards if keep_vars is False.
+ state_dict = module.state_dict(keep_vars=True)
+ filtered_dict = type(state_dict)()
+ seen_ids: Set[int] = set()
+ for k, v in state_dict.items():
+ if id(v) in seen_ids:
+ continue
+ seen_ids.add(id(v))
+ if keep_vars:
+ filtered_dict[k] = v
+ else:
+ filtered_dict[k] = v.detach()
+ return filtered_dict
+
+
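+# For example (a sketch): if two entries alias one Parameter object, only the
+# first name that state_dict() visits survives deduplication.
+#
+#     m = torch.nn.Module()
+#     m.a = torch.nn.Linear(2, 2)
+#     m.b = torch.nn.Linear(2, 2)
+#     m.b.weight = m.a.weight  # share one Parameter between both layers
+#     assert "b.weight" not in _unique_state_dict(m)
+
+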
+class ONNXTracedModule(torch.nn.Module):
+ def __init__(
+ self,
+ inner,
+ strict=True,
+ force_outplace=False,
+ return_inputs=False,
+ return_inputs_states=False,
+ ):
+ super().__init__()
+ # inner may be a Module, or it may be an arbitrary callable
+        # If it's a Module, we get its parameters automatically, which lets
+        # us avoid special-casing functions versus modules.
+ self.inner = inner
+ self.strict = strict
+ self._force_outplace = force_outplace
+ self._return_inputs = return_inputs
+ self._return_inputs_states = return_inputs_states
+
+ def forward(self, *args: torch.Tensor):
+ in_vars, in_desc = _flatten(args)
+ # NOTE: use full state, because we need it for BatchNorm export
+ # This differs from the compiler path, which doesn't support it at the moment.
+ module_state = list(_unique_state_dict(self, keep_vars=True).values())
+
+ ret_inputs = []
+ inputs_states = []
+ outs = []
+
+ def wrapper(*args):
+ in_args: List[torch.Tensor] = []
+ for i in range(len(in_vars)):
+ if not isinstance(args[i], torch.Tensor):
+ raise RuntimeError("Expected Tensor argument")
+ in_args.append(args[i])
+
+ trace_inputs = _unflatten(in_args, in_desc)
+
+ if self._return_inputs:
+ ret_inputs.append(
+ tuple(x.clone(memory_format=torch.preserve_format) for x in args)
+ )
+ if self._return_inputs_states:
+ inputs_states.append(_unflatten(in_args, in_desc))
+ outs.append(self.inner(*trace_inputs))
+ if self._return_inputs_states:
+ inputs_states[0] = (inputs_states[0], trace_inputs)
+ out_vars, _ = _flatten(outs)
+ if len(out_vars) == 1:
+ return out_vars[0]
+ else:
+ return tuple(out_vars)
+
+ graph, out = torch._C._create_graph_by_tracing(
+ wrapper,
+ in_vars + module_state,
+ _create_interpreter_name_lookup_fn(),
+ self.strict,
+ self._force_outplace,
+ )
+
+ if self._return_inputs:
+ return graph, outs[0], ret_inputs[0]
+ if self._return_inputs_states:
+ return graph, outs[0], inputs_states[0]
+ else:
+ return graph, outs[0]
+
+
+def _clone_inputs(args):
+ def clone_input(a):
+ if a is None:
+ return None
+ elif isinstance(a, torch.Tensor):
+ # TODO: figure out one liner to .clone() and set requires_grad
+ v = (
+ a.detach()
+ .clone(memory_format=None if a.is_mkldnn else torch.preserve_format)
+ .requires_grad_(a.requires_grad)
+ )
+ if a.grad is not None:
+ v.grad = clone_input(v.grad)
+ return v
+ else:
+ return a.clone(memory_format=torch.preserve_format)
+
+ return function._nested_map(
+ lambda x: isinstance(x, torch.Tensor), clone_input, condition_msg="tensors"
+ )(args)
+
+
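+# A sketch of the guarantee this provides: clones are detached copies that
+# mirror requires_grad, so checking runs cannot mutate the caller's tensors.
+#
+#     x = torch.randn(3, requires_grad=True)
+#     (y,) = _clone_inputs((x,))
+#     assert y is not x and y.requires_grad
+
+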
+# This is purely for developer debugging. We are not going to advertise it.
+_JIT_TIME = os.environ.get("PYTORCH_JIT_TIME", False) # CUDA-only timing
+_JIT_DISABLE = os.environ.get("PYTORCH_JIT_DISABLE", False)
+_JIT_STATS = os.environ.get("PYTORCH_JIT_STATS", False)
+
+
+@contextlib.contextmanager
+def _time(trace_name, name, time=True):
+ if (not _JIT_TIME and not time) or not torch.cuda.is_available():
+ yield
+ return
+ stream = torch.cuda.current_stream()
+ start = torch.cuda.Event(enable_timing=True)
+ end = torch.cuda.Event(enable_timing=True)
+ stream.record_event(start)
+ try:
+ yield
+ finally:
+ stream.record_event(end)
+ end.synchronize()
+ print(f"{trace_name} {name} time: {start.elapsed_time(end)} ms")
+
+
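+# Developer-only usage sketch (timing relies on CUDA events, so this is a
+# no-op without a CUDA device); `model` and `inp` are placeholders:
+#
+#     with _time("my_trace", "forward"):
+#         out = model(inp)
+
+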
+def verify(model, args, loss_fn=torch.sum, devices=None):
+ """
+    Verify that a JIT compiled model has the same behavior as its uncompiled version, including the backward pass.
+
+ If your model returns multiple outputs,
+ you must also specify a `loss_fn` to produce a loss for which
+ the backwards will be computed.
+
+ This function has side-effects (e.g., it executes your model / saves and loads
+ parameters), so don't expect the model to come out exactly the same as what
+ you passed in.
+
+ Args:
+ model (compiled torch.nn.Module or function): the module/function to be
+ verified. The module/function definition MUST have been decorated with
+ `@torch.jit.compile`.
+ args (tuple or Tensor): the positional arguments to pass to the
+ compiled function/module to be verified. A non-tuple is assumed to
+ be a single positional argument to be passed to the model.
+ loss_fn (function, optional): the loss function to be applied to
+ the output of the model, before backwards is invoked. By default,
+        we assume that a model returns a single result, and we apply :func:`torch.sum` to it
+ before calling backwards; if this is inappropriate, you can pass your
+ own loss function. Note that if a model returns a tuple of results,
+ these are passed as separate positional arguments to `loss_fn`.
+ devices (iterable of device IDs, optional): the GPU devices which the
+ compiled module will be run on. This determines the RNG state we
+ must save when running both compiled and uncompiled versions of the model.
+ """
+ # TODO: In principle, we track device information in our trace, so it
+ # should be possible to check if our execution actually obeyed the 'devices'
+ # the user provided.
+
+ # TODO: Consider adding a utility function to torch.jit to test
+ # for this case
+ if not isinstance(model, torch._C.CompiledFunction): # type: ignore[attr-defined]
+ raise TypeError(
+ "Cannot verify an uncompiled module. Add @torch.jit.compile to compile it"
+ )
+ is_module = isinstance(model, Module)
+
+ if not isinstance(args, tuple):
+ args = (args,)
+
+ saved_args = _clone_inputs(args)
+ if is_module:
+ saved_state = copy.deepcopy(model.state_dict())
+
+ def run_fwd_bwd(args, force_trace=False, assert_compiled=False):
+ params = list(model.parameters()) if is_module else []
+ in_vars, _ = _flatten((args, params))
+ # We use a special API to reset the trace and compile it from scratch.
+ compiled_fn = model
+ if force_trace:
+ compiled_fn.clear_cache()
+ if assert_compiled:
+ hits = compiled_fn.hits
+ out = model(*args)
+ if assert_compiled and compiled_fn.hits == hits: # type: ignore[possibly-undefined]
+ raise RuntimeError("failed to use the compiled function")
+ if not isinstance(out, tuple):
+ out = (out,)
+ if loss_fn == torch.sum and len(out) != 1:
+ raise ValueError(
+ f"Model returns {len(out)} outputs, but default loss function "
+ "(torch.sum) can only handle a single output"
+ )
+ out_vars, _ = _flatten(out)
+ saved_outs = [
+ v.detach().clone(memory_format=torch.preserve_format) for v in out_vars
+ ]
+ loss = loss_fn(*out)
+ grads = torch.autograd.grad([loss], in_vars)
+ # TODO: I'm not sure if the clone here is necessary but it is safer
+ saved_grads = [
+ v.detach().clone(memory_format=torch.preserve_format) for v in grads
+ ]
+ return (saved_outs, saved_grads)
+
+ with torch.random.fork_rng(devices, _caller="torch.jit.verify"):
+ uncompiled_outs, uncompiled_grads = run_fwd_bwd(args, force_trace=True)
+ assert model.has_trace_for(*args)
+
+ if is_module:
+ model.load_state_dict(saved_state) # type: ignore[possibly-undefined]
+ compiled_outs, compiled_grads = run_fwd_bwd(args, assert_compiled=True)
+
+ _verify_equal(uncompiled_outs, compiled_outs)
+ _verify_equal(uncompiled_grads, compiled_grads)
+
+
+def _verify_equal(xs, ys):
+ for x, y in zip(xs, ys):
+ if x.sub(y).abs().max() > 1e-6:
+ raise RuntimeError("JIT and real computation mismatch")
+
+
+def indent(s):
+ return "\n".join(["\t" + line for line in s.splitlines()])
+
+
+class TracingCheckError(Exception):
+ def __init__(self, graph_diff_error, tensor_compare_error, extra_msg=None):
+ self.message = "Tracing failed sanity checks!\n"
+ if extra_msg is not None:
+ self.message += extra_msg + "\n"
+ if graph_diff_error is not None:
+ self.message += "ERROR: Graphs differed across invocations!\n"
+ self.message += indent(graph_diff_error) + "\n"
+ if tensor_compare_error is not None:
+ self.message += (
+ "ERROR: Tensor-valued Constant nodes differed in value "
+ "across invocations. This often indicates that the tracer has"
+ " encountered untraceable code.\n"
+ )
+ self.message += indent(tensor_compare_error) + "\n"
+ super().__init__(self.message)
+
+
+# Check the traced module against a set of user-provided validation inputs
+@torch.no_grad()
+def _check_trace(
+ check_inputs,
+ func,
+ traced_func,
+ check_tolerance,
+ strict,
+ force_outplace,
+ is_trace_module,
+ _module_class,
+ example_inputs_is_kwarg=False,
+):
+ # Note: tracing is independent of optimizations, which consume the trace
+ for inputs in check_inputs:
+ if isinstance(inputs, torch.Tensor):
+ inputs = (inputs,)
+
+ if is_trace_module:
+ copied_dict = {}
+ for name, data in inputs.items():
+ copied_dict[name] = _clone_inputs(data)
+ check_mod = torch.jit.trace_module(
+ getattr(func, "__self__", func),
+ copied_dict,
+ check_trace=False,
+ strict=strict,
+ _force_outplace=force_outplace,
+ _module_class=_module_class,
+ _compilation_unit=torch._C.CompilationUnit(),
+ example_inputs_is_kwarg=example_inputs_is_kwarg,
+ _store_inputs=False,
+ )
+ check_mod_func = check_mod._c._get_method(traced_func.name)
+ inputs = inputs[traced_func.name]
+        if isinstance(inputs, torch.Tensor) or (
+            isinstance(inputs, dict) and not example_inputs_is_kwarg
+        ):
+ inputs = (inputs,)
+ else:
+ if example_inputs_is_kwarg:
+ check_mod = torch.jit.trace(
+ func,
+ check_trace=False,
+ strict=strict,
+ _force_outplace=force_outplace,
+ _module_class=_module_class,
+ example_kwarg_inputs=_clone_inputs(inputs),
+ _store_inputs=False,
+ )
+ else:
+ check_mod = torch.jit.trace(
+ func,
+ _clone_inputs(inputs),
+ check_trace=False,
+ strict=strict,
+ _force_outplace=force_outplace,
+ _module_class=_module_class,
+ _store_inputs=False,
+ )
+ check_mod_func = check_mod
+
+ def graph_diagnostic_info():
+ mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph)
+ torch._C._jit_pass_inline(mod_canonicalized)
+ torch._C._jit_pass_erase_shape_information(mod_canonicalized)
+ mod_str = str(mod_canonicalized)
+ mod_str = re.sub(r"___torch_mangle_[0-9]+\.", "", mod_str)
+ check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph)
+ torch._C._jit_pass_inline(check_canonicalized)
+ torch._C._jit_pass_erase_shape_information(check_canonicalized)
+ check_str = str(check_canonicalized)
+ check_str = re.sub(r"___torch_mangle_[0-9]+\.", "", check_str)
+
+ graph_diff_errors = None
+ if mod_str != check_str:
+ import difflib
+
+ graph_diff = difflib.ndiff(
+ mod_str.splitlines(True), check_str.splitlines(True)
+ )
+ graph_diff_errors = "Graph diff:\n" + indent("".join(graph_diff)) + "\n"
+
+ for n_mod, n_check in zip(
+ mod_canonicalized.nodes(), check_canonicalized.nodes()
+ ):
+ if str(n_mod) != str(n_check):
+ graph_diff_errors += "First diverging operator:\n"
+ node_diff = difflib.ndiff(
+ str(n_mod).splitlines(True), str(n_check).splitlines(True)
+ )
+ source_printout = (
+ "Node diff:\n" + indent("".join(node_diff)) + "\n"
+ )
+ mod_stack = n_mod.sourceRange()
+ if mod_stack:
+ source_printout += (
+ "Trace source location:\n" + indent(mod_stack) + "\n"
+ )
+ check_stack = n_check.sourceRange()
+ if check_stack:
+ source_printout += (
+ "Check source location:\n" + indent(check_stack) + "\n"
+ )
+ graph_diff_errors += source_printout
+
+ break # For now, only print out the first pair of nodes that diverges
+
+ tensor_compare_errors = None
+ # Check Tensor-valued constant nodes
+ for n_mod, n_check in zip(
+ mod_canonicalized.nodes(), check_canonicalized.nodes()
+ ):
+ if n_mod.kind() != n_check.kind():
+ break # Graphs have already diverged
+
+ if n_mod.kind() == "prim::Constant" and not (
+ n_mod.mustBeNone() or n_check.mustBeNone()
+ ):
+ if not n_mod.hasAttribute("value"):
+ continue
+ if n_mod.kindOf("value") != "t" or n_check.kindOf("value") != "t":
+ continue
+
+ mod_tensor_val = n_mod.t("value")
+ check_tensor_val = n_check.t("value")
+
+ try:
+ torch.testing.assert_close(
+ mod_tensor_val, check_tensor_val, equal_nan=True
+ )
+ except (RuntimeError, AssertionError) as e:
+ if tensor_compare_errors is None:
+ tensor_compare_errors = ""
+ tensor_compare_errors += "Node:\n" + indent(str(n_mod)) + "\n"
+ compare_stack = n_mod.sourceRange()
+ if compare_stack:
+ tensor_compare_errors += (
+ "Source Location:\n" + indent(compare_stack) + "\n"
+ )
+ tensor_compare_errors += "Comparison exception: " + indent(
+ str(e)
+ )
+
+ break # For now, only print the first diverging pair
+
+ return graph_diff_errors, tensor_compare_errors
+
+ def wrap_retval(x):
+ return x if isinstance(x, tuple) else (x,)
+
+ def run_mod_and_filter_tensor_outputs(mod, inputs, running_what):
+ try:
+ if isinstance(inputs, dict) and example_inputs_is_kwarg:
+ outs = wrap_retval(mod(**inputs))
+ else:
+ outs = wrap_retval(mod(*_clone_inputs(inputs)))
+ outs = [out for out in outs if isinstance(out, torch.Tensor)]
+ return outs
+ except Exception as e:
+ graph_diff_errors, tensor_compare_errors = graph_diagnostic_info()
+ msg = f"encountered an exception while running the {running_what} with test inputs.\nException:\n{indent(str(e))}"
+ raise TracingCheckError(
+ graph_diff_errors,
+ tensor_compare_errors,
+ extra_msg=msg,
+ ) from e
+
+ has_warned = [False]
+
+ def maybe_warn_nondeterministic():
+ if has_warned[0]:
+ return
+ has_warned[0] = True
+ nondeterm_ops = [
+ op for op in traced_func.graph.nodes() if op.isNondeterministic()
+ ]
+ if len(nondeterm_ops) > 0:
+ nondeterministic_ops_warning = "Trace had nondeterministic nodes. "
+ nondeterministic_ops_warning += (
+ "Did you forget call .eval() on your model? Nodes:\n"
+ )
+ nondeterministic_ops_warning += "\n".join(
+ [indent(str(op)) for op in nondeterm_ops][:20]
+ )
+ nondeterministic_ops_warning += (
+ "\nThis may cause errors in trace checking. To disable trace checking,"
+ " pass check_trace=False to torch.jit.trace()"
+ )
+ warnings.warn(
+ nondeterministic_ops_warning, category=TracerWarning, stacklevel=5
+ )
+
+ def compare_outputs(original, reference, match_what):
+ all_ok = True
+ for i, (orig, ref) in enumerate(zip(original, reference)):
+ try:
+ if orig.is_quantized:
+ orig = orig.dequantize()
+ if ref.is_quantized:
+ ref = ref.dequantize()
+ if orig.is_mkldnn:
+ orig = orig.to_dense()
+ if ref.is_mkldnn:
+ ref = ref.to_dense()
+ if ref.is_complex() or orig.is_complex():
+ torch.testing.assert_close(
+ orig.to(torch.cdouble),
+ ref.to(torch.cdouble),
+ rtol=check_tolerance,
+ atol=default_tolerances(orig, ref)[1],
+ equal_nan=True,
+ )
+ else:
+ if orig.is_mps or ref.is_mps:
+ torch.testing.assert_close(
+ orig.float(),
+ ref.float(),
+ rtol=check_tolerance,
+ atol=default_tolerances(orig, ref)[1],
+ equal_nan=True,
+ )
+ elif getattr(orig, "is_nested", None) or getattr(
+ ref, "is_nested", None
+ ):
+ assert getattr(orig, "is_nested", None) == getattr(
+ ref, "is_nested", None
+ )
+ for t_orig, t_ref in zip(orig.unbind(), ref.unbind()):
+ torch.testing.assert_close(
+ t_orig.double(),
+ t_ref.double(),
+ rtol=check_tolerance,
+ atol=default_tolerances(t_orig, t_ref)[1],
+ equal_nan=True,
+ )
+ else:
+ torch.testing.assert_close(
+ orig.double(),
+ ref.double(),
+ rtol=check_tolerance,
+ atol=default_tolerances(orig, ref)[1],
+ equal_nan=True,
+ )
+ except AssertionError as e:
+ maybe_warn_nondeterministic()
+ warnings.warn(
+ "Output nr "
+ + str(i + 1)
+ + ". of the traced function does not match "
+ "the corresponding output of the "
+ + match_what
+ + ". Detailed error:\n"
+ + str(e),
+ category=TracerWarning,
+ stacklevel=4,
+ )
+ all_ok = False
+
+ return all_ok
+
+ traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, "trace")
+ fn_outs = run_mod_and_filter_tensor_outputs(func, inputs, "Python function")
+ if compare_outputs(traced_outs, fn_outs, "Python function"):
+ check_outs = run_mod_and_filter_tensor_outputs(
+ check_mod_func, inputs, "repeated trace"
+ )
+ compare_outputs(traced_outs, check_outs, "repeated trace")
+
+ diag_info = graph_diagnostic_info()
+ if any(info is not None for info in diag_info):
+ raise TracingCheckError(*diag_info)
+
+
+class TracerWarning(Warning):
+ @staticmethod
+ def ignore_lib_warnings():
+ # We ignore warnings from all submodules excluding the JIT, because we need them e.g. for _check_trace
+ warnings.filterwarnings(
+ "ignore", category=TracerWarning, module="torch.(?!jit)"
+ )
+ warnings.filterwarnings("ignore", "torch::jit::fuser::cuda")
+
+
+# We ignore the tracer warnings coming from inside the library, because all our shape
+# checks in nn will trigger them.
+TracerWarning.ignore_lib_warnings()
+torch._C._tracer_warn_use_python()
+
+
+def make_tuple(example_inputs):
+ if isinstance(example_inputs, (torch.Tensor, dict)):
+ return (example_inputs,)
+ # done primarily so that weird iterables fail here and not pybind11 code
+ if not isinstance(example_inputs, tuple):
+ return tuple(example_inputs)
+ return example_inputs
+
+
+def make_module(mod, _module_class, _compilation_unit):
+ if isinstance(mod, ScriptModule):
+ return mod
+ elif torch._jit_internal.module_has_exports(mod):
+ infer_methods_stubs_fn = torch.jit._recursive.make_stubs_from_exported_methods
+ return torch.jit._recursive.create_script_module(
+ mod, infer_methods_stubs_fn, share_types=False, is_tracing=True
+ )
+ else:
+ if _module_class is None:
+ _module_class = TopLevelTracedModule
+ return _module_class(mod, _compilation_unit=_compilation_unit)
+
+
+def wrap_check_inputs(check_inputs):
+ if check_inputs is None:
+ return None
+
+ return [{"forward": c} for c in check_inputs]
+
+
+def trace(
+ func,
+ example_inputs=None,
+ optimize=None,
+ check_trace=True,
+ check_inputs=None,
+ check_tolerance=1e-5,
+ strict=True,
+ _force_outplace=False,
+ _module_class=None,
+ _compilation_unit=_python_cu,
+ example_kwarg_inputs=None,
+ _store_inputs=True,
+):
+ r"""
+ Trace a function and return an executable or :class:`ScriptFunction` that will be optimized using just-in-time compilation.
+
+ Tracing is ideal for code that operates only on
+ ``Tensor``\\s and lists, dictionaries, and
+ tuples of ``Tensor``\\s.
+
+ Using `torch.jit.trace` and `torch.jit.trace_module`, you can turn an
+ existing module or Python function into a TorchScript
+ :class:`ScriptFunction` or :class:`ScriptModule`. You must provide example
+ inputs, and we run the function, recording the operations performed on all
+ the tensors.
+
+ * The resulting recording of a standalone function produces `ScriptFunction`.
+ * The resulting recording of `nn.Module.forward` or `nn.Module` produces
+ `ScriptModule`.
+
+ This module also contains any parameters that the original
+ module had as well.
+
+ Warning:
+ Tracing only correctly records functions and modules which are not data
+ dependent (e.g., do not have conditionals on data in tensors) and do not have
+ any untracked external dependencies (e.g., perform input/output or
+ access global variables). Tracing only records operations done when the given
+ function is run on the given tensors. Therefore, the returned
+ `ScriptModule` will always run the same traced graph on any input. This
+ has some important implications when your module is expected to run
+ different sets of operations, depending on the input and/or the module
+ state. For example,
+
+ * Tracing will not record any control-flow like if-statements or loops.
+ When this control-flow is constant across your module, this is fine
+ and it often inlines the control-flow decisions. But sometimes the
+ control-flow is actually part of the model itself. For instance, a
+ recurrent network is a loop over the (possibly dynamic) length of an
+ input sequence.
+ * In the returned :class:`ScriptModule`, operations that have different
+ behaviors in ``training`` and ``eval`` modes will always behave as if
+ it is in the mode it was in during tracing, no matter which mode the
+ `ScriptModule` is in.
+
+ In cases like these, tracing would not be appropriate and
+    :func:`scripting <torch.jit.script>` is a better choice. If you trace
+ such models, you may silently get incorrect results on subsequent
+ invocations of the model. The tracer will try to emit warnings when
+ doing something that may cause an incorrect trace to be produced.
+
+ Args:
+ func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
+ that will be run with `example_inputs`. `func` arguments and return
+ values must be tensors or (possibly nested) tuples that contain
+ tensors. When a module is passed `torch.jit.trace`, only the
+ ``forward`` method is run and traced (see :func:`torch.jit.trace
+            <torch.jit.trace_module>` for details).
+
+ Keyword arguments:
+ example_inputs (tuple or torch.Tensor or None, optional): A tuple of example
+ inputs that will be passed to the function while tracing.
+ Default: ``None``. Either this argument or ``example_kwarg_inputs``
+ should be specified. The resulting trace can be run with inputs of
+ different types and shapes assuming the traced operations support those
+ types and shapes. `example_inputs` may also be a single Tensor in which
+ case it is automatically wrapped in a tuple. When the value is None,
+ ``example_kwarg_inputs`` should be specified.
+
+ check_trace (``bool``, optional): Check if the same inputs run through
+ traced code produce the same outputs. Default: ``True``. You might want
+ to disable this if, for example, your network contains non-
+ deterministic ops or if you are sure that the network is correct despite
+ a checker failure.
+
+ check_inputs (list of tuples, optional): A list of tuples of input
+ arguments that should be used to check the trace against what is
+ expected. Each tuple is equivalent to a set of input arguments that
+ would be specified in ``example_inputs``. For best results, pass in
+ a set of checking inputs representative of the space of shapes and
+ types of inputs you expect the network to see. If not specified,
+ the original ``example_inputs`` are used for checking
+ check_tolerance (float, optional): Floating-point comparison tolerance
+ to use in the checker procedure. This can be used to relax the
+ checker strictness in the event that results diverge numerically
+ for a known reason, such as operator fusion.
+ strict (``bool``, optional): run the tracer in a strict mode or not
+ (default: ``True``). Only turn this off when you want the tracer to
+ record your mutable container types (currently ``list``/``dict``)
+ and you are sure that the container you are using in your
+ problem is a ``constant`` structure and does not get used as
+ control flow (if, for) conditions.
+        example_kwarg_inputs (dict, optional): A pack of keyword arguments of
+            example inputs that will be passed to the function while tracing.
+            Default: ``None``. Either this argument or ``example_inputs`` should
+            be specified. The dict will be unpacked by the argument names of the
+            traced function. If the keys of the dict do not match the traced
+            function's argument names, a runtime exception will be raised.
+
+ Returns:
+ If `func` is `nn.Module` or ``forward`` of `nn.Module`, `trace` returns
+ a :class:`ScriptModule` object with a single ``forward`` method
+ containing the traced code. The returned `ScriptModule` will
+ have the same set of sub-modules and parameters as the original
+ ``nn.Module``. If ``func`` is a standalone function, ``trace``
+ returns `ScriptFunction`.
+
+ Example (tracing a function):
+
+ .. testcode::
+
+ import torch
+
+ def foo(x, y):
+ return 2 * x + y
+
+ # Run `foo` with the provided inputs and record the tensor operations
+ traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
+
+ # `traced_foo` can now be run with the TorchScript interpreter or saved
+ # and loaded in a Python-free environment
+
+ Example (tracing an existing module)::
+
+ import torch
+ import torch.nn as nn
+
+ class Net(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.conv = nn.Conv2d(1, 1, 3)
+
+ def forward(self, x):
+ return self.conv(x)
+
+ n = Net()
+ example_weight = torch.rand(1, 1, 3, 3)
+ example_forward_input = torch.rand(1, 1, 3, 3)
+
+ # Trace a specific method and construct `ScriptModule` with
+ # a single `forward` method
+ module = torch.jit.trace(n.forward, example_forward_input)
+
+ # Trace a module (implicitly traces `forward`) and construct a
+ # `ScriptModule` with a single `forward` method
+ module = torch.jit.trace(n, example_forward_input)
+
+ """
+ if not _enabled:
+ return func
+ if optimize is not None:
+ warnings.warn(
+ "`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
+ )
+
+ if isinstance(func, torch.jit.ScriptModule):
+        # There is no point in tracing it again: `forward` is already defined
+        # on a ScriptModule, so re-tracing would result in an error.
+ warnings.warn(
+ "The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is."
+ )
+ return func
+
+ if isinstance(func, torch.nn.Module):
+ if example_inputs is None:
+ if isinstance(example_kwarg_inputs, dict):
+ example_inputs = example_kwarg_inputs
+ else:
+ raise RuntimeError("example_kwarg_inputs should be a dict")
+ return trace_module(
+ func,
+ {"forward": example_inputs},
+ None,
+ check_trace,
+ wrap_check_inputs(check_inputs),
+ check_tolerance,
+ strict,
+ _force_outplace,
+ _module_class,
+ example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
+ _store_inputs=_store_inputs,
+ )
+ if (
+ hasattr(func, "__self__")
+ and isinstance(func.__self__, torch.nn.Module)
+ and func.__name__ == "forward"
+ ):
+ if example_inputs is None:
+ if isinstance(example_kwarg_inputs, dict):
+ example_inputs = example_kwarg_inputs
+ else:
+ raise RuntimeError("example_kwarg_inputs should be a dict")
+ return trace_module(
+ func.__self__,
+ {"forward": example_inputs},
+ None,
+ check_trace,
+ wrap_check_inputs(check_inputs),
+ check_tolerance,
+ strict,
+ _force_outplace,
+ _module_class,
+ example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
+ _store_inputs=_store_inputs,
+ )
+
+ # Special case for common case of passing a single Tensor
+ if (
+ isinstance(example_inputs, (torch.Tensor, dict))
+ and example_kwarg_inputs is None
+ ):
+ example_inputs = (example_inputs,)
+ # done primarily so that weird iterables fail here and not pybind11 code
+ elif example_kwarg_inputs is None and not isinstance(example_inputs, tuple):
+ example_inputs = tuple(example_inputs)
+
+ var_lookup_fn = _create_interpreter_name_lookup_fn(0)
+
+ if hasattr(func, "__self__") and isinstance(func.__self__, torch.nn.Module):
+ raise AttributeError(
+ "trace doesn't support compiling individual module's functions.\n"
+ "Please use trace_module"
+ )
+
+ name = _qualified_name(func)
+ if isinstance(example_kwarg_inputs, dict):
+ example_inputs = example_kwarg_inputs
+ traced = torch._C._create_function_from_trace_with_dict(
+ name,
+ func,
+ example_kwarg_inputs,
+ var_lookup_fn,
+ strict,
+ _force_outplace,
+ get_callable_argument_names(func),
+ )
+ else:
+ traced = torch._C._create_function_from_trace(
+ name,
+ func,
+ example_inputs,
+ var_lookup_fn,
+ strict,
+ _force_outplace,
+ get_callable_argument_names(func),
+ )
+
+ # Check the trace against new traces created from user-specified inputs
+ if check_trace:
+ if check_inputs is not None:
+ _check_trace(
+ check_inputs,
+ func,
+ traced,
+ check_tolerance,
+ strict,
+ _force_outplace,
+ False,
+ _module_class,
+ example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
+ )
+ else:
+ _check_trace(
+ [example_inputs],
+ func,
+ traced,
+ check_tolerance,
+ strict,
+ _force_outplace,
+ False,
+ _module_class,
+ example_inputs_is_kwarg=isinstance(example_kwarg_inputs, dict),
+ )
+
+ # Allow torch.compile() to inline
+ traced._torchdynamo_inline = func # type: ignore[attr-defined]
+ return traced
+
+
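+# A short keyword-input sketch complementing the docstring examples above;
+# `example_kwarg_inputs` feeds the trace by argument name instead of position:
+#
+#     def scale(x, factor):
+#         return x * factor
+#
+#     traced_scale = torch.jit.trace(
+#         scale,
+#         example_kwarg_inputs={"x": torch.rand(3), "factor": torch.rand(3)},
+#     )
+
+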
+_trace_module_map: Optional[Dict[Any, Any]] = None
+
+
+def trace_module(
+ mod,
+ inputs,
+ optimize=None,
+ check_trace=True,
+ check_inputs=None,
+ check_tolerance=1e-5,
+ strict=True,
+ _force_outplace=False,
+ _module_class=None,
+ _compilation_unit=_python_cu,
+ example_inputs_is_kwarg=False,
+ _store_inputs=True,
+):
+ """
+ Trace a module and return an executable :class:`ScriptModule` that will be optimized using just-in-time compilation.
+
+    When a module is passed to :func:`torch.jit.trace <torch.jit.trace>`, only
+    the ``forward`` method is run and traced. With ``trace_module``, you can specify a dictionary of
+    method names to example inputs to trace (see the ``inputs`` argument below).
+
+    See :func:`torch.jit.trace <torch.jit.trace>` for more information on tracing.
+
+ Args:
+ mod (torch.nn.Module): A ``torch.nn.Module`` containing methods whose names are
+ specified in ``inputs``. The given methods will be compiled
+ as a part of a single `ScriptModule`.
+ inputs (dict): A dict containing sample inputs indexed by method names in ``mod``.
+ The inputs will be passed to methods whose names correspond to inputs'
+ keys while tracing.
+ ``{ 'forward' : example_forward_input, 'method2': example_method2_input}``
+ Keyword arguments:
+ check_trace (``bool``, optional): Check if the same inputs run through
+ traced code produce the same outputs. Default: ``True``. You might want
+ to disable this if, for example, your network contains non-
+ deterministic ops or if you are sure that the network is correct despite
+ a checker failure.
+
+ check_inputs (list of dicts, optional): A list of dicts of input arguments that should be used
+            to check the trace against what is expected. Each dict
+ is equivalent to a set of input arguments that would
+ be specified in ``inputs``. For best results, pass in a
+ set of checking inputs representative of the space of
+ shapes and types of inputs you expect the network to see.
+ If not specified, the original ``inputs`` are used for checking
+ check_tolerance (float, optional): Floating-point comparison tolerance to use in the checker procedure.
+ This can be used to relax the checker strictness in the event that
+ results diverge numerically for a known reason, such as operator fusion.
+        example_inputs_is_kwarg (``bool``, optional): This parameter indicates whether the example inputs
+            are a pack of keyword arguments. Default: ``False``.
+
+ Returns:
+ A :class:`ScriptModule` object with a single ``forward`` method containing the traced code.
+    When ``mod`` is a ``torch.nn.Module``, the returned :class:`ScriptModule` will have the same set of
+    sub-modules and parameters as ``mod``.
+
+ Example (tracing a module with multiple methods)::
+
+ import torch
+ import torch.nn as nn
+
+ class Net(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.conv = nn.Conv2d(1, 1, 3)
+
+ def forward(self, x):
+ return self.conv(x)
+
+ def weighted_kernel_sum(self, weight):
+ return weight * self.conv.weight
+
+
+ n = Net()
+ example_weight = torch.rand(1, 1, 3, 3)
+ example_forward_input = torch.rand(1, 1, 3, 3)
+
+ # Trace a specific method and construct `ScriptModule` with
+ # a single `forward` method
+ module = torch.jit.trace(n.forward, example_forward_input)
+
+ # Trace a module (implicitly traces `forward`) and construct a
+ # `ScriptModule` with a single `forward` method
+ module = torch.jit.trace(n, example_forward_input)
+
+ # Trace specific methods on a module (specified in `inputs`), constructs
+ # a `ScriptModule` with `forward` and `weighted_kernel_sum` methods
+ inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
+ module = torch.jit.trace_module(n, inputs)
+
+ """
+ if not _enabled:
+ return mod
+ if optimize is not None:
+ warnings.warn(
+ "`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
+ )
+
+ var_lookup_fn = _create_interpreter_name_lookup_fn(0)
+
+ if not isinstance(mod, torch.nn.Module):
+ raise AttributeError("expected torch.nn.Module as the first argument")
+
+ if not isinstance(inputs, dict):
+ raise AttributeError("expected a dictionary of (method_name, input) pairs")
+
+ old_module_map = torch.jit._trace._trace_module_map
+ try:
+ trace_module_map: Dict[Any, Any] = {}
+
+ def register_submods(mod, prefix):
+ for name, child in mod.named_children():
+ submod_qualname = prefix + "." + name
+ trace_module_map[child] = submod_qualname
+ register_submods(child, submod_qualname)
+
+ trace_module_map["__module"] = mod
+ torch.jit._trace._trace_module_map = trace_module_map
+ register_submods(mod, "__module")
+
+ module = make_module(mod, _module_class, _compilation_unit)
+
+ for method_name, example_inputs in inputs.items():
+ if method_name == "forward":
+ # "forward" is a special case because we need to trace
+ # `Module.__call__`, which sets up some extra tracing, but uses
+ # argument names of the real `Module.forward` method.
+ func = mod
+ forward_method = getattr(mod, method_name)
+ argument_names = get_callable_argument_names(forward_method)
+ else:
+ func = getattr(mod, method_name)
+ argument_names = get_callable_argument_names(func)
+
+ if isinstance(example_inputs, dict) and example_inputs_is_kwarg:
+                # Raise an exception when user-provided key names do not match the traced method's argument names.
+ for key in example_inputs:
+ if key not in argument_names:
+ valid_arguments = "[" + ",".join(argument_names) + "]"
+ raise NameError(
+ f"""'{key}' is not in forward() method's arguments,
+ valid arguments name are {valid_arguments}"""
+ )
+ module._c._create_method_from_trace_with_dict(
+ method_name,
+ func,
+ example_inputs,
+ var_lookup_fn,
+ strict,
+ _force_outplace,
+ argument_names,
+ _store_inputs,
+ )
+ else:
+ example_inputs = make_tuple(example_inputs)
+ module._c._create_method_from_trace(
+ method_name,
+ func,
+ example_inputs,
+ var_lookup_fn,
+ strict,
+ _force_outplace,
+ argument_names,
+ _store_inputs,
+ )
+
+ check_trace_method = module._c._get_method(method_name)
+
+ # Check the trace against new traces created from user-specified inputs
+ if check_trace:
+ if check_inputs is not None:
+ _check_trace(
+ check_inputs,
+ func,
+ check_trace_method,
+ check_tolerance,
+ strict,
+ _force_outplace,
+ True,
+ _module_class,
+ example_inputs_is_kwarg=example_inputs_is_kwarg,
+ )
+ else:
+ _check_trace(
+ [inputs],
+ func,
+ check_trace_method,
+ check_tolerance,
+ strict,
+ _force_outplace,
+ True,
+ _module_class,
+ example_inputs_is_kwarg=example_inputs_is_kwarg,
+ )
+ finally:
+ torch.jit._trace._trace_module_map = old_module_map
+
+ return module
+
+
+def is_tracing():
+ """Return a boolean value.
+
+ Returns ``True`` in tracing (if a function is called during the
+ tracing of code with ``torch.jit.trace``) and ``False`` otherwise.
+ """
+ if is_scripting():
+ return False
+ return torch._C._is_tracing()
+
+
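+# For instance (a sketch): a helper can branch on the tracer without baking
+# the check into the graph, because is_tracing() runs in Python, outside the
+# recorded ops.
+#
+#     def forward_fn(x):
+#         if torch.jit.is_tracing():
+#             assert x.dim() == 2  # tracing-time validation only
+#         return x.sum()
+
+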
+class TracedModule(ScriptModule):
+ _disable_script_meta = True
+
+ def __init__(self, orig, id_set=None, _compilation_unit=None):
+ # XXX: orig can be a nn.Module or a function!
+ super().__init__()
+ assert isinstance(orig, torch.nn.Module)
+
+ # Copy a subset of `orig` to a temporary nn.Module.
+ # This is a way to customize what will actually get compiled by create_script_module
+ id_set = set()
+
+ # This allows us to preserve the original module's qualified name by defining a new
+ # type with the attribute _jit_override_qualname. In torch._jit_internal._qualified_name
+ # we have a special case that will look up this attribute to override whatever qualname
+ # we would get from the python type system
+ class QualnameWrapper(torch.nn.Module):
+ pass
+
+ QualnameWrapper._jit_override_qualname = torch._jit_internal._qualified_name( # type: ignore[attr-defined]
+ type(orig)
+ )
+
+ tmp_module = QualnameWrapper()
+
+ def check_unique(param):
+ if param in id_set:
+ raise ValueError(
+ "TracedModules don't support parameter sharing between modules"
+ )
+ id_set.add(param)
+
+ tmp_module.training = orig.training
+
+ for name, param in orig._parameters.items():
+ if param is not None:
+ tmp_module._parameters[name] = param
+ check_unique(param)
+ for name, buf in orig._buffers.items():
+ if buf is not None:
+ tmp_module._buffers[name] = buf
+ check_unique(buf)
+ for name, val in orig.__dict__.items():
+ if (
+ torch._C._jit_is_script_object(val)
+ and name not in orig._parameters
+ and name not in orig._buffers
+ ):
+ setattr(tmp_module, name, val)
+
+ if orig._backward_hooks:
+ raise ValueError(
+ "Modules that have backward hooks assigned can't be compiled: "
+ + str(orig)
+ )
+
+ for name, submodule in orig._modules.items():
+ if submodule is None:
+ continue
+ tmp_module._modules[name] = make_module(
+ submodule, TracedModule, _compilation_unit=None
+ )
+
+ script_module = torch.jit._recursive.create_script_module(
+ tmp_module, lambda module: (), share_types=False, is_tracing=True
+ )
+
+ self.__dict__["_name"] = type(orig).__name__
+ self.__dict__["_actual_script_module"] = script_module
+ for name in ("_parameters", "_buffers", "_modules", "training"):
+ delattr(self, name)
+
+ def forward(self, *args, **kwargs):
+ raise RuntimeError("Trace submodules cannot be called.")
+
+ def __getattr__(self, attr):
+ if "_actual_script_module" not in self.__dict__:
+ return super().__getattr__(attr)
+ return getattr(self._actual_script_module, attr)
+
+ def __setattr__(self, attr, value):
+ if "_actual_script_module" not in self.__dict__:
+ return super().__setattr__(attr, value)
+ setattr(self._actual_script_module, attr, value)
+
+ def _get_name(self):
+ return self._name
+
+ def extra_repr(self):
+ return f"original_name={self._name}"
+
+
+class TopLevelTracedModule(TracedModule):
+ forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment]
+
+ def _reconstruct(self, cpp_module):
+ """
+ Re-construct an instance of TopLevelTracedModule using an instance of a C++ module.
+
+ Args:
+ cpp_module: The C++ module that this TopLevelTracedModule will be rebuilt around.
+ """
+ self.__dict__["_actual_script_module"]._reconstruct(cpp_module)
+
+
+def _script_if_tracing(fn: Callable[P, R]) -> Callable[P, R]:
+ @functools.wraps(fn)
+ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ if not is_tracing():
+ # Not tracing, don't do anything
+ return fn(*args, **kwargs)
+
+ compiled_fn: Callable[P, R] = script(wrapper.__original_fn) # type: ignore[attr-defined]
+ return compiled_fn(*args, **kwargs)
+
+ wrapper.__original_fn = fn # type: ignore[attr-defined]
+ wrapper.__script_if_tracing_wrapper = True # type: ignore[attr-defined]
+
+ return wrapper
+
+
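+# Intended usage sketch: decorate a helper so eager calls stay uncompiled,
+# while calls made during tracing get scripted, preserving data-dependent
+# control flow that the tracer alone would bake in:
+#
+#     @_script_if_tracing
+#     def pad_to(x: torch.Tensor, length: int) -> torch.Tensor:
+#         if x.size(0) < length:  # data-dependent branch
+#             x = torch.nn.functional.pad(x, (0, length - x.size(0)))
+#         return x
+
+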
+def _get_trace_graph(
+ f,
+ args=(),
+ kwargs=None,
+ strict=True,
+ _force_outplace=False,
+ return_inputs=False,
+ _return_inputs_states=False,
+):
+ """Return a tuple on tracing a function or model.
+
+ .. warning::
+ This function is internal-only and should only be used by the ONNX
+ exporter. If you are trying to get a graph through tracing, please go
+ through the public API instead::
+
+ trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
+ trace_graph = trace.graph
+
+    Trace a function or model, returning a tuple consisting of both the
+    *trace* of an execution and the original return value. If `return_inputs`
+    is set, the trace inputs are also returned as part of the tuple.
+
+ Tracing is guaranteed not to change the semantics of the function/module
+ that is traced.
+
+ Args:
+ f (torch.nn.Module or function): the function or module
+ to be traced.
+ args (tuple or Tensor): the positional arguments to pass to the
+ function/module to be traced. A non-tuple is assumed to
+ be a single positional argument to be passed to the model.
+ kwargs (dict): the keyword arguments to pass to the function/module
+ to be traced.
+
+ Example (trace a cell):
+
+ .. testcode::
+
+ trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
+ """
+ if kwargs is None:
+ kwargs = {}
+ if not isinstance(args, tuple):
+ args = (args,)
+ outs = ONNXTracedModule(
+ f, strict, _force_outplace, return_inputs, _return_inputs_states
+ )(*args, **kwargs)
+ return outs
diff --git a/venv/lib/python3.10/site-packages/torch/jit/annotations.py b/venv/lib/python3.10/site-packages/torch/jit/annotations.py
new file mode 100644
index 0000000000000000000000000000000000000000..804475b35e1d86246e4d1c3d604b78dec4dadcb2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/annotations.py
@@ -0,0 +1,550 @@
+import ast
+import builtins
+import dis
+import enum
+import inspect
+import re
+import typing
+import warnings
+
+from textwrap import dedent
+from typing import Type
+
+import torch
+
+from torch._C import (
+ _GeneratorType,
+ AnyType,
+ AwaitType,
+ BoolType,
+ ComplexType,
+ DeviceObjType,
+ DictType,
+ EnumType,
+ FloatType,
+ FutureType,
+ InterfaceType,
+ IntType,
+ ListType,
+ NoneType,
+ NumberType,
+ OptionalType,
+ StreamObjType,
+ StringType,
+ TensorType,
+ TupleType,
+ UnionType,
+)
+from torch._sources import get_source_lines_and_file
+from .._jit_internal import ( # type: ignore[attr-defined]
+ _Await,
+ _qualified_name,
+ Any,
+ BroadcastingList1,
+ BroadcastingList2,
+ BroadcastingList3,
+ Dict,
+ Future,
+ is_await,
+ is_dict,
+ is_future,
+ is_ignored_fn,
+ is_list,
+ is_optional,
+ is_tuple,
+ is_union,
+ List,
+ Optional,
+ Tuple,
+ Union,
+)
+from ._state import _get_script_class
+
+if torch.distributed.rpc.is_available():
+ from torch._C import RRefType
+ from .._jit_internal import is_rref, RRef
+
+from torch._ops import OpOverloadPacket
+
+
+class Module:
+ def __init__(self, name, members):
+ self.name = name
+ self.members = members
+
+ def __getattr__(self, name):
+ try:
+ return self.members[name]
+ except KeyError:
+ raise RuntimeError(
+ f"Module {self.name} has no member called {name}"
+ ) from None
+
+
+class EvalEnv:
+ env = {
+ "torch": Module("torch", {"Tensor": torch.Tensor}),
+ "Tensor": torch.Tensor,
+ "typing": Module("typing", {"Tuple": Tuple}),
+ "Tuple": Tuple,
+ "List": List,
+ "Dict": Dict,
+ "Optional": Optional,
+ "Union": Union,
+ "Future": Future,
+ "Await": _Await,
+ }
+
+ def __init__(self, rcb):
+ self.rcb = rcb
+ if torch.distributed.rpc.is_available():
+ self.env["RRef"] = RRef
+
+ def __getitem__(self, name):
+ if name in self.env:
+ return self.env[name]
+ if self.rcb is not None:
+ return self.rcb(name)
+ return getattr(builtins, name, None)
+
+
+def get_signature(fn, rcb, loc, is_method):
+ if isinstance(fn, OpOverloadPacket):
+ signature = try_real_annotations(fn.op, loc)
+ else:
+ signature = try_real_annotations(fn, loc)
+ if signature is not None and is_method:
+ # If this is a method, then the signature will include a type for
+ # `self`, but type comments do not contain a `self`. So strip it
+ # away here so everything is consistent (`inspect.ismethod` does
+ # not work here since `fn` is unbound at this point)
+ param_types, return_type = signature
+ param_types = param_types[1:]
+ signature = (param_types, return_type)
+
+ if signature is None:
+ type_line, source = None, None
+ try:
+ source = dedent("".join(get_source_lines_and_file(fn)[0]))
+ type_line = get_type_line(source)
+ except TypeError:
+ pass
+    # This might happen either because we failed to get the source of fn, or
+    # because it didn't have any annotations.
+ if type_line is not None:
+ signature = parse_type_line(type_line, rcb, loc)
+
+ return signature
+
+
+def is_function_or_method(the_callable):
+ # A stricter version of `inspect.isroutine` that does not pass for built-in
+ # functions
+ return inspect.isfunction(the_callable) or inspect.ismethod(the_callable)
+
+
+def is_vararg(the_callable):
+ if not is_function_or_method(the_callable) and callable(the_callable): # noqa: B004
+ # If `the_callable` is a class, de-sugar the call so we can still get
+ # the signature
+ the_callable = the_callable.__call__
+
+ if is_function_or_method(the_callable):
+ return inspect.getfullargspec(the_callable).varargs is not None
+ else:
+ return False
+
+
+def get_param_names(fn, n_args):
+ if isinstance(fn, OpOverloadPacket):
+ fn = fn.op
+
+ if (
+ not is_function_or_method(fn)
+ and callable(fn)
+ and is_function_or_method(fn.__call__)
+ ): # noqa: B004
+ # De-sugar calls to classes
+ fn = fn.__call__
+
+ if is_function_or_method(fn):
+ if is_ignored_fn(fn):
+ fn = inspect.unwrap(fn)
+ return inspect.getfullargspec(fn).args
+ else:
+        # The `fn` was not a method or function (maybe a class with a __call__
+        # method), so use a default param name list
+ return [str(i) for i in range(n_args)]
+
+
+def check_fn(fn, loc):
+ # Make sure the function definition is not a class instantiation
+ try:
+ source = dedent("".join(get_source_lines_and_file(fn)[0]))
+ except (OSError, TypeError):
+ return
+ if source is None:
+ return
+
+ py_ast = ast.parse(source)
+ if len(py_ast.body) == 1 and isinstance(py_ast.body[0], ast.ClassDef):
+ raise torch.jit.frontend.FrontendError(
+ loc,
+ f"Cannot instantiate class '{py_ast.body[0].name}' in a script function",
+ )
+ if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
+ raise torch.jit.frontend.FrontendError(
+ loc, "Expected a single top-level function"
+ )
+
+
+def _eval_no_call(stmt, glob, loc):
+ """Evaluate statement as long as it does not contain any method/function calls."""
+ bytecode = compile(stmt, "", mode="eval")
+ for insn in dis.get_instructions(bytecode):
+ if "CALL" in insn.opname:
+ raise RuntimeError(
+ f"Type annotation should not contain calls, but '{stmt}' does"
+ )
+ return eval(bytecode, glob, loc) # type: ignore[arg-type] # noqa: P204
+
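+# For illustration (assumed usage, not part of the module):
+#   _eval_no_call("List[int]", {}, EvalEnv(None))    # fine: subscription emits no CALL opcode
+#   _eval_no_call("list((1, 2))", {}, EvalEnv(None)) # raises: the bytecode contains a CALL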
+
+def parse_type_line(type_line, rcb, loc):
+ """Parse a type annotation specified as a comment.
+
+ Example inputs:
+ # type: (Tensor, torch.Tensor) -> Tuple[Tensor]
+ # type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor
+ """
+ arg_ann_str, ret_ann_str = split_type_line(type_line)
+
+ try:
+ arg_ann = _eval_no_call(arg_ann_str, {}, EvalEnv(rcb))
+ except (NameError, SyntaxError) as e:
+ raise RuntimeError(
+ "Failed to parse the argument list of a type annotation"
+ ) from e
+
+ if not isinstance(arg_ann, tuple):
+ arg_ann = (arg_ann,)
+
+ try:
+ ret_ann = _eval_no_call(ret_ann_str, {}, EvalEnv(rcb))
+ except (NameError, SyntaxError) as e:
+ raise RuntimeError(
+ "Failed to parse the return type of a type annotation"
+ ) from e
+
+ arg_types = [ann_to_type(ann, loc) for ann in arg_ann]
+ return arg_types, ann_to_type(ret_ann, loc)
+
+
+def get_type_line(source):
+ """Try to find the line containing a comment with the type annotation."""
+ type_comment = "# type:"
+
+ lines = source.split("\n")
+ lines = list(enumerate(lines))
+ type_lines = list(filter(lambda line: type_comment in line[1], lines))
+ # `type: ignore` comments may be needed in JIT'ed functions for mypy, due
+ # to the hack in torch/_VF.py.
+
+ # An ignore type comment can be of following format:
+ # 1) type: ignore
+ # 2) type: ignore[rule-code]
+ # This ignore statement must be at the end of the line
+
+ # adding an extra backslash before the space, to avoid triggering
+ # one of the checks in .github/workflows/lint.yml
+ type_pattern = re.compile("# type:\\ ignore(\\[[a-zA-Z-]+\\])?$")
+ type_lines = list(filter(lambda line: not type_pattern.search(line[1]), type_lines))
+
+ if len(type_lines) == 0:
+ # Catch common typo patterns like extra spaces, typo in 'ignore', etc.
+ wrong_type_pattern = re.compile("#[\t ]*type[\t ]*(?!: ignore(\\[.*\\])?$):")
+ wrong_type_lines = list(
+ filter(lambda line: wrong_type_pattern.search(line[1]), lines)
+ )
+ if len(wrong_type_lines) > 0:
+ raise RuntimeError(
+ "The annotation prefix in line "
+ + str(wrong_type_lines[0][0])
+ + " is probably invalid.\nIt must be '# type:'"
+ + "\nSee PEP 484 (https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)" # noqa: B950
+ + "\nfor examples"
+ )
+ return None
+ elif len(type_lines) == 1:
+ # Only 1 type line, quit now
+ return type_lines[0][1].strip()
+
+ # Parse split up argument types according to PEP 484
+ # https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code
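+ # For example (illustrative), per-parameter type comments such as:
+ #     def fn(x,  # type: Tensor
+ #            y,  # type: Tensor
+ #            ):
+ #         # type: (...) -> Tuple[Tensor, Tensor]
+ # are stitched back together below into the single line
+ # "# type: (Tensor, Tensor) -> Tuple[Tensor, Tensor]"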
+ return_line = None
+ parameter_type_lines = []
+ for line_num, line in type_lines:
+ if "# type: (...) -> " in line:
+ return_line = (line_num, line)
+ break
+ elif type_comment in line:
+ parameter_type_lines.append(line)
+ if return_line is None:
+ raise RuntimeError(
+ "Return type line '# type: (...) -> ...' not found on multiline "
+ "type annotation\nfor type lines:\n"
+ + "\n".join([line[1] for line in type_lines])
+ + "\n(See PEP 484 https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)"
+ )
+
+ def get_parameter_type(line):
+ item_type = line[line.find(type_comment) + len(type_comment) :]
+ return item_type.strip()
+
+ types = map(get_parameter_type, parameter_type_lines)
+ parameter_types = ", ".join(types)
+
+ return return_line[1].replace("...", parameter_types)
+
+
+def split_type_line(type_line):
+ """Split the comment with the type annotation into parts for argument and return types.
+
+ For example, for an input of:
+ # type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]
+
+ This function will return:
+ ("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")
+
+ """
+ start_offset = len("# type:")
+ try:
+ arrow_pos = type_line.index("->")
+ except ValueError:
+ raise RuntimeError(
+ "Syntax error in type annotation (cound't find `->`)"
+ ) from None
+ return type_line[start_offset:arrow_pos].strip(), type_line[arrow_pos + 2 :].strip()
+
+
+def try_real_annotations(fn, loc):
+ """Try to use the Py3.5+ annotation syntax to get the type."""
+ try:
+ # Note: anything annotated as `Optional[T]` will automatically
+ # be returned as `Union[T, None]` per
+ # https://github.com/python/typing/blob/master/src/typing.py#L850
+ sig = inspect.signature(fn)
+ except ValueError:
+ return None
+
+ all_annots = [sig.return_annotation] + [
+ p.annotation for p in sig.parameters.values()
+ ]
+ if all(ann is sig.empty for ann in all_annots):
+ return None
+
+ arg_types = [ann_to_type(p.annotation, loc) for p in sig.parameters.values()]
+ return_type = ann_to_type(sig.return_annotation, loc)
+ return arg_types, return_type
+
+
+# Finds common type for enum values belonging to an Enum class. If not all
+# values have the same type, AnyType is returned.
+def get_enum_value_type(e: Type[enum.Enum], loc):
+ enum_values: List[enum.Enum] = list(e)
+ if not enum_values:
+ raise ValueError(f"No enum values defined for: '{e.__class__}'")
+
+ types = {type(v.value) for v in enum_values}
+ ir_types = [try_ann_to_type(t, loc) for t in types]
+
+ # If Enum values are of different types, an exception will be raised here.
+ # Even though Python supports this case, we chose not to implement it to
+ # avoid overcomplicating the logic here for a rare use case. Please file a
+ # feature request if you find it necessary.
+ res = torch._C.unify_type_list(ir_types)
+ if not res:
+ return AnyType.get()
+ return res
+
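+# Illustrative example (assumed): for
+#   class Color(enum.Enum):
+#       RED = 1
+#       GREEN = 2
+# both values are ints, so the unified value type is the IR int type; mixed value
+# types fail to unify in the `unify_type_list` call above and yield AnyType.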
+
+def is_tensor(ann):
+ if issubclass(ann, torch.Tensor):
+ return True
+
+ if issubclass(
+ ann,
+ (
+ torch.LongTensor,
+ torch.DoubleTensor,
+ torch.FloatTensor,
+ torch.IntTensor,
+ torch.ShortTensor,
+ torch.HalfTensor,
+ torch.CharTensor,
+ torch.ByteTensor,
+ torch.BoolTensor,
+ ),
+ ):
+ warnings.warn(
+ "TorchScript will treat type annotations of Tensor "
+ "dtype-specific subtypes as if they are normal Tensors. "
+ "dtype constraints are not enforced in compilation either."
+ )
+ return True
+
+ return False
+
+
+def _fake_rcb(inp):
+ return None
+
+
+def try_ann_to_type(ann, loc, rcb=None):
+ ann_args = typing.get_args(ann) # always returns a tuple!
+
+ if ann is inspect.Signature.empty:
+ return TensorType.getInferred()
+ if ann is None:
+ return NoneType.get()
+ if inspect.isclass(ann) and is_tensor(ann):
+ return TensorType.get()
+ if is_tuple(ann):
+ # Special case for the empty Tuple type annotation `Tuple[()]`
+ if len(ann_args) == 1 and ann_args[0] == ():
+ return TupleType([])
+ return TupleType([try_ann_to_type(a, loc) for a in ann_args])
+ if is_list(ann):
+ elem_type = try_ann_to_type(ann_args[0], loc)
+ if elem_type:
+ return ListType(elem_type)
+ if is_dict(ann):
+ key = try_ann_to_type(ann_args[0], loc)
+ value = try_ann_to_type(ann_args[1], loc)
+ # Raise error if key or value is None
+ if key is None:
+ raise ValueError(
+ f"Unknown type annotation: '{ann_args[0]}' at {loc.highlight()}"
+ )
+ if value is None:
+ raise ValueError(
+ f"Unknown type annotation: '{ann_args[1]}' at {loc.highlight()}"
+ )
+ return DictType(key, value)
+ if is_optional(ann):
+ if issubclass(ann_args[1], type(None)):
+ contained = ann_args[0]
+ else:
+ contained = ann_args[1]
+ valid_type = try_ann_to_type(contained, loc)
+ msg = "Unsupported annotation {} could not be resolved because {} could not be resolved. At\n{}"
+ assert valid_type, msg.format(repr(ann), repr(contained), repr(loc))
+ return OptionalType(valid_type)
+ if is_union(ann):
+ # TODO: this is a hack to recognize NumberType
+ if set(ann_args) == {int, float, complex}:
+ return NumberType.get()
+ inner: List = []
+ # We need these extra checks because both `None` and invalid
+ # values will return `None`
+ # TODO: Determine if the other cases need to be fixed as well
+ for a in typing.get_args(ann):
+ if a is None:
+ inner.append(NoneType.get())
+ maybe_type = try_ann_to_type(a, loc)
+ msg = "Unsupported annotation {} could not be resolved because {} could not be resolved. At\n{}"
+ assert maybe_type, msg.format(repr(ann), repr(a), repr(loc))
+ inner.append(maybe_type)
+ return UnionType(inner) # type: ignore[arg-type]
+ if torch.distributed.rpc.is_available() and is_rref(ann):
+ return RRefType(try_ann_to_type(ann_args[0], loc))
+ if is_future(ann):
+ return FutureType(try_ann_to_type(ann_args[0], loc))
+ if is_await(ann):
+ elementType = try_ann_to_type(ann_args[0], loc) if ann_args else AnyType.get()
+ return AwaitType(elementType)
+ if ann is float:
+ return FloatType.get()
+ if ann is complex:
+ return ComplexType.get()
+ if ann is int or ann is torch.SymInt:
+ return IntType.get()
+ if ann is str:
+ return StringType.get()
+ if ann is bool:
+ return BoolType.get()
+ if ann is Any:
+ return AnyType.get()
+ if ann is type(None):
+ return NoneType.get()
+ if inspect.isclass(ann) and hasattr(ann, "__torch_script_interface__"):
+ return InterfaceType(ann.__torch_script_interface__)
+ if ann is torch.device:
+ return DeviceObjType.get()
+ if ann is torch.Generator:
+ return _GeneratorType.get()
+ if ann is torch.Stream:
+ return StreamObjType.get()
+ if ann is torch.dtype:
+ return IntType.get() # dtype not yet bound in as its own type
+ if inspect.isclass(ann) and issubclass(ann, enum.Enum):
+ if _get_script_class(ann) is None:
+ scripted_class = torch.jit._script._recursive_compile_class(ann, loc)
+ name = scripted_class.qualified_name()
+ else:
+ name = _qualified_name(ann)
+ return EnumType(name, get_enum_value_type(ann, loc), list(ann))
+ if inspect.isclass(ann):
+ maybe_script_class = _get_script_class(ann)
+ if maybe_script_class is not None:
+ return maybe_script_class
+ if torch._jit_internal.can_compile_class(ann):
+ return torch.jit._script._recursive_compile_class(ann, loc)
+
+ # Maybe resolve a NamedTuple to a Tuple Type
+ if rcb is None:
+ rcb = _fake_rcb
+ return torch._C._resolve_type_from_object(ann, loc, rcb)
+
+
+def ann_to_type(ann, loc, rcb=None):
+ the_type = try_ann_to_type(ann, loc, rcb)
+ if the_type is not None:
+ return the_type
+ raise ValueError(f"Unknown type annotation: '{ann}' at {loc.highlight()}")
+
+
+__all__ = [
+ "Any",
+ "List",
+ "BroadcastingList1",
+ "BroadcastingList2",
+ "BroadcastingList3",
+ "Tuple",
+ "is_tuple",
+ "is_list",
+ "Dict",
+ "is_dict",
+ "is_optional",
+ "is_union",
+ "TensorType",
+ "TupleType",
+ "FloatType",
+ "ComplexType",
+ "IntType",
+ "ListType",
+ "StringType",
+ "DictType",
+ "AnyType",
+ "Module",
+ # TODO: Consider not exporting these during wildcard import (reserve
+ # that for the types; for idiomatic typing code.)
+ "get_signature",
+ "check_fn",
+ "get_param_names",
+ "parse_type_line",
+ "get_type_line",
+ "split_type_line",
+ "try_real_annotations",
+ "try_ann_to_type",
+ "ann_to_type",
+]
diff --git a/venv/lib/python3.10/site-packages/torch/jit/frontend.py b/venv/lib/python3.10/site-packages/torch/jit/frontend.py
new file mode 100644
index 0000000000000000000000000000000000000000..37237af33d4846fab483c88104c1ed05193c4eba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/frontend.py
@@ -0,0 +1,1266 @@
+import ast
+import dataclasses
+import inspect
+import re
+import string
+import sys
+from collections import namedtuple
+from textwrap import dedent
+from typing import List, Tuple # noqa: F401
+
+import torch
+import torch.jit.annotations
+from torch import _jit_internal
+from torch._C._jit_tree_views import (
+ Apply,
+ Assert,
+ Assign,
+ Attribute,
+ AugAssign,
+ BinOp,
+ Break,
+ ClassDef,
+ Const,
+ Continue,
+ Decl,
+ Def,
+ Delete,
+ DictComp,
+ DictLiteral,
+ Dots,
+ EmptyTypeAnnotation,
+ ExprStmt,
+ FalseLiteral,
+ For,
+ Ident,
+ If,
+ ListComp,
+ ListLiteral,
+ NoneLiteral,
+ Param,
+ Pass,
+ Property,
+ Raise,
+ Return,
+ Select,
+ SliceExpr,
+ Starred,
+ Stmt,
+ StringLiteral,
+ Subscript,
+ TernaryIf,
+ TrueLiteral,
+ TupleLiteral,
+ UnaryOp,
+ Var,
+ While,
+ With,
+ WithItem,
+)
+from torch._jit_internal import ( # noqa: F401
+ _is_drop_fn,
+ FunctionModifiers,
+ is_static_fn,
+ should_drop,
+)
+from torch._sources import (
+ get_source_lines_and_file,
+ make_source_context,
+ parse_def,
+ ParsedDef as _ParsedDef,
+)
+from torch.jit._dataclass_impls import DATACLASS_MAGIC_METHODS
+from torch.jit._monkeytype_config import get_qualified_name, monkeytype_trace
+
+_IS_ASTUNPARSE_INSTALLED = False
+try:
+ import astunparse # type: ignore[import]
+
+ _IS_ASTUNPARSE_INSTALLED = True
+except ImportError:
+ pass
+
+# Borrowed from cPython implementation
+# https://github.com/python/cpython/blob/561612d8456cfab5672c9b445521113b847bd6b3/Lib/textwrap.py#L411#
+
+_reserved_prefix = "__jit"
+_reserved_names = {"print"}
+_identifier_chars = set(string.ascii_lowercase + string.ascii_uppercase + string.digits)
+
+
+def is_reserved_name(name):
+ return name.startswith(_reserved_prefix) or name in _reserved_names
+
+
+pretty_node_names = {
+ ast.FunctionDef: "function definitions",
+ ast.For: "for loops",
+ ast.Delete: "del statements",
+ ast.ClassDef: "class definitions",
+ ast.With: "with statements",
+ ast.Raise: "raise statements",
+ ast.Assert: "assertions",
+ ast.Import: "import statements",
+ ast.ImportFrom: "import statements",
+ ast.Global: "global variables",
+ ast.Break: "break statements",
+ ast.Continue: "continue statements",
+}
+
+node_start_tokens = {
+ ast.FunctionDef: "def",
+ ast.For: "for",
+ ast.Delete: "del",
+ ast.ClassDef: "class",
+ ast.With: "with",
+ ast.Raise: "raise",
+ ast.Assert: "assert",
+ ast.Import: "import",
+ ast.ImportFrom: "from",
+ ast.Global: "global",
+ ast.Break: "break",
+ ast.Continue: "continue",
+}
+
+pretty_node_names.update(
+ {
+ ast.AsyncFunctionDef: "async function definitions",
+ ast.AsyncFor: "async for loops",
+ ast.AsyncWith: "async with statements",
+ ast.Try: "try blocks",
+ ast.Nonlocal: "nonlocal variables",
+ }
+)
+
+node_start_tokens.update(
+ {
+ ast.AsyncFunctionDef: "async def",
+ ast.AsyncFor: "async for",
+ ast.AsyncWith: "async with",
+ ast.Try: "try",
+ ast.Nonlocal: "nonlocal",
+ }
+)
+
+pretty_node_names.update(
+ {
+ ast.AnnAssign: "annotated assignments",
+ }
+)
+# NB: no specific token for AnnAssign
+
+
+class FrontendError(Exception):
+ def __init__(self, source_range, msg):
+ self.source_range = source_range
+ self.msg = msg
+
+ # This has to be instantiated here so the ErrorReport is accurate to the
+ # call stack when the FrontendError was raised
+ self.error_report = torch._C.ErrorReport(self.source_range)
+
+ def __str__(self):
+ return self.msg + self.error_report.what().lstrip()
+
+
+class NotSupportedError(FrontendError):
+ pass
+
+
+class UnsupportedNodeError(NotSupportedError):
+ def __init__(self, ctx, offending_node, reason=""):
+ # If we don't have a specific token, we default to length of 1
+ node_type = type(offending_node)
+ range_len = len(node_start_tokens.get(node_type, " "))
+ source_range = ctx.make_range(
+ offending_node.lineno,
+ offending_node.col_offset,
+ offending_node.col_offset + range_len,
+ )
+ feature_name = pretty_node_names.get(node_type, node_type.__name__)
+ msg = f"{feature_name} {reason + ' ' if reason else ''}aren't supported"
+ super().__init__(source_range, msg)
+
+
+class FrontendTypeError(FrontendError):
+ pass
+
+
+def build_withitems(ctx, items):
+ items = [build_withitem(ctx, i) for i in items]
+ return list(items)
+
+
+def build_stmts(ctx, stmts):
+ stmts = [build_stmt(ctx, s) for s in stmts]
+ return list(filter(None, stmts))
+
+
+def get_class_properties(cls, self_name):
+ """
+ Get a list of Property objects representing the properties of a class.
+
+ Args:
+ cls: The class to get properties of.
+ self_name: The name of the class that the properties should belong to.
+ Returns:
+ A list of Property objects corresponding to the properties of cls. Property
+ here refers to the subclass of TreeView.
+ """
+ props = inspect.getmembers(cls, predicate=lambda m: isinstance(m, property))
+ # Any property that should not be compiled must be in this list on the Module.
+ unused_properties = getattr(cls, "__jit_unused_properties__", [])
+
+ # Create Property TreeView objects from inspected property objects.
+ properties = []
+ for prop in props:
+ if prop[0] not in unused_properties and not should_drop(prop[1].fget):
+ getter = get_jit_def(
+ prop[1].fget, f"__{prop[0]}_getter", self_name=self_name
+ )
+ setter = (
+ get_jit_def(prop[1].fset, f"__{prop[0]}_setter", self_name=self_name)
+ if prop[1].fset
+ else None
+ )
+ properties.append(
+ Property(getter.range(), Ident(getter.range(), prop[0]), getter, setter)
+ )
+
+ return properties
+
+
+def get_class_assigns(ctx, cls_ast):
+ assigns = []
+
+ def maybe_build_assign(builder, entry):
+ nonlocal assigns
+ try:
+ assigns.append(builder(ctx, entry))
+ except NotSupportedError:
+ pass
+
+ for entry in cls_ast.body:
+ if isinstance(entry, ast.Assign):
+ maybe_build_assign(StmtBuilder.build_Assign, entry)
+ elif isinstance(entry, ast.AnnAssign):
+ maybe_build_assign(StmtBuilder.build_AnnAssign, entry)
+ return assigns
+
+
+def get_jit_class_def(cls, self_name):
+ # Get defs for each method within the current class independently
+ # TODO: proper overriding analysis when implementing class inheritance
+ methods = inspect.getmembers(
+ cls,
+ predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
+ and not is_static_fn(cls, m.__name__)
+ and m.__name__ in cls.__dict__
+ and not _is_drop_fn(m),
+ )
+
+ def is_classmethod(fn):
+ return inspect.ismethod(fn) and getattr(fn, "__self__", None) == cls
+
+ # Get and parse the source code for this class
+ sourcelines, file_lineno, filename = get_source_lines_and_file(
+ cls, torch._C.ErrorReport.call_stack()
+ )
+ source = "".join(sourcelines)
+
+ dedent_src = dedent(source)
+ py_ast = ast.parse(dedent_src)
+
+ class_ast = py_ast.body[0]
+ assert isinstance(class_ast, ast.ClassDef)
+
+ # Special case for dataclasses. In general we need access to the source code for
+ # an object in order to JIT compile it. But the dataclasses module dynamically synthesizes
+ # magic methods for classes, and we can't get the source code for these methods. As a
+ # workaround, we synthesize TorchScript-friendly implementations ourselves.
+ if dataclasses.is_dataclass(cls):
+ # Detect whether the user manually implemented any of the magic methods. If they did,
+ # we don't want to synthesize/override them.
+ overrides = {
+ method.name
+ for method in class_ast.body
+ if isinstance(method, ast.FunctionDef)
+ and method.name in DATACLASS_MAGIC_METHODS
+ }
+ for i, (name, _) in enumerate(methods):
+ # Is this a magic method we can synthesize?
+ synthesizer_fn = DATACLASS_MAGIC_METHODS.get(name)
+ if synthesizer_fn and name not in overrides:
+ parsed_def = synthesizer_fn(cls)
+ methods[i] = name, parsed_def
+ func = getattr(cls, name)
+ _jit_internal.loader.cache(func, parsed_def.source)
+
+ method_defs = [
+ get_jit_def(obj, name, self_name=self_name, is_classmethod=is_classmethod(obj))
+ for (name, obj) in methods
+ ]
+ properties = get_class_properties(cls, self_name)
+
+ leading_whitespace_len = len(source.split("\n", 1)[0]) - len(
+ dedent_src.split("\n", 1)[0]
+ )
+ ctx = make_source_context(
+ source, filename, file_lineno, leading_whitespace_len, False
+ )
+ assigns = get_class_assigns(ctx, class_ast)
+
+ return build_class_def(ctx, class_ast, method_defs, properties, self_name, assigns)
+
+
+def get_jit_def(fn, def_name, self_name=None, is_classmethod=False):
+ """
+ Build a JIT AST (TreeView) from the given function.
+
+ Args:
+ fn: A function object to compile or a pre-parsed ParsedDef object
+ def_name: The name to give to the resulting AST object. This is not
+ always the same as `fn.__name__`, for example:
+ def _forward(self):
+ ...
+ forward = _forward
+ In this case, the `__name__` attribute of the function object is "_forward",
+ but we want the result AST to have the name "forward".
+ self_name: If this function is a method, what the type name of `self` is.
+ is_classmethod: Whether `fn` is a classmethod. If so, an assignment binding
+ its first argument to `self_name` is inserted at the top of the body.
+ """
+ parsed_def = parse_def(fn) if not isinstance(fn, _ParsedDef) else fn
+ type_line = torch.jit.annotations.get_type_line(parsed_def.source)
+ fn_def = parsed_def.ast.body[0]
+
+ if is_classmethod:
+ arg_name = fn_def.args.args[0].arg
+ # Insert a statement that assigns the first argument to the class
+ assign_stmt = ast.parse(f"{arg_name} = {self_name}").body[0]
+ fn_def.body.insert(0, assign_stmt)
+
+ # Swap out the function signature and body if it is unused
+ if should_drop(fn):
+ unused_fn_def = ast.parse(
+ 'def unused_fn(self: Any):\n\traise RuntimeError("Cannot call @unused methods")'
+ )
+ if len(unused_fn_def.body) != 1 or not isinstance(
+ unused_fn_def.body[0], ast.FunctionDef
+ ):
+ raise RuntimeError(
+ f"Expected a single top-level function: {parsed_def.filename}:{parsed_def.file_lineno}"
+ )
+ unused_def = unused_fn_def.body[0]
+ fn_def.body = unused_def.body
+ # kwarg/vararg not supported by `build_def`
+ fn_def.args.kwarg = fn_def.args.vararg = None
+ for arg in fn_def.args.args + fn_def.args.kwonlyargs:
+ # Replace potentially unsupported type annotations by "Any"
+ arg.annotation = unused_def.args.args[0].annotation
+ if _is_drop_fn(fn):
+ # Dropping potentially unsupported return type annotation for jit._drop
+ fn_def.returns = None
+ fn_def.type_comment = None
+
+ # If MonkeyType is installed, get all the consolidated type traces
+ # for the arguments from type_trace_db
+ type_trace_db = torch.jit._script._get_type_trace_db()
+ pdt_arg_types = None
+ if monkeytype_trace and not isinstance(fn, _ParsedDef): # type: ignore[truthy-function]
+ qualname = get_qualified_name(fn)
+ pdt_arg_types = type_trace_db.get_args_types(qualname)
+
+ return build_def(
+ parsed_def.ctx,
+ fn_def,
+ type_line,
+ def_name,
+ self_name=self_name,
+ pdt_arg_types=pdt_arg_types,
+ )
+
+
+# TODO: more robust handling of recognizing ignore context manager
+def is_torch_jit_ignore_context_manager(stmt):
+ # checks if the statement is torch.jit.ignore context manager
+ if isinstance(stmt.items[0].context_expr, ast.Call):
+ # extract torch part
+ function = stmt.items[0].context_expr.func
+ if isinstance(function, ast.Attribute):
+ attr_name = function.attr
+ attr_value = function.value
+ if attr_name == "_IgnoreContextManager" and isinstance(
+ attr_value, ast.Attribute
+ ):
+ # there should be at most two nested attributes (e.g. torch.jit._IgnoreContextManager)
+ if attr_value.attr == "jit" and isinstance(attr_value.value, ast.Name):
+ if attr_value.value.id == "torch":
+ return True
+ return False
+
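+# Illustrative example (assumed usage) of a statement the check above matches:
+#   with torch.jit._IgnoreContextManager(x="inp:int", y="out:int"):
+#       ...
+# i.e. a `with` whose context expression is a Call on the attribute chain
+# torch.jit._IgnoreContextManager.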
+
+class Builder:
+ def __call__(self, ctx, node):
+ method = getattr(self, "build_" + node.__class__.__name__, None)
+ if method is None:
+ raise UnsupportedNodeError(ctx, node)
+ return method(ctx, node)
+
+
+def build_class_def(ctx, py_def, methods, properties, self_name, assigns):
+ r = ctx.make_range(
+ py_def.lineno, py_def.col_offset, py_def.col_offset + len("class")
+ )
+ return ClassDef(
+ Ident(r, self_name), [Stmt(method) for method in methods], properties, assigns
+ )
+
+
+def build_def(ctx, py_def, type_line, def_name, self_name=None, pdt_arg_types=None):
+ body = py_def.body
+ r = ctx.make_range(py_def.lineno, py_def.col_offset, py_def.col_offset + len("def"))
+
+ param_list = build_param_list(ctx, py_def.args, self_name, pdt_arg_types)
+ return_type = None
+ if getattr(py_def, "returns", None) is not None:
+ return_type = build_expr(ctx, py_def.returns)
+
+ decl = Decl(r, param_list, return_type)
+ is_method = self_name is not None
+ if type_line is not None:
+ type_comment_decl = torch._C.parse_type_comment(type_line)
+ decl = torch._C.merge_type_from_type_comment(decl, type_comment_decl, is_method)
+
+ return Def(Ident(r, def_name), decl, build_stmts(ctx, body))
+
+
+_vararg_kwarg_err = (
+ "Compiled functions can't take variable number of arguments "
+ "or use keyword-only arguments with defaults"
+)
+
+
+def build_param_list(ctx, py_args, self_name, pdt_arg_types=None):
+ if py_args.kwarg is not None:
+ expr = py_args.kwarg
+ ctx_range = ctx.make_range(
+ expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg)
+ )
+ raise NotSupportedError(ctx_range, _vararg_kwarg_err)
+ if py_args.vararg is not None:
+ expr = py_args.vararg
+ ctx_range = ctx.make_range(
+ expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg)
+ )
+ raise NotSupportedError(ctx_range, _vararg_kwarg_err)
+ if len(py_args.kw_defaults) > 0:
+ # kw_defaults is a list of the values for the kwargs (which default to None),
+ # so they don't actually have line numbers.
+ for arg in py_args.kw_defaults:
+ if arg is not None:
+ ctx_range = build_expr(ctx, arg).range()
+ raise NotSupportedError(ctx_range, _vararg_kwarg_err)
+
+ # List of Tuple of args and type as inferred by profile directed typing
+ arg_and_types = [
+ (
+ arg,
+ pdt_arg_types[arg.arg]
+ if pdt_arg_types and bool(pdt_arg_types[arg.arg])
+ else None,
+ )
+ for arg in py_args.args
+ ]
+ arg_and_types_kwonlyargs = [
+ (
+ arg,
+ pdt_arg_types[arg.arg]
+ if pdt_arg_types and bool(pdt_arg_types[arg.arg])
+ else None,
+ )
+ for arg in py_args.kwonlyargs
+ ]
+
+ result = [
+ build_param(ctx, arg, self_name, kwarg_only=False, pdt_arg_type=arg_type)
+ for arg, arg_type in arg_and_types
+ ]
+ result += [
+ build_param(ctx, arg, self_name, kwarg_only=True, pdt_arg_type=arg_type)
+ for arg, arg_type in arg_and_types_kwonlyargs
+ ]
+ return result
+
+
+def build_param(ctx, py_arg, self_name, kwarg_only, pdt_arg_type=None):
+ # NB: In Python3 py_arg is a pair of (str arg, expr? annotation)
+ name = py_arg.arg
+ r = ctx.make_range(py_arg.lineno, py_arg.col_offset, py_arg.col_offset + len(name))
+ if getattr(py_arg, "annotation", None) is not None:
+ annotation_expr = build_expr(ctx, py_arg.annotation)
+ elif pdt_arg_type:
+ annotation_expr = Var(Ident(r, pdt_arg_type))
+ elif self_name is not None and name == "self":
+ annotation_expr = Var(Ident(r, self_name))
+ else:
+ annotation_expr = EmptyTypeAnnotation(r)
+ return Param(annotation_expr, Ident(r, name), kwarg_only)
+
+
+def build_ignore_context_manager(ctx, stmt):
+ InputType = namedtuple("InputType", ["name", "ann"])
+ OutputType = namedtuple("OutputType", ["name", "ann"])
+
+ def process_ins_outs(args):
+ # parse the context manager to figure out inputs and outputs
+ # with their annotated types
+ # TODO: add input, output validator
+ inputs = []
+ outputs = []
+ for arg in args:
+ var_name = arg.arg
+ var_ann = arg.value.value
+ var_decl_type, var_ann = var_ann.split(":")
+ if var_decl_type == "inp":
+ inputs.append(InputType(var_name, var_ann))
+ if var_decl_type == "out":
+ outputs.append(OutputType(var_name, var_ann))
+ return inputs, outputs
+
+ def create_unique_name_ext(ctx, stmt):
+ # extension will be based on the full path filename plus
+ # the line number of original context manager
+ fn = re.sub(r"[^a-zA-Z0-9_]", "_", ctx.filename)
+ return f"{fn}_{stmt.lineno}"
+
+ def build_return_ann_stmt(outputs):
+ return_type_ann = ""
+ return_statement_str = "return "
+ if len(outputs) == 0:
+ return_type_ann += " -> None"
+ if len(outputs) == 1:
+ return_type_ann = " -> " + outputs[0].ann
+ return_statement_str += outputs[0].name
+ if len(outputs) > 1:
+ return_type_ann = " -> Tuple"
+ return_type_ann += "[" + ", ".join([var.ann for var in outputs]) + "]"
+ return_statement_str += ", ".join([var.name for var in outputs])
+ return return_type_ann, return_statement_str
+
+ def build_args(args):
+ return ", ".join([arg.name for arg in args])
+
+ inputs, outputs = process_ins_outs(stmt.items[0].context_expr.keywords)
+
+ # build the replacement function str with given inputs and outputs
+ ignore_function_name = "func_ignore_" + create_unique_name_ext(ctx, stmt)
+ ignore_function_str = "\ndef " + ignore_function_name
+ ignore_function_str += (
+ "(" + ", ".join([var.name + " :" + var.ann for var in inputs]) + ")"
+ )
+
+ return_ann, return_stmt = build_return_ann_stmt(outputs)
+ ignore_function_str += return_ann + ": pass"
+
+ # first create the functionDef object from just declaration
+ ignore_function = ast.parse(ignore_function_str).body[0]
+
+ # dump the body of context manager to dummy function
+ ignore_function.body = stmt.body # type: ignore[attr-defined]
+
+ # insert return statement to the function
+ return_stmt = ast.parse(return_stmt).body[0]
+ ignore_function.body.append(return_stmt) # type: ignore[attr-defined]
+
+ # registers the custom function in the global context
+ ignore_func_str = "@torch.jit.ignore\n" + astunparse.unparse(ignore_function)
+ ignore_func_str += f'\nglobals()["{ignore_function_name}"] = {ignore_function_name}'
+ exec(ignore_func_str) # noqa: P204
+
+ # build the statements as:
+ # <out_1>, <out_2>, ... = torch.jit.frontend.<func>(<in_1>, <in_2>)
+ assign_str_lhs = build_args(outputs)
+ # this function will be registered in torch.jit.frontend module by default
+ assign_str_rhs = (
+ f"torch.jit.frontend.{ignore_function_name}(" + build_args(inputs) + ")"
+ )
+
+ if len(outputs) > 0:
+ assign_str = assign_str_lhs + " = " + assign_str_rhs
+ else:
+ assign_str = assign_str_rhs
+ assign_ast = ast.parse(assign_str).body[0]
+ return assign_ast
+
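+# Illustrative sketch of the rewrite above (generated name assumed): a block like
+#   with torch.jit._IgnoreContextManager(a="inp:int", b="out:int"):
+#       b = a + 1
+# is replaced by the single assignment
+#   b = torch.jit.frontend.func_ignore_<sanitized_filename>_<lineno>(a)
+# where the generated function wraps the original body under @torch.jit.ignore.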
+
+def get_default_args(fn):
+ if fn is None:
+ return {}
+
+ signature = inspect.signature(fn)
+
+ return {
+ k: v.default
+ for k, v in signature.parameters.items()
+ if v.default is not inspect.Parameter.empty
+ }
+
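+# For example (illustrative): given `def f(x, y=2, *, z=3): ...`,
+# get_default_args(f) returns {"y": 2, "z": 3}.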
+
+def get_default_args_for_class(cls):
+ """
+ Get default arguments for all methods in a class (except for static methods).
+
+ Args:
+ cls: type - The class type to inspect for default arguments.
+ Returns:
+ A Dict[str, Dict[str, Any]] which maps each method name to a Dict[str, Any]
+ that maps each argument name to its default value.
+ """
+ # Get methods (except static methods because those are compiled separately as
+ # if they were independent script functions).
+ methods = inspect.getmembers(
+ cls,
+ predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
+ and not is_static_fn(cls, m.__name__)
+ and m.__name__ in cls.__dict__,
+ )
+
+ # Get method defaults. Property defaults do not need to be considered
+ # because setters cannot be invoked without a value.
+ defaults = {
+ method_name: get_default_args(method_impl)
+ for method_name, method_impl in methods
+ }
+
+ return defaults
+
+
+class WithItemBuilder(Builder):
+ @staticmethod
+ def build_withitem(ctx, item):
+ lineno = item.context_expr.lineno
+ start = item.context_expr.col_offset
+ end = start + len(pretty_node_names[ast.With])
+ op_vars = item.optional_vars
+ r = ctx.make_range(lineno, start, end)
+
+ return WithItem(
+ r,
+ build_expr(ctx, item.context_expr),
+ build_expr(ctx, op_vars) if op_vars else None,
+ )
+
+
+class StmtBuilder(Builder):
+ augassign_map = {
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.Mod: "%",
+ ast.BitOr: "|",
+ ast.BitAnd: "&",
+ ast.BitXor: "^",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Pow: "**",
+ }
+
+ @staticmethod
+ def build_Expr(ctx, stmt):
+ value = stmt.value
+ if value.__class__.__name__ == "Str":
+ # If a statement is a string literal expression,
+ # then it is a docstring. Just ignore it.
+ return None
+ else:
+ return ExprStmt(build_expr(ctx, value))
+
+ @staticmethod
+ def build_Assign(ctx, stmt):
+ rhs = build_expr(ctx, stmt.value)
+ lhs = [build_expr(ctx, x) for x in stmt.targets]
+ return Assign(lhs, rhs)
+
+ @staticmethod
+ def build_AnnAssign(ctx, stmt):
+ if stmt.value is None:
+ raise UnsupportedNodeError(ctx, stmt, reason="without assigned value")
+
+ # Disallow type annotations on instance attributes outside of __init__
+ if (
+ type(stmt.target) == ast.Attribute
+ and stmt.target.value.id == "self" # type: ignore[attr-defined]
+ and ctx.funcname != "__init__"
+ ):
+ start = stmt.col_offset
+ end = start + len(f"self.{stmt.target.attr}")
+ if hasattr(stmt.annotation, "id"):
+ end += len(f": {stmt.annotation.id}")
+ sr = ctx.make_range(stmt.lineno, start, end)
+ raise ValueError(
+ "Type annotations on instance attributes must be declared in "
+ f"__init__, not '{ctx.funcname}': {sr}"
+ )
+
+ rhs = build_expr(ctx, stmt.value)
+ lhs = build_expr(ctx, stmt.target)
+ the_type = build_expr(ctx, stmt.annotation)
+ return Assign([lhs], rhs, the_type)
+
+ @staticmethod
+ def build_Delete(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("del"))
+
+ return Delete(r, [build_expr(ctx, target) for target in stmt.targets])
+
+ @staticmethod
+ def build_Return(ctx, stmt):
+ r = ctx.make_range(
+ stmt.lineno, stmt.col_offset, stmt.col_offset + len("return")
+ )
+ return Return(r, None if stmt.value is None else build_expr(ctx, stmt.value))
+
+ @staticmethod
+ def build_Raise(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("raise"))
+ expr = build_expr(ctx, stmt.exc)
+ return Raise(r, expr)
+
+ @staticmethod
+ def build_Assert(ctx, stmt):
+ r = ctx.make_range(
+ stmt.lineno, stmt.col_offset, stmt.col_offset + len("assert")
+ )
+ test = build_expr(ctx, stmt.test)
+ msg = build_expr(ctx, stmt.msg) if stmt.msg is not None else None
+ return Assert(r, test, msg)
+
+ @staticmethod
+ def build_AugAssign(ctx, stmt):
+ lhs = build_expr(ctx, stmt.target)
+ rhs = build_expr(ctx, stmt.value)
+ op = type(stmt.op)
+ if op in StmtBuilder.augassign_map:
+ op_token = StmtBuilder.augassign_map[op]
+ else:
+ raise NotSupportedError(
+ find_before(ctx, rhs.range().start, "=", offsets=(-1, 0)),
+ "unsupported kind of augmented assignment: " + op.__name__,
+ )
+ return AugAssign(lhs, op_token, rhs)
+
+ @staticmethod
+ def build_While(ctx, stmt):
+ if stmt.orelse:
+ # TODO: try to recover the location of else:? Python doesn't give us useful
+ # annotations in this case
+ raise NotSupportedError(
+ None, "else branches of while loops aren't supported"
+ )
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("while"))
+ return While(r, build_expr(ctx, stmt.test), build_stmts(ctx, stmt.body))
+
+ @staticmethod
+ def build_For(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("for"))
+ if stmt.orelse:
+ raise NotSupportedError(r, "else branches of for loops aren't supported")
+
+ return For(
+ r,
+ [build_expr(ctx, stmt.target)],
+ [build_expr(ctx, stmt.iter)],
+ build_stmts(ctx, stmt.body),
+ )
+
+ @staticmethod
+ def build_If(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("if"))
+ return If(
+ r,
+ build_expr(ctx, stmt.test),
+ build_stmts(ctx, stmt.body),
+ build_stmts(ctx, stmt.orelse),
+ )
+
+ @staticmethod
+ def build_Print(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("print"))
+ if stmt.dest:
+ raise NotSupportedError(
+ r, "print statements with non-default destinations aren't supported"
+ )
+ args = [build_expr(ctx, val) for val in stmt.values]
+ return ExprStmt(Apply(Var(Ident(r, "print")), args, []))
+
+ @staticmethod
+ def build_Pass(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("pass"))
+ return Pass(r)
+
+ @staticmethod
+ def build_Break(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("break"))
+ return Break(r)
+
+ @staticmethod
+ def build_Continue(ctx, stmt):
+ r = ctx.make_range(
+ stmt.lineno, stmt.col_offset, stmt.col_offset + len("continue")
+ )
+ return Continue(r)
+
+ @staticmethod
+ def build_With(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("with"))
+ # Handle ignore context manager
+ if is_torch_jit_ignore_context_manager(stmt):
+ if not _IS_ASTUNPARSE_INSTALLED:
+ raise RuntimeError(
+ "torch.jit._IgnoreContextManager requires installing Python library `astunparse`, \
+ please install it in your Python environment"
+ )
+ assign_ast = build_ignore_context_manager(ctx, stmt)
+ return build_stmt(ctx, assign_ast)
+ return With(r, build_withitems(ctx, stmt.items), build_stmts(ctx, stmt.body))
+
+
+class ExprBuilder(Builder):
+ binop_map = {
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.Pow: "**",
+ ast.Mod: "%",
+ ast.FloorDiv: "//",
+ ast.BitAnd: "&",
+ ast.BitXor: "^",
+ ast.BitOr: "|",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ }
+
+ binop_map[ast.MatMult] = "@"
+
+ unop_map = {
+ ast.Not: "not",
+ ast.USub: "-",
+ ast.Invert: "~",
+ }
+
+ boolop_map = {
+ ast.And: "and",
+ ast.Or: "or",
+ }
+
+ cmpop_map = {
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.LtE: "<=",
+ ast.Lt: "<",
+ ast.GtE: ">=",
+ ast.Gt: ">",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in",
+ }
+
+ @staticmethod
+ def build_Attribute(ctx, expr):
+ base = build_expr(ctx, expr.value)
+ # expr.attr is just a string, so it's not annotated in any way, so we have
+ # to build the range manually
+ source = ctx.source.encode("utf-8")
+
+ def get_char(index):
+ return chr(source[index])
+
+ start_pos = base.range().end + 1
+ while get_char(start_pos) in string.whitespace: # Skip whitespace
+ start_pos += 1
+ end_pos = start_pos + len(expr.attr)
+ name_range = ctx.make_raw_range(start_pos, end_pos)
+ return Select(base, Ident(name_range, expr.attr))
+
+ @staticmethod
+ def build_Call(ctx, expr):
+ func = build_expr(ctx, expr.func)
+ args = [build_expr(ctx, py_arg) for py_arg in expr.args]
+ if hasattr(expr, "starargs") and expr.starargs:
+ stararg_expr = build_expr(ctx, expr.starargs)
+ args += [Starred(stararg_expr.range(), stararg_expr)]
+ kwargs = []
+ for kw in expr.keywords:
+ kw_expr = build_expr(ctx, kw.value)
+ # XXX: we could do a better job at figuring out the range for the name here
+ if not kw.arg:
+ raise NotSupportedError(
+ kw_expr.range(), "keyword-arg expansion is not supported"
+ )
+ kwargs.append(Attribute(Ident(kw_expr.range(), kw.arg), kw_expr))
+ return Apply(func, args, kwargs)
+
+ @staticmethod
+ def build_Ellipsis(ctx, expr):
+ r = ctx.make_range(
+ expr.lineno, expr.col_offset, expr.col_offset + 3
+ ) # len("...") == 3
+ return Dots(r)
+
+ @staticmethod
+ def build_Name(ctx, expr):
+ r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(expr.id))
+ if expr.id.startswith(_reserved_prefix):
+ raise NotSupportedError(
+ r,
+ "names of variables used in JIT-ed functions "
+ "can't start with " + _reserved_prefix,
+ )
+ if expr.id == "True":
+ return TrueLiteral(r)
+ elif expr.id == "False":
+ return FalseLiteral(r)
+ elif expr.id == "None":
+ return NoneLiteral(r)
+ elif expr.id == "Ellipsis":
+ return Dots(r)
+ return Var(Ident(r, expr.id))
+
+ @staticmethod
+ def build_NameConstant(ctx, expr):
+ r = ctx.make_range(
+ expr.lineno, expr.col_offset, expr.col_offset + len(str(expr.value))
+ )
+ if expr.value is True:
+ return TrueLiteral(r)
+ elif expr.value is False:
+ return FalseLiteral(r)
+ elif expr.value is None:
+ return NoneLiteral(r)
+ elif expr.value == Ellipsis:
+ return Dots(r)
+ else:
+ raise ValueError("Name constant value unsupported: " + str(expr.value))
+
+ @staticmethod
+ def build_BinOp(ctx, expr):
+ lhs = build_expr(ctx, expr.left)
+ rhs = build_expr(ctx, expr.right)
+ op = type(expr.op)
+
+ if op == ast.Div and not ctx.uses_true_division:
+ err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
+ raise FrontendError(
+ err_range,
+ "Division of ints in TorchScript uses Python 3 true "
+ "division semantics. Please put `from __future__ "
+ "import division` at the top of your file",
+ )
+ op_token = ExprBuilder.binop_map.get(op)
+ if op_token is None:
+ err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
+ raise NotSupportedError(
+ err_range, "unsupported binary operator: " + op.__name__
+ )
+ return BinOp(op_token, lhs, rhs)
+
+ @staticmethod
+ def build_UnaryOp(ctx, expr):
+ sub_expr = build_expr(ctx, expr.operand)
+ op = type(expr.op)
+ op_token = ExprBuilder.unop_map.get(op)
+ if op_token is None:
+ # NB: `expr` here is a raw ast node with no `.range()`; use the operand's range
+ raise NotSupportedError(
+ sub_expr.range(), "unsupported unary operator: " + op.__name__
+ )
+ r = ctx.make_range(
+ expr.lineno, expr.col_offset, expr.col_offset + len(op_token)
+ )
+ return UnaryOp(r, op_token, sub_expr)
+
+ @staticmethod
+ def build_BoolOp(ctx, expr):
+ if len(expr.values) < 2:
+ raise AssertionError(
+ "expected at least 2 values in BoolOp, but got " + str(len(expr.values))
+ )
+ sub_exprs = [build_expr(ctx, sub_expr) for sub_expr in expr.values]
+ op = type(expr.op)
+ op_token = ExprBuilder.boolop_map.get(op)
+ if op_token is None:
+ err_range = ctx.make_raw_range(
+ sub_exprs[0].range().end, sub_exprs[1].range().start
+ )
+ raise NotSupportedError(
+ err_range, "unsupported boolean operator: " + op.__name__
+ )
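+ # Illustrative note: `a or b or c` arrives as a single BoolOp with three
+ # values; the fold below produces BinOp("or", BinOp("or", a, b), c).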
+ lhs = sub_exprs[0]
+ for rhs in sub_exprs[1:]:
+ lhs = BinOp(op_token, lhs, rhs)
+ return lhs
+
+ @staticmethod
+ def build_IfExp(ctx, expr):
+ return TernaryIf(
+ build_expr(ctx, expr.test),
+ build_expr(ctx, expr.body),
+ build_expr(ctx, expr.orelse),
+ )
+
+ @staticmethod
+ def build_Compare(ctx, expr):
+ operands = [build_expr(ctx, e) for e in [expr.left] + list(expr.comparators)]
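+ # Illustrative note: a chained comparison such as `a < b <= c` is lowered
+ # below to `(a < b) and (b <= c)` by pairing consecutive operands.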
+ result = None
+ for lhs, op_, rhs in zip(operands, expr.ops, operands[1:]):
+ op = type(op_)
+ op_token = ExprBuilder.cmpop_map.get(op)
+ r = ctx.make_raw_range(lhs.range().end, rhs.range().start)
+ if op_token is None:
+ raise NotSupportedError(
+ r, "unsupported comparison operator: " + op.__name__
+ )
+
+ if op == ast.NotIn:
+ # NB: `not in` is just `not( in )`, so we don't introduce new tree view
+ # but just make it a nested call in our tree view structure
+ in_expr = BinOp("in", lhs, rhs)
+ cmp_expr = UnaryOp(r, "not", in_expr)
+ else:
+ cmp_expr = BinOp(op_token, lhs, rhs)
+
+ if result is None:
+ result = cmp_expr
+ else:
+ result = BinOp("and", result, cmp_expr)
+ return result
+
+ @staticmethod
+ def build_Subscript(ctx, expr):
+ def build_SliceExpr(ctx, base, slice_expr):
+ lower = (
+ build_expr(ctx, slice_expr.lower)
+ if slice_expr.lower is not None
+ else None
+ )
+ upper = (
+ build_expr(ctx, slice_expr.upper)
+ if slice_expr.upper is not None
+ else None
+ )
+ step = (
+ build_expr(ctx, slice_expr.step)
+ if slice_expr.step is not None
+ else None
+ )
+ return SliceExpr(base.range(), lower, upper, step)
+
+ def build_Index(ctx, base, index_expr):
+ if isinstance(index_expr.value, ast.Tuple):
+ raise NotSupportedError(
+ base.range(),
+ "slicing multiple dimensions with tuples not supported yet",
+ )
+ return build_expr(ctx, index_expr.value)
+
+ def build_ExtSlice(ctx, base, extslice):
+ sub_exprs = []
+ for expr in extslice.dims:
+ sub_type = type(expr)
+ if sub_type is ast.Index:
+ sub_exprs.append(build_Index(ctx, base, expr))
+ elif sub_type is ast.Slice:
+ sub_exprs.append(build_SliceExpr(ctx, base, expr))
+ elif sub_type is ast.Ellipsis:
+ sub_exprs.append(Dots(base.range()))
+ else:
+ raise NotSupportedError(
+ base.range(),
+ f"slicing multiple dimensions with {sub_type} not supported",
+ )
+ return sub_exprs
+
+ base = build_expr(ctx, expr.value)
+ sub_type = type(expr.slice)
+ if sub_type is ast.Index:
+ if isinstance(expr.slice.value, ast.Tuple):
+ # N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k]
+ # XXX: Indexing using a list is **different**! It triggers advanced indexing.
+ indices = [
+ build_expr(ctx, index_expr) for index_expr in expr.slice.value.elts
+ ]
+ if not indices:
+ # `col_offset` is an int, but `end_col_offset` is
+ # `Optional[int]`. The magic number is here to make
+ # sure we can parse `()` on any machine
+ r = ctx.make_range(
+ expr.lineno,
+ expr.slice.value.col_offset,
+ expr.slice.value.col_offset + 2,
+ )
+ tup = TupleLiteral(r, [])
+ indices.append(tup)
+ return Subscript(base, indices)
+ else:
+ return Subscript(base, [build_expr(ctx, expr.slice.value)])
+ elif sub_type is ast.Slice:
+ return Subscript(base, [build_SliceExpr(ctx, base, expr.slice)])
+ elif sub_type is ast.ExtSlice:
+ return Subscript(base, build_ExtSlice(ctx, base, expr.slice))
+ elif sys.version_info >= (
+ 3,
+ 9,
+ ): # In Python 3.9 array indices are not wrapped in ast.Index
+ if sub_type is ast.Tuple:
+ # N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k]
+ indices = []
+ for index_expr in expr.slice.elts:
+ if isinstance(index_expr, ast.Slice):
+ indices.append(build_SliceExpr(ctx, base, index_expr))
+ else:
+ indices.append(build_expr(ctx, index_expr))
+ # Special-case logic for `typing.Tuple[()]`
+ if not indices:
+ # See note above r.e. magic number
+ r = ctx.make_range(
+ expr.lineno, expr.slice.col_offset, expr.slice.col_offset + 2
+ )
+ tup = TupleLiteral(r, [])
+ indices.append(tup)
+ return Subscript(base, indices)
+ return Subscript(base, [build_expr(ctx, expr.slice)])
+ else: # Ellipsis (can only happen in Python 2)
+ raise NotSupportedError(base.range(), "ellipsis is not supported")
+
+ @staticmethod
+ def build_List(ctx, expr):
+ return ListLiteral(
+ ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
+ [build_expr(ctx, e) for e in expr.elts],
+ )
+
+ @staticmethod
+ def build_Tuple(ctx, expr):
+ return TupleLiteral(
+ ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
+ [build_expr(ctx, e) for e in expr.elts],
+ )
+
+ @staticmethod
+ def build_Dict(ctx, expr):
+ range = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
+ if expr.keys and not expr.keys[0]:
+ raise NotSupportedError(
+ range, "Dict expansion (e.g. `{**dict}`) is not supported"
+ )
+ return DictLiteral(
+ range,
+ [build_expr(ctx, e) for e in expr.keys],
+ [build_expr(ctx, e) for e in expr.values],
+ )
+
+ @staticmethod
+ def build_Num(ctx, expr):
+ value = str(expr.value)
+ r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(value))
+ return Const(r, value)
+
+ @staticmethod
+ def build_Constant(ctx, expr):
+ value = expr.value
+ if value is None or isinstance(value, bool):
+ # NB: this check has to happen before the int check because bool is
+ # a subclass of int
+ return ExprBuilder.build_NameConstant(ctx, expr)
+ if isinstance(value, (int, float, complex)):
+ return ExprBuilder.build_Num(ctx, expr)
+ elif isinstance(value, str):
+ return ExprBuilder.build_Str(ctx, expr)
+ elif isinstance(value, type(Ellipsis)):
+ return ExprBuilder.build_Ellipsis(ctx, expr)
+ else:
+ error_range = ctx.make_range(
+ expr.lineno, expr.col_offset, expr.col_offset + len(str(value))
+ )
+ raise FrontendError(error_range, "Unknown Constant expression type")
+
+ @staticmethod
+ def build_Str(ctx, expr):
+ value = str(expr.value)
+ r = ctx.make_range(
+ expr.lineno, expr.col_offset, expr.col_offset + len(value) + 1
+ )
+ return StringLiteral(r, value)
+
+ @staticmethod
+ def build_JoinedStr(ctx, expr):
+ s = ""
+ args = []
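+ # Illustrative note: an f-string like f"x={x}" is lowered to the call
+ # "x={}".format(x); conversions (e.g. !r) and format specs (e.g. :.2f)
+ # are rejected below.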
+ for value in expr.values:
+ r = ctx.make_range(value.lineno, value.col_offset, value.col_offset + 1)
+ if isinstance(value, ast.FormattedValue):
+ if value.conversion != -1:
+ raise NotSupportedError(r, "Don't support conversion in JoinedStr")
+ if value.format_spec is not None:
+ raise NotSupportedError(r, "Don't support formatting in JoinedStr")
+ s += "{}"
+ args.append(build_expr(ctx, value.value))
+ elif isinstance(value, ast.Str):
+ s += value.s
+ else:
+ raise NotSupportedError(r, "Unsupported value in JoinedStr")
+
+ r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
+ return Apply(Select(StringLiteral(r, s), Ident(r, "format")), args, [])
+
+ @staticmethod
+ def build_ListComp(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset)
+ if len(stmt.generators) != 1:
+ raise NotSupportedError(r, "Only a single generator is currently supported")
+
+ if len(stmt.generators[0].ifs) != 0:
+ raise NotSupportedError(r, "Comprehension ifs are not supported yet")
+
+ elt_expr = build_expr(ctx, stmt.elt)
+ target_expr = build_expr(ctx, stmt.generators[0].target)
+ iter_expr = build_expr(ctx, stmt.generators[0].iter)
+
+ return ListComp(r, elt_expr, target_expr, iter_expr)
+
+ @staticmethod
+ def build_GeneratorExp(ctx, stmt):
+ # Convert Generator expression to ListComp
+ return ExprBuilder.build_ListComp(ctx, stmt)
+
+ @staticmethod
+ def build_DictComp(ctx, stmt):
+ r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset)
+ if len(stmt.generators) != 1:
+ raise NotSupportedError(r, "Only a single generator is currently supported")
+
+ if len(stmt.generators[0].ifs) != 0:
+ raise NotSupportedError(r, "Comprehension ifs are not supported yet")
+
+ key_expr = build_expr(ctx, stmt.key)
+ value_expr = build_expr(ctx, stmt.value)
+ target_expr = build_expr(ctx, stmt.generators[0].target)
+ iter_expr = build_expr(ctx, stmt.generators[0].iter)
+
+ return DictComp(r, key_expr, value_expr, target_expr, iter_expr)
+
+ @staticmethod
+ def build_Starred(ctx, expr):
+ r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
+ return Starred(r, build_expr(ctx, expr.value))
+
+
+build_expr = ExprBuilder()
+build_stmt = StmtBuilder()
+build_withitem = WithItemBuilder()
+
+
+def find_before(ctx, pos, substr, offsets=(0, 0)):
+ new_pos = ctx.source[:pos].rindex(substr)
+ return ctx.make_raw_range(new_pos + offsets[0], new_pos + len(substr) + offsets[1])
diff --git a/venv/lib/python3.10/site-packages/torch/jit/generate_bytecode.py b/venv/lib/python3.10/site-packages/torch/jit/generate_bytecode.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e56c7665d1ca68b5bb4a94cf8abc6a1c9bd1666
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/generate_bytecode.py
@@ -0,0 +1,33 @@
+from typing import List
+
+from torch._C import _compile_graph_to_code_table, _generate_upgraders_graph
+
+
+def format_bytecode(table):
+ # given a nested tuple, convert it to nested list
+ def listify(content):
+ if not isinstance(content, tuple):
+ return content
+ return [listify(i) for i in content]
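+ # e.g. (illustrative): listify((1, (2, 3))) -> [1, [2, 3]]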
+
+ formatted_table = {}
+ for entry in table:
+ identifier = entry[0]
+ content = entry[1]
+ content = listify(content)
+ formatted_table[identifier] = content
+ return formatted_table
+
+
+def generate_upgraders_bytecode() -> List:
+ yaml_content = []
+ upgraders_graph_map = _generate_upgraders_graph()
+ for upgrader_name, upgrader_graph in upgraders_graph_map.items():
+ bytecode_table = _compile_graph_to_code_table(upgrader_name, upgrader_graph)
+ entry = {upgrader_name: format_bytecode(bytecode_table)}
+ yaml_content.append(entry)
+ return yaml_content
+
+
+if __name__ == "__main__":
+ raise RuntimeError("This file is not meant to be run directly")
diff --git a/venv/lib/python3.10/site-packages/torch/jit/quantized.py b/venv/lib/python3.10/site-packages/torch/jit/quantized.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7c679c794569791fc2081b088d07594cd89d84f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/quantized.py
@@ -0,0 +1,99 @@
+import torch
+
+
+class QuantizedLinear(torch.jit.ScriptModule):
+ def __init__(self, other):
+ raise RuntimeError(
+ "torch.jit.QuantizedLinear is no longer supported. Please use "
+ "torch.ao.nn.quantized.dynamic.Linear instead."
+ )
+
+
+# FP16 weights
+class QuantizedLinearFP16(torch.jit.ScriptModule):
+ def __init__(self, other):
+ super().__init__()
+ raise RuntimeError(
+ "torch.jit.QuantizedLinearFP16 is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic.Linear instead."
+ )
+
+
+# Quantized RNN cell implementations
+class QuantizedRNNCellBase(torch.jit.ScriptModule):
+ def __init__(self, other):
+ raise RuntimeError(
+ "torch.jit.QuantizedRNNCellBase is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic.RNNCell instead."
+ )
+
+
+class QuantizedRNNCell(QuantizedRNNCellBase):
+ def __init__(self, other):
+ raise RuntimeError(
+ "torch.jit.QuantizedRNNCell is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic.RNNCell instead."
+ )
+
+
+class QuantizedLSTMCell(QuantizedRNNCellBase):
+ def __init__(self, other):
+ super().__init__(other)
+ raise RuntimeError(
+ "torch.jit.QuantizedLSTMCell is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic.LSTMCell instead."
+ )
+
+
+class QuantizedGRUCell(QuantizedRNNCellBase):
+ def __init__(self, other):
+ super().__init__(other)
+ raise RuntimeError(
+ "torch.jit.QuantizedGRUCell is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic.GRUCell instead."
+ )
+
+
+class QuantizedRNNBase(torch.jit.ScriptModule):
+ def __init__(self, other, dtype=torch.int8):
+ raise RuntimeError(
+ "torch.jit.QuantizedRNNBase is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic instead."
+ )
+
+
+class QuantizedLSTM(QuantizedRNNBase):
+ def __init__(self, other, dtype):
+ raise RuntimeError(
+ "torch.jit.QuantizedLSTM is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic.LSTM instead."
+ )
+
+
+class QuantizedGRU(QuantizedRNNBase):
+ def __init__(self, *args, **kwargs):
+ raise RuntimeError(
+ "torch.jit.QuantizedGRU is no longer supported. "
+ "Please use the torch.ao.nn.quantized.dynamic.GRU instead."
+ )
+
+
+def quantize_rnn_cell_modules(module):
+ raise RuntimeError(
+ "quantize_rnn_cell_modules function is no longer supported. "
+ "Please use torch.ao.quantization.quantize_dynamic API instead."
+ )
+
+
+def quantize_linear_modules(module, dtype=torch.int8):
+ raise RuntimeError(
+ "quantize_linear_modules function is no longer supported. "
+ "Please use torch.ao.quantization.quantize_dynamic API instead."
+ )
+
+
+def quantize_rnn_modules(module, dtype=torch.int8):
+ raise RuntimeError(
+ "quantize_rnn_modules function is no longer supported. "
+ "Please use torch.ao.quantization.quantize_dynamic API instead."
+ )
diff --git a/venv/lib/python3.10/site-packages/torch/jit/supported_ops.py b/venv/lib/python3.10/site-packages/torch/jit/supported_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a670c722744e770767f6c91ac0082b6994e9782
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/supported_ops.py
@@ -0,0 +1,342 @@
+import inspect
+import textwrap
+
+import torch.jit
+from torch.jit._builtins import _find_builtin
+
+# this file is for generating documentation using sphinx autodoc
+# > help(torch.jit.supported_ops) will also give a nice listing of the
+# supported ops programmatically
+
+
+def _hidden(name):
+ return name.startswith("_") and not name.startswith("__")
+
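+# e.g. (illustrative): _hidden("_tan") is True, _hidden("__and__") is False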
+
+def _emit_type(type):
+ return str(type)
+
+
+def _emit_arg(indent, i, arg):
+ v = f"{arg.name} : {_emit_type(arg.type)}"
+ default = arg.default_value
+ if default is not None:
+ v = f"{v}={str(default)}"
+ if i > 0:
+ v = f"\n{' ' * indent}{v}"
+ return v
+
+
+def _emit_args(indent, arguments):
+ return ",".join(_emit_arg(indent, i, arg) for i, arg in enumerate(arguments))
+
+
+def _emit_ret(ret):
+ return _emit_type(ret.type)
+
+
+def _emit_rets(returns):
+ if len(returns) == 1:
+ return _emit_ret(returns[0])
+ return f"Tuple[{', '.join(_emit_ret(r) for r in returns)}]"
+
+
+def _emit_schema(mod, name, schema, arg_start=0, padding=4):
+ if mod is None:
+ qualified_name = name
+ else:
+ qualified_name = f"{mod}.{name}"
+ schema_str = "{}({}) -> {}".format(
+ qualified_name,
+ _emit_args(len(qualified_name) + 1 + padding, schema.arguments[arg_start:]),
+ _emit_rets(schema.returns),
+ )
+ return schema_str
+
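+# Illustrative output (assumed, whitespace elided): something like
+#   Tensor.add(other : Tensor, alpha : number=1) -> Tensor
+# for _emit_schema("Tensor", "add", schema, arg_start=1) on the aten::add schema.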
+
+def _get_tensor_ops():
+ def is_tensor_method(schema):
+ if len(schema.arguments) == 0:
+ return False
+ self = schema.arguments[0]
+ if self.name != "self":
+ return False
+ if not self.type.isSubtypeOf(torch._C.TensorType.get()):
+ return False
+ return True
+
+ methods = []
+ # discover methods
+ for elem in dir(torch.Tensor):
+ if not _hidden(elem):
+ schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem)
+ for schema in schemas:
+ if is_tensor_method(schema):
+ methods.append(_emit_schema("Tensor", elem, schema, arg_start=1))
+
+ return "Supported Tensor Methods", methods
+
+
+def _get_nn_functional_ops():
+ functions = []
+
+ # Iterate over torch.nn.functional
+ mod = torch.nn.functional
+ name = mod.__name__
+ for elem in dir(torch.nn.functional):
+ attr = getattr(mod, elem)
+ if not inspect.isfunction(attr) or _hidden(elem[0]):
+ # Ignore non-functions and internal methods
+ continue
+
+ attr_module = inspect.getmodule(attr)
+ if not attr_module:
+ raise RuntimeError(f"Module for {attr} not found")
+
+ if "torch.nn.functional" not in attr_module.__name__:
+ # Ignore functions from outside torch.nn.functional
+ continue
+
+ try:
+ # compile fn, get schema
+ scripted = torch.jit.script(attr)
+ scripted_schema = scripted.schema
+ functions.append(_emit_schema(name, elem, scripted_schema))
+ except: # noqa: B001,E722
+ # Skip interpolate / boolean dispatched things
+ pass
+
+ # Iterate over modules that we know contain a lot of builtins
+ for mod in torch.jit._builtins._modules_containing_builtins:
+ name = mod.__name__
+ for elem in dir(mod):
+ builtin = _find_builtin(getattr(mod, elem))
+ if builtin is not None:
+ schemas = torch._C._jit_get_schemas_for_operator(builtin)
+ for schema in schemas:
+ # remove _tan but not __and__
+ if not _hidden(elem):
+ functions.append(_emit_schema(name, elem, schema))
+ return "Supported PyTorch Functions", functions
+
+
+def _get_builtins_helper():
+ builtins = []
+ for fn, _builtin_name in torch.jit._builtins._builtin_ops:
+ mod = inspect.getmodule(fn)
+
+ if not hasattr(fn, "__name__"):
+ # typing classes
+ continue
+ if not mod:
+ continue
+ if _hidden(fn.__name__) or _hidden(fn.__qualname__) or _hidden(mod.__name__):
+ # skip internal-only methods
+ continue
+
+ if "torch._C" in mod.__name__:
+ continue
+
+ builtins.append((fn, _builtin_name))
+
+ return builtins
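+
+
+# Note: each entry of torch.jit._builtins._builtin_ops is a (callable,
+# op_name) pair -- schematically (math.ceil, "aten::ceil") -- so the helper
+# above yields the publicly visible subset of those pairs.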
+
+
+def _is_math_fn(fn):
+ mod = inspect.getmodule(fn)
+ if not mod:
+ raise RuntimeError(f"Module for {fn} not found")
+
+ return mod.__name__ == "math"
+
+
+def _get_torchscript_builtins():
+ functions = []
+ builtins = filter(lambda fn: not _is_math_fn(fn[0]), _get_builtins_helper())
+ builtins_list = list(builtins)
+ # Iterate over the specially added builtins
+ for fn, _builtin_name in builtins_list:
+ mod = inspect.getmodule(fn)
+ if not mod:
+ raise RuntimeError(f"Module for {fn} not found")
+ builtin = _find_builtin(fn)
+ if builtin is not None:
+ schemas = torch._C._jit_get_schemas_for_operator(builtin)
+ for schema in schemas:
+ functions.append(_emit_schema(mod.__name__, fn.__name__, schema))
+
+ return "TorchScript Builtin Functions", functions
+
+
+def _get_math_builtins():
+ functions = []
+ builtins = filter(lambda fn: _is_math_fn(fn[0]), _get_builtins_helper())
+ builtins_list = list(builtins)
+ # Iterate over the specially added builtins
+ for fn, _builtin_name in builtins_list:
+ mod = inspect.getmodule(fn)
+ if not mod:
+ raise RuntimeError(f"Module for {fn} not found")
+ builtin = _find_builtin(fn)
+ if builtin is not None:
+ schemas = torch._C._jit_get_schemas_for_operator(builtin)
+ for schema in schemas:
+ schema_str = _emit_schema(mod.__name__, fn.__name__, schema)
+ if "Tensor" in schema_str:
+ # Skip Tensor ops that have the same name as math functions
+ # (they will show up in the tensor methods section)
+ continue
+                functions.append(schema_str)
+
+ return "``math`` Module", functions
+
+
+def _get_global_builtins():
+ # Taken from the 'globals' map in torch/csrc/jit/frontend/ir_emitter.cpp
+ supported_builtins = [
+ "print",
+ "tuple",
+ "float",
+ "complex",
+ "int",
+ "bool",
+ "str",
+ "getattr",
+ "hasattr",
+ "isinstance",
+ "len",
+ "hex",
+ "oct",
+ "round",
+ "hash",
+ "min",
+ "max",
+ "abs",
+ "all",
+ "divmod",
+ "list",
+ "ord",
+ "chr",
+ "bin",
+ "range",
+ "zip",
+ "enumerate",
+ "sorted",
+ ]
+
+ op_renames = {
+ "bool": "aten::Bool",
+ "int": "aten::Int",
+ "float": "aten::Float",
+ "complex": "aten::Complex",
+ "abs": "prim::abs",
+ "max": "prim::max",
+ "min": "prim::min",
+ "range": "fake::does_not_exist",
+ }
+
+ schemaless_op_explanations = {
+ "print": "Print any value",
+ "tuple": "Lists cannot be converted to tuples with this method since their size is not statically known",
+ "getattr": "Attribute name must be a literal string",
+ "hasattr": "Attribute name must be a literal string",
+ "isinstance": "Result is static",
+ "zip": "Arguments must be iterable. See :ref:`Iterables ` for details.",
+ "enumerate": "Arguments must be iterable. See :ref:`Iterables ` for details.",
+ "range": "Can only be used as an iterator in a for loop",
+ }
+
+ magic_methods = [
+ ("complex", "__complex__"),
+ ("float", "__float__"),
+ ("int", "__int__"),
+ ("bool", "__bool__"),
+ ("str", "__str__"),
+ ("len", "__len__"),
+ ("hex", "__hex__"),
+ ("oct", "__oct__"),
+ ]
+
+ magic_methods_rows = []
+ for fn, magic_method in magic_methods:
+ magic_methods_rows.append(f'"{fn}", "``{magic_method}``"')
+
+ schematized_ops = []
+ schemaless_ops = []
+
+ for fn in supported_builtins:
+ op_name = f"aten::{fn}"
+ if fn in op_renames:
+ op_name = op_renames[fn]
+ schemas = torch._C._jit_get_schemas_for_operator(op_name)
+ for s in schemas:
+ schematized_ops.append(_emit_schema(None, fn, s, padding=0))
+ if len(schemas) > 0:
+ schematized_ops.append("")
+ else:
+ table_row = f'":any:`{fn}`", "{schemaless_op_explanations[fn]}"'
+ schemaless_ops.append(table_row)
+
+ schematized_ops_str = "\n".join(schematized_ops)
+ schemaless_ops_str = "\n".join(schemaless_ops)
+ magic_methods_rows_str = "\n".join(magic_methods_rows)
+ schematized_ops_str = textwrap.indent(schematized_ops_str, "\t")
+ schemaless_ops_str = textwrap.indent(schemaless_ops_str, "\t")
+ magic_methods_rows_str = textwrap.indent(magic_methods_rows_str, "\t")
+ section = f"""
+The functions in the following table are supported but do not have a static schema
+
+.. csv-table::
+ :header: "Function", "Note"
+
+{schemaless_ops_str}
+
+The following functions will use the corresponding magic method on :any:`TorchScript classes`
+
+.. csv-table::
+ :header: "Function", "Magic Method"
+
+{magic_methods_rows_str}
+
+These built-in functions use the schema
+
+.. rst-class:: codeblock-height-limiter
+
+::
+
+{schematized_ops_str}
+ """
+
+ return "Python Built-in Functions", section
+
+
+def _list_supported_ops():
+ def emit_block(decls):
+ return "\n.. rst-class:: codeblock-height-limiter\n\n::\n\n{}\n".format(
+ "".join(f" {d}\n\n" for d in decls)
+ )
+
+ body = ""
+ op_gathering_fns = (
+ _get_tensor_ops,
+ _get_nn_functional_ops,
+ _get_torchscript_builtins,
+ _get_global_builtins,
+ _get_math_builtins,
+ )
+ for fn in op_gathering_fns:
+ header, items = fn()
+ link_target = header.replace("`", "").replace("-", "").lower().replace(" ", "-")
+ if isinstance(items, str):
+ section = f"{header}\n{'~' * len(header)}\n{items}\n"
+ else:
+ section = f"{header}\n{'~' * len(header)}\n{emit_block(items)}"
+ section = f".. _{link_target}:" + "\n\n" + section
+ body += section
+
+ return body
+
+
+__doc__ = _list_supported_ops()
diff --git a/venv/lib/python3.10/site-packages/torch/jit/unsupported_tensor_ops.py b/venv/lib/python3.10/site-packages/torch/jit/unsupported_tensor_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e553757eab48e1c1a65253fe4979dcb8afadf86
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/jit/unsupported_tensor_ops.py
@@ -0,0 +1,78 @@
+from textwrap import dedent
+
+from typing import Any, Dict
+
+import torch.jit
+
+
+def execWrapper(code, glob, loc):
+ exec(code, glob, loc)
+
+
+def _gen_unsupported_methods_properties():
+ tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
+ tensor = torch.tensor([2])
+ funcs_template = dedent(
+ """
+ def func(x):
+ return x.{op}()
+ """
+ )
+
+ deprecated_apis = {
+ "volatile",
+ "resize",
+ "reinforce",
+ "new",
+ "name",
+ "map2_",
+ "has_names",
+ "grad_fn",
+ "resize_as",
+ }
+ tensor_attrs = tensor_attrs - deprecated_apis
+
+ properties = []
+ methods = []
+ sorted_tensor_attrs = sorted(tensor_attrs, key=lambda x: x.lower())
+ for attr in sorted_tensor_attrs:
+ funcs_str = funcs_template.format(op=attr)
+ scope: Dict[str, Any] = {}
+ execWrapper(funcs_str, globals(), scope)
+ try:
+            torch.jit.CompilationUnit(funcs_str)
+ except Exception as e:
+ if "nonexistent attribute" not in repr(e):
+ continue
+ attr_repr = repr(getattr(tensor, attr))
+ if "bound method" in attr_repr or "built-in method" in attr_repr:
+ methods.append(attr)
+ else:
+ properties.append(attr)
+
+ mapped_methods = ("\t* :meth:`~torch.Tensor." + x + r"`" for x in methods)
+ mapped_properties = ("\t* :attr:`~torch.Tensor." + x + r"`" for x in properties)
+ return "\n".join(mapped_methods), "\n".join(mapped_properties)
+
+
+def _list_unsupported_tensor_ops():
+ header = """\n\n
+Unsupported Tensor Methods
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ """
+ methods, properties = _gen_unsupported_methods_properties()
+ return (
+ header
+ + "\n"
+ + methods
+ + """
+
+Unsupported Tensor Properties
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ """
+ + "\n"
+ + properties
+ )
+
+
+__doc__ = _list_unsupported_tensor_ops()
diff --git a/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c2f84aeb06f7a520b7cf17bdd9c9c3854dc4c469
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE
@@ -0,0 +1,15 @@
+Apache Software License 2.0
+
+Copyright (c) 2020, Paul Ganssle (Google)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE_APACHE b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE_APACHE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE_APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..5ca4be3198422cb791a135e09d1140b80cc511a5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/METADATA
@@ -0,0 +1,33 @@
+Metadata-Version: 2.1
+Name: tzdata
+Version: 2024.1
+Summary: Provider of IANA time zone data
+Home-page: https://github.com/python/tzdata
+Author: Python Software Foundation
+Author-email: datetime-sig@python.org
+License: Apache-2.0
+Project-URL: Bug Reports, https://github.com/python/tzdata/issues
+Project-URL: Source, https://github.com/python/tzdata
+Project-URL: Documentation, https://tzdata.readthedocs.io
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Requires-Python: >=2
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+License-File: licenses/LICENSE_APACHE
+
+tzdata: Python package providing IANA time zone data
+====================================================
+
+This is a Python package containing ``zic``-compiled binaries for the IANA time
+zone database. It is intended to be a fallback for systems that do not have
+system time zone data installed (or don't have it installed in a standard
+location), as a part of `PEP 615 <https://peps.python.org/pep-0615/>`_
+
+This repository generates a ``pip``-installable package, published on PyPI as
+`tzdata <https://pypi.org/project/tzdata/>`_.
+
+For more information, see `the documentation <https://tzdata.readthedocs.io>`_.
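+
+A minimal usage sketch (Python 3.9+; the standard-library ``zoneinfo`` module
+consults the system time zone database first and falls back to this
+package)::
+
+    from datetime import datetime
+    from zoneinfo import ZoneInfo
+
+    # Resolved from tzdata when no system database is available
+    print(datetime(2024, 1, 1, tzinfo=ZoneInfo("America/New_York")))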
diff --git a/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..b48ef410177c690efef9a31d64c89f09360cba8f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/RECORD
@@ -0,0 +1,655 @@
+tzdata-2024.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+tzdata-2024.1.dist-info/LICENSE,sha256=M-jlAC01EtP8wigrmV5rrZ0zR4G5xawxhD9ASQDh87Q,592
+tzdata-2024.1.dist-info/LICENSE_APACHE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+tzdata-2024.1.dist-info/METADATA,sha256=opWqMTU2QGRjhy337uxoIBXNr84QcEx8I9-iisPqxmA,1393
+tzdata-2024.1.dist-info/RECORD,,
+tzdata-2024.1.dist-info/WHEEL,sha256=-G_t0oGuE7UD0DrSpVZnq1hHMBV9DD2XkS5v7XpmTnk,110
+tzdata-2024.1.dist-info/top_level.txt,sha256=MO6QqC0xRrN67Gh9xU_nMmadwBVlYzPNkq_h4gYuzaQ,7
+tzdata/__init__.py,sha256=iofGPw33aJlVNgOXQP4kzxiXOEj8im69E8cgZZS874Q,252
+tzdata/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Africa/Abidjan,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Accra,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Addis_Ababa,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Algiers,sha256=L2nS4gLNFvuo89p3YtB-lSDYY2284SqkGH9pQQI8uwc,470
+tzdata/zoneinfo/Africa/Asmara,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Asmera,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Bamako,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Bangui,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Banjul,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Bissau,sha256=wa3uva129dJHRCi7tYt04kFOn1-osMS2afMjleO9mDw,149
+tzdata/zoneinfo/Africa/Blantyre,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Brazzaville,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Bujumbura,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Cairo,sha256=icuaNiEvuC6TPc2fqhDv36lpop7IDDIGO7tFGMAz0b4,1309
+tzdata/zoneinfo/Africa/Casablanca,sha256=MMps8T4AwqbEN6PIN_pkNiPMBEBqtRZRZceLN-9rxMM,1919
+tzdata/zoneinfo/Africa/Ceuta,sha256=oEIgK53afz1SYxYB_D0jR98Ss3g581yb8TnLppPaYcY,562
+tzdata/zoneinfo/Africa/Conakry,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Dakar,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Dar_es_Salaam,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Djibouti,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Douala,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/El_Aaiun,sha256=6hfLbLfrD1Qy9ZZqLXr1Xw7fzeEs_FqeHN2zZJZUVJI,1830
+tzdata/zoneinfo/Africa/Freetown,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Gaborone,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Harare,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Johannesburg,sha256=0Zrr4kNcToS_euZVM9I6nUQPmBYuW01pxz94PgIpnsg,190
+tzdata/zoneinfo/Africa/Juba,sha256=VTpoMAP-jJ6cKsDeNVr7l3LKGoKDUxGU2b1gqvDPz34,458
+tzdata/zoneinfo/Africa/Kampala,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Khartoum,sha256=NRwOwIg4SR6XuD11k3hxBz77uoBpzejXq7vxtq2Xys8,458
+tzdata/zoneinfo/Africa/Kigali,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Kinshasa,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Lagos,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Libreville,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Lome,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Luanda,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Lubumbashi,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Lusaka,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Malabo,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Maputo,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131
+tzdata/zoneinfo/Africa/Maseru,sha256=0Zrr4kNcToS_euZVM9I6nUQPmBYuW01pxz94PgIpnsg,190
+tzdata/zoneinfo/Africa/Mbabane,sha256=0Zrr4kNcToS_euZVM9I6nUQPmBYuW01pxz94PgIpnsg,190
+tzdata/zoneinfo/Africa/Mogadishu,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Monrovia,sha256=WM-JVfr502Vgy18Fe6iAJ2yMgOWbwwumIQh_yp53eKM,164
+tzdata/zoneinfo/Africa/Nairobi,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Africa/Ndjamena,sha256=Tlj4ZUUNJxEhvAoo7TJKqWv1J7tEYaf1FEMez-K9xEg,160
+tzdata/zoneinfo/Africa/Niamey,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Nouakchott,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Ouagadougou,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Porto-Novo,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180
+tzdata/zoneinfo/Africa/Sao_Tome,sha256=Pfiutakw5B5xr1OSg1uFvT0GwC6jVOqqxnx69GEJu50,173
+tzdata/zoneinfo/Africa/Timbuktu,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Africa/Tripoli,sha256=zzMBLZZh4VQ4_ARe5k4L_rsuqKP7edKvVt8F6kvj5FM,431
+tzdata/zoneinfo/Africa/Tunis,sha256=uoAEER48RJqNeGoYBuk5IeYqjc8sHvWLvKssuVCd18g,449
+tzdata/zoneinfo/Africa/Windhoek,sha256=g1jLRko_2peGsUTg0_wZycOC4gxTAHwfV2SO9I3KdCM,638
+tzdata/zoneinfo/Africa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Africa/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/America/Adak,sha256=q_sZgOINX4TsX9iBx1gNd6XGwBnzCjg6qpdAQhK0ieA,969
+tzdata/zoneinfo/America/Anchorage,sha256=d8oMIpYvBpmLzl5I2By4ZaFEZsg_9dxgfqpIM0QFi_Y,977
+tzdata/zoneinfo/America/Anguilla,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Antigua,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Araguaina,sha256=TawYX4lVAxq0BxUGhTDx4C8vtBRnLuWi8qLV_oXDiUo,592
+tzdata/zoneinfo/America/Argentina/Buenos_Aires,sha256=IEVOpSfI6oiJJmFNIb9Vb0bOOMIgxO5bghFw7vkHFGk,708
+tzdata/zoneinfo/America/Argentina/Catamarca,sha256=UC0fxx7ZPmjPw3D0BK-5vap-c1cBzbgR293MdmEfOx0,708
+tzdata/zoneinfo/America/Argentina/ComodRivadavia,sha256=UC0fxx7ZPmjPw3D0BK-5vap-c1cBzbgR293MdmEfOx0,708
+tzdata/zoneinfo/America/Argentina/Cordoba,sha256=9Ij3WjT9mWMKQ43LeSUIqQuDb9zS3FSlHYPVNQJTFf0,708
+tzdata/zoneinfo/America/Argentina/Jujuy,sha256=7YpjOcmVaKKpiq31rQe8TTDNExdH9jjZIhdcZv-ShUg,690
+tzdata/zoneinfo/America/Argentina/La_Rioja,sha256=mUkRD5jaWJUy2f8vNFqOlMgKPptULOBn-vf_jMgF6x8,717
+tzdata/zoneinfo/America/Argentina/Mendoza,sha256=dL4q0zgY2FKPbG8cC-Wknnpp8tF2Y7SWgWSC_G_WznI,708
+tzdata/zoneinfo/America/Argentina/Rio_Gallegos,sha256=bCpWMlEI8KWe4c3n6fn8u6WCPnxjYtVy57ERtLTZaEs,708
+tzdata/zoneinfo/America/Argentina/Salta,sha256=H_ybxVycfOe7LlUA3GngoS0jENHkQURIRhjfJQF2kfU,690
+tzdata/zoneinfo/America/Argentina/San_Juan,sha256=Mj5vIUzQl5DtsPe3iMzS7rR-88U9HKW2csQqUda4JNM,717
+tzdata/zoneinfo/America/Argentina/San_Luis,sha256=rka8BokogyvMRFH6jr8D6s1tFIpsUeqHJ_feLK5O6ds,717
+tzdata/zoneinfo/America/Argentina/Tucuman,sha256=yv3aC-hALLio2yqneLIIylZhXKDlbPJGAd_abgsj9gg,726
+tzdata/zoneinfo/America/Argentina/Ushuaia,sha256=mcmZgB1pEHX6i7nlyRzjLnG8bqAtAK1TwMdRD2pZqBE,708
+tzdata/zoneinfo/America/Argentina/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/America/Argentina/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/America/Aruba,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Asuncion,sha256=PuuUl8VILSBeZWDyLkM67bWl47xPMcJ0fY-rAhvSFzc,884
+tzdata/zoneinfo/America/Atikokan,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149
+tzdata/zoneinfo/America/Atka,sha256=q_sZgOINX4TsX9iBx1gNd6XGwBnzCjg6qpdAQhK0ieA,969
+tzdata/zoneinfo/America/Bahia,sha256=_-ZFw-HzXc7byacHW_NJHtJ03ADFdqt1kaYgyWYobYw,682
+tzdata/zoneinfo/America/Bahia_Banderas,sha256=F2Tz2IIWs9nqdSb5sdKLrO6Cu0xiGLbQZ3TamKR4v5A,728
+tzdata/zoneinfo/America/Barbados,sha256=gdiJf9ZKOMs9QB4ex0-crvdmhNfHpNzXTV2xTaNDCAg,278
+tzdata/zoneinfo/America/Belem,sha256=w0jv-gdBbEBZQBF2z2liKpRM9CEOWA36O1qU1nJKeCs,394
+tzdata/zoneinfo/America/Belize,sha256=uYBPJqnCGnOOeKnoz1IG9POWTvXD5kUirpFuB0PHjVo,1045
+tzdata/zoneinfo/America/Blanc-Sablon,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Boa_Vista,sha256=hYTFFNNZJdl_nSYIdfI8SQhtmfiakjCDI_15TlB-xEw,430
+tzdata/zoneinfo/America/Bogota,sha256=BqH6uClrrlT-VsBmke2Mh-IfA1R1l1h031CRUSLS1no,179
+tzdata/zoneinfo/America/Boise,sha256=Jt3omyPSPRoKE-KXVd-wxVON-CDE5oGaJA7Ar90Q2OM,999
+tzdata/zoneinfo/America/Buenos_Aires,sha256=IEVOpSfI6oiJJmFNIb9Vb0bOOMIgxO5bghFw7vkHFGk,708
+tzdata/zoneinfo/America/Cambridge_Bay,sha256=NFwNVfgxb2YMLzc-42RA-SKtNcODpukEfYf_QWWYTsI,883
+tzdata/zoneinfo/America/Campo_Grande,sha256=mngKYjaH_ENVmJ-mtURVjjFo5kHgLfYNPHZaCVSxQFE,952
+tzdata/zoneinfo/America/Cancun,sha256=XOYTJdVeHFfKeSGxHcZ_stJ9_Vkqn0q0LmS1mhnGI8o,529
+tzdata/zoneinfo/America/Caracas,sha256=UHmUwc0mFPoidR4UDCWb4T4w_mpCBsSb4BkW3SOKIVY,190
+tzdata/zoneinfo/America/Catamarca,sha256=UC0fxx7ZPmjPw3D0BK-5vap-c1cBzbgR293MdmEfOx0,708
+tzdata/zoneinfo/America/Cayenne,sha256=9URU4o1v5759UWuh8xI9vnaANOceOeRW67XoGQuuUa8,151
+tzdata/zoneinfo/America/Cayman,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149
+tzdata/zoneinfo/America/Chicago,sha256=wntzn_RqffBZThINcltDkhfhHkTqmlDNxJEwODtUguc,1754
+tzdata/zoneinfo/America/Chihuahua,sha256=hHey29pNZGuKh_bTiluGQSOGAhiQuCG4VMNGlJCgxPs,691
+tzdata/zoneinfo/America/Ciudad_Juarez,sha256=eJkqieD7ixtltRojAKRk4iNRk-bZZZDPQV2hyR1vMmI,718
+tzdata/zoneinfo/America/Coral_Harbour,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149
+tzdata/zoneinfo/America/Cordoba,sha256=9Ij3WjT9mWMKQ43LeSUIqQuDb9zS3FSlHYPVNQJTFf0,708
+tzdata/zoneinfo/America/Costa_Rica,sha256=ihoqA_tHmYm0YjTRLZu3q8PqsqqOeb1CELjWhPf_HXE,232
+tzdata/zoneinfo/America/Creston,sha256=rhFFPCHQiYTedfLv7ATckxeKe04jxeUvIJi4vUXMtUc,240
+tzdata/zoneinfo/America/Cuiaba,sha256=OaIle0Cr-BKe0hOik5rwdcoCbQ5LSHkHqBS2cLoCqAU,934
+tzdata/zoneinfo/America/Curacao,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Danmarkshavn,sha256=cQORuA8pR0vw3ZwYfeGkWaT1tPU66nMQ2xRKT1T1Yb4,447
+tzdata/zoneinfo/America/Dawson,sha256=BlKV0U36jqnlxM5-Pxn8OIiY5kJEcLlt3QZo-GsMzlY,1029
+tzdata/zoneinfo/America/Dawson_Creek,sha256=t4USMuIvq1VVL9gYCabraAYs31kmAqAnwf7GzEiJJNc,683
+tzdata/zoneinfo/America/Denver,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042
+tzdata/zoneinfo/America/Detroit,sha256=I4F8Mt9nx38AF6D-steYskBa_HHO6jKU1-W0yRFr50A,899
+tzdata/zoneinfo/America/Dominica,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Edmonton,sha256=Dq2mxcSNWZhMWRqxwwtMcaqwAIGMwkOzz-mW8fJscV8,970
+tzdata/zoneinfo/America/Eirunepe,sha256=6tKYaRpnbBSmXiwXy7_m4WW_rbVfn5LUec0keC3J7Iw,436
+tzdata/zoneinfo/America/El_Salvador,sha256=4wjsCpRH9AFk5abLAbnuv-zouhRKcwb0aenk-nWtmz0,176
+tzdata/zoneinfo/America/Ensenada,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025
+tzdata/zoneinfo/America/Fort_Nelson,sha256=_j7IJ-hXHtV_7dSMg6pxGQLb6z_IaUMj3aJde_F49QQ,1448
+tzdata/zoneinfo/America/Fort_Wayne,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531
+tzdata/zoneinfo/America/Fortaleza,sha256=ugF4DWO3j_khONebf7CLsT9ldL-JOWey_69S0jl2LIA,484
+tzdata/zoneinfo/America/Glace_Bay,sha256=I1posPHAEfg_Lc_FQdX1B8F8_A0NeJnK72p36PE7pKM,880
+tzdata/zoneinfo/America/Godthab,sha256=LlGZ5Y_ud9JwWRvncHnUHRArQbbnNcmmrz3duMhR3Hc,965
+tzdata/zoneinfo/America/Goose_Bay,sha256=gCJA1Sk2ciUg2WInn8DmPBwRAw0FjQbYPaUJK80mtMI,1580
+tzdata/zoneinfo/America/Grand_Turk,sha256=Gp8hpMt9P3QoEHmsIX2bqGNMkUSvlwZqqNzccR-cbe8,853
+tzdata/zoneinfo/America/Grenada,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Guadeloupe,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Guatemala,sha256=BGPGI4lyN6IFF_T0kx1q2lh3U5SEhbyDqLFuW8EFCaU,212
+tzdata/zoneinfo/America/Guayaquil,sha256=8OIaCy-SirKKz4I77l6MQFDgSLHtjN0TvklLVEZ_008,179
+tzdata/zoneinfo/America/Guyana,sha256=PmnEtWtOTamsPJXEo7PcNQCy2Rp-evGyJh4cf0pjAR4,181
+tzdata/zoneinfo/America/Halifax,sha256=kO5ahBM2oTLfWS4KX15FbKXfo5wg-f9vw1_hMOISGig,1672
+tzdata/zoneinfo/America/Havana,sha256=ms5rCuq2yBM49VmTymMtFQN3c5aBN1lkd8jjzKdnNm8,1117
+tzdata/zoneinfo/America/Hermosillo,sha256=W-QiSzPq2J-hWWQ-uzD6McLKzG8XPEawbJpnXlNp3-Q,286
+tzdata/zoneinfo/America/Indiana/Indianapolis,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531
+tzdata/zoneinfo/America/Indiana/Knox,sha256=KJCzXct8CTMItVLYLYeBqM6aT6b53gWCg6aDbsH58oI,1016
+tzdata/zoneinfo/America/Indiana/Marengo,sha256=ygWmq8sYee8NFwlSZyQ_tsKopFQMp9Ne557zGGbyF2Y,567
+tzdata/zoneinfo/America/Indiana/Petersburg,sha256=BIrubzHEp5QoyMaPgYbC1zSa_F3LwpXzKM8xH3rHspI,683
+tzdata/zoneinfo/America/Indiana/Tell_City,sha256=em2YMHDWEFXdZH0BKi5bLRAQ8bYDfop2T0Q8SqDh0B8,522
+tzdata/zoneinfo/America/Indiana/Vevay,sha256=dPk334e7MQwl71-avNyREBYVWuFTQcVKfltlRhrlRpw,369
+tzdata/zoneinfo/America/Indiana/Vincennes,sha256=jiODDXepmLP3gvCkBufdE3rp5cEXftBHnKne8_XOOCg,558
+tzdata/zoneinfo/America/Indiana/Winamac,sha256=hsEunaLrbxvspyV3Qm4UD7x7qOAeBtzcbbzANNMrdiw,603
+tzdata/zoneinfo/America/Indiana/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/America/Indiana/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/America/Indianapolis,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531
+tzdata/zoneinfo/America/Inuvik,sha256=d_ZX-USS70HIT-_PRJKMY6mbQRvbKLvsy9ar7uL2M40,817
+tzdata/zoneinfo/America/Iqaluit,sha256=nONS7zksGHTrbEJj73LYRZW964OncQuj_V6fNjpDoQ0,855
+tzdata/zoneinfo/America/Jamaica,sha256=pDexcAMzrv9TqLWGjVOHwIDcFMLT6Vqlzjb5AbNmkoQ,339
+tzdata/zoneinfo/America/Jujuy,sha256=7YpjOcmVaKKpiq31rQe8TTDNExdH9jjZIhdcZv-ShUg,690
+tzdata/zoneinfo/America/Juneau,sha256=V8IqRaJHSH7onK1gu3YYtW_a4VkNwjx5DCvQXpFdYAo,966
+tzdata/zoneinfo/America/Kentucky/Louisville,sha256=zS2SS573D9TmQZFWtSyRIVN3ZXVN_2FpVBbtqQFMzKU,1242
+tzdata/zoneinfo/America/Kentucky/Monticello,sha256=54or2oQ9bSbM9ifRoOjV7UjRF83jSSPuxfGeXH0nIqk,972
+tzdata/zoneinfo/America/Kentucky/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/America/Kentucky/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/America/Knox_IN,sha256=KJCzXct8CTMItVLYLYeBqM6aT6b53gWCg6aDbsH58oI,1016
+tzdata/zoneinfo/America/Kralendijk,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/La_Paz,sha256=2iYBxnc0HIwAzlx-Q3AI9Lb0GI87VY279oGcroBZSVs,170
+tzdata/zoneinfo/America/Lima,sha256=7vNjRhxzL-X4kyba-NkzXYNAOE-cqqcXvzXTqcTXBhY,283
+tzdata/zoneinfo/America/Los_Angeles,sha256=IA0FdU9tg6Nxz0CNcIUSV5dlezsL6-uh5QjP_oaj5cg,1294
+tzdata/zoneinfo/America/Louisville,sha256=zS2SS573D9TmQZFWtSyRIVN3ZXVN_2FpVBbtqQFMzKU,1242
+tzdata/zoneinfo/America/Lower_Princes,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Maceio,sha256=dSVg0dHedT9w1QO2F1AvWoel4_h8wmuYS4guEaL-5Kk,502
+tzdata/zoneinfo/America/Managua,sha256=ZYsoyN_GIlwAIpIj1spjQDPWGQ9kFZSipjUbO8caGfw,295
+tzdata/zoneinfo/America/Manaus,sha256=9kgrhpryB94YOVoshJliiiDSf9mwjb3OZwX0HusNRrk,412
+tzdata/zoneinfo/America/Marigot,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Martinique,sha256=m3rC6Mogc6cc1a9XJ8FPIYhZaSFNdYkxaZ-pfHhG3X4,178
+tzdata/zoneinfo/America/Matamoros,sha256=KxgAMGkE7TJuug9byFsT3KN836X3OyXq77v-tFpLVvc,437
+tzdata/zoneinfo/America/Mazatlan,sha256=C5CBj73KgB8vbDbDEgqMHfPeMeglQj156WNbwYSxux8,718
+tzdata/zoneinfo/America/Mendoza,sha256=dL4q0zgY2FKPbG8cC-Wknnpp8tF2Y7SWgWSC_G_WznI,708
+tzdata/zoneinfo/America/Menominee,sha256=oUmJmzOZtChYrB9In-E1GqEVi2ogKjPESXlUySUGs94,917
+tzdata/zoneinfo/America/Merida,sha256=KTdHMhhdhJtTg40KW2qSfd6N9PAQ50d_ektYDt2ouy0,654
+tzdata/zoneinfo/America/Metlakatla,sha256=EVj1LkMCgry6mT8Ln_FpHxpJSU0oSncfbHGWIQ0SI_0,586
+tzdata/zoneinfo/America/Mexico_City,sha256=vhDy1hSceJyFa3bIqn2qRi1kgxtvrCCaaB7s65mljtY,773
+tzdata/zoneinfo/America/Miquelon,sha256=Eey-Id5b4HFODINweRFtbDjcgjs_myiC2UwsgYt4kVk,550
+tzdata/zoneinfo/America/Moncton,sha256=knrBNDFwHAGFr0nWJTBQ-10F_fZ5x4n3SnZtH-KI6h8,1493
+tzdata/zoneinfo/America/Monterrey,sha256=GWEQgKgJQV89hVpFOO6nS1AYvdM6Lcw_xeYwMfkV6bg,644
+tzdata/zoneinfo/America/Montevideo,sha256=l7FjW6qscGzdvfjlbIeZ5CQ_AFWS3ZeVDS5ppMJCNM0,969
+tzdata/zoneinfo/America/Montreal,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717
+tzdata/zoneinfo/America/Montserrat,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Nassau,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717
+tzdata/zoneinfo/America/New_York,sha256=1_IgazpFmJ_JrWPVWJIlMvpzUigNX4cXa_HbecsdH6k,1744
+tzdata/zoneinfo/America/Nipigon,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717
+tzdata/zoneinfo/America/Nome,sha256=_-incQnh0DwK9hJqFaYzO4osUKAUB2k2lae565sblpA,975
+tzdata/zoneinfo/America/Noronha,sha256=Q0r3GtA5y2RGkOj56OTZG5tuBy1B6kfbhyrJqCgf27g,484
+tzdata/zoneinfo/America/North_Dakota/Beulah,sha256=RvaBIS60bNNRmREi6BXSWEbJSrcP7J8Nmxg8OkBcrow,1043
+tzdata/zoneinfo/America/North_Dakota/Center,sha256=M09x4Mx6hcBAwktvwv16YvPRmsuDjZEDwHT0Umkcgyo,990
+tzdata/zoneinfo/America/North_Dakota/New_Salem,sha256=mZca9gyfO2USzax7v0mLJEYBKBVmIqylWqnfLgSsVys,990
+tzdata/zoneinfo/America/North_Dakota/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/America/North_Dakota/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/America/Nuuk,sha256=LlGZ5Y_ud9JwWRvncHnUHRArQbbnNcmmrz3duMhR3Hc,965
+tzdata/zoneinfo/America/Ojinaga,sha256=EMAldBXpY3Vgog_8yESXQb3qoS1v69jmWm0JPgs3k9U,718
+tzdata/zoneinfo/America/Panama,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149
+tzdata/zoneinfo/America/Pangnirtung,sha256=nONS7zksGHTrbEJj73LYRZW964OncQuj_V6fNjpDoQ0,855
+tzdata/zoneinfo/America/Paramaribo,sha256=C2v9tR6no54CRECWDFhANTl40UsA4AhHsdnGoNCb4_Q,187
+tzdata/zoneinfo/America/Phoenix,sha256=rhFFPCHQiYTedfLv7ATckxeKe04jxeUvIJi4vUXMtUc,240
+tzdata/zoneinfo/America/Port-au-Prince,sha256=wsS6VbQ__bKJ2IUMPy_Pao0CLRK5pXEBrqkaYuqs3Ns,565
+tzdata/zoneinfo/America/Port_of_Spain,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Porto_Acre,sha256=VjuQUr668phq5bcH40r94BPnZBKHzJf_MQBfM6Db96U,418
+tzdata/zoneinfo/America/Porto_Velho,sha256=9yPU8EXtKDQHLF745ETc9qZZ9Me2CK6jvgb6S53pSKg,394
+tzdata/zoneinfo/America/Puerto_Rico,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Punta_Arenas,sha256=2Aqh7bqo-mQlnMjURDkCOeEYmeXhkzKP7OxFAvhTjjA,1218
+tzdata/zoneinfo/America/Rainy_River,sha256=ANzwYGBU1PknQW4LR-H92i5c4Db95LU-UQhPhWZCjDo,1294
+tzdata/zoneinfo/America/Rankin_Inlet,sha256=JQCXQBdyc8uJTjIFO4jZuzS0OjG0gRHv8MPmdzN93CU,807
+tzdata/zoneinfo/America/Recife,sha256=3yZTwF3MJlkY0D48CQUTzCRwDCfGNq8EXXTZYlBgUTg,484
+tzdata/zoneinfo/America/Regina,sha256=_JHuns225iE-THc9NFp-RBq4PWULAuGw2OLbpOB_UMw,638
+tzdata/zoneinfo/America/Resolute,sha256=2UeJBR2ZSkn1bUZy0G0SEhBtY9vycwSRU4naK-sw044,807
+tzdata/zoneinfo/America/Rio_Branco,sha256=VjuQUr668phq5bcH40r94BPnZBKHzJf_MQBfM6Db96U,418
+tzdata/zoneinfo/America/Rosario,sha256=9Ij3WjT9mWMKQ43LeSUIqQuDb9zS3FSlHYPVNQJTFf0,708
+tzdata/zoneinfo/America/Santa_Isabel,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025
+tzdata/zoneinfo/America/Santarem,sha256=dDEGsnrm4wrzl4sK6K8PzEroBKD7A1V7HBa8cWW4cMk,409
+tzdata/zoneinfo/America/Santiago,sha256=_QBpU8K0QqLh5m2yqWfdkypIJDkPAc3dnIAc5jRQxxU,1354
+tzdata/zoneinfo/America/Santo_Domingo,sha256=xmJo59mZXN7Wnf-3Jjl37mCC-8GfN6xmk2l_vngyfeI,317
+tzdata/zoneinfo/America/Sao_Paulo,sha256=-izrIi8GXAKJ85l_8MVLoFp0pZm0Uihw-oapbiThiJE,952
+tzdata/zoneinfo/America/Scoresbysund,sha256=wrhIEVAFI29qKT3TdOWiiJwI80AohXwwfb1mCPSAXHo,984
+tzdata/zoneinfo/America/Shiprock,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042
+tzdata/zoneinfo/America/Sitka,sha256=pF5yln--MOzEMDacNd_Id0HX9pAmge8POfcxyTNh1-0,956
+tzdata/zoneinfo/America/St_Barthelemy,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/St_Johns,sha256=v99q_AFMPll5MMxMp98aqY40cmis2wciTfTqs2_kb0k,1878
+tzdata/zoneinfo/America/St_Kitts,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/St_Lucia,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/St_Thomas,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/St_Vincent,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Swift_Current,sha256=F-b65Yaax23CsuhSmeTDl6Tv9du4IsvWvMbbSuwHkLM,368
+tzdata/zoneinfo/America/Tegucigalpa,sha256=KlvqBJGswa9DIXlE3acU-pgd4IFqDeBRrUz02PmlNC0,194
+tzdata/zoneinfo/America/Thule,sha256=LzL5jdmZkxRkHdA3XkoqJPG_ImllnSRhYYLQpMf_TY8,455
+tzdata/zoneinfo/America/Thunder_Bay,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717
+tzdata/zoneinfo/America/Tijuana,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025
+tzdata/zoneinfo/America/Toronto,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717
+tzdata/zoneinfo/America/Tortola,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Vancouver,sha256=Epou71sUffvHB1rd7wT0krvo3okXAV45_TWcOFpy26Q,1330
+tzdata/zoneinfo/America/Virgin,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177
+tzdata/zoneinfo/America/Whitehorse,sha256=CyY4jNd0fzNSdf1HlYGfaktApmH71tRNRlpOEO32DGs,1029
+tzdata/zoneinfo/America/Winnipeg,sha256=ANzwYGBU1PknQW4LR-H92i5c4Db95LU-UQhPhWZCjDo,1294
+tzdata/zoneinfo/America/Yakutat,sha256=pvHLVNA1mI-H9fBDnlnpI6B9XzVFQeyvI9nyIkaFNYQ,946
+tzdata/zoneinfo/America/Yellowknife,sha256=Dq2mxcSNWZhMWRqxwwtMcaqwAIGMwkOzz-mW8fJscV8,970
+tzdata/zoneinfo/America/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/America/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Antarctica/Casey,sha256=1jc-FAjvkKnmCjhz8-yQgEKrN_sVmzAi8DVoy9_K8AQ,287
+tzdata/zoneinfo/Antarctica/Davis,sha256=Pom_267rsoZl6yLvYllu_SW1kixIrSPmsd-HLztn33Y,197
+tzdata/zoneinfo/Antarctica/DumontDUrville,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154
+tzdata/zoneinfo/Antarctica/Macquarie,sha256=aOZlIzIdTwevaTXoQkDlex2LSFDrg64GvRfcLnfCDAM,976
+tzdata/zoneinfo/Antarctica/Mawson,sha256=UYuiBSE0qZ-2kkBAa6Xq5g9NXg-W_R0P-rl2tlO0jHc,152
+tzdata/zoneinfo/Antarctica/McMurdo,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043
+tzdata/zoneinfo/Antarctica/Palmer,sha256=3MXfhQBaRB57_jqHZMl-M_K48NMFe4zALc7vaMyS5xw,887
+tzdata/zoneinfo/Antarctica/Rothera,sha256=XeddRL2YTDfEWzQI7nDqfW-Tfg-5EebxsHsMHyzGudI,132
+tzdata/zoneinfo/Antarctica/South_Pole,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043
+tzdata/zoneinfo/Antarctica/Syowa,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133
+tzdata/zoneinfo/Antarctica/Troll,sha256=s4z0F_uKzx3biKjEzvHwb56132XRs6IR22fCQglW5GI,158
+tzdata/zoneinfo/Antarctica/Vostok,sha256=cDp-B4wKXE8U5b_zqJIlxdGY-AIAMCTJOZG3bRZBKNc,170
+tzdata/zoneinfo/Antarctica/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Antarctica/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Arctic/Longyearbyen,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705
+tzdata/zoneinfo/Arctic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Arctic/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Asia/Aden,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133
+tzdata/zoneinfo/Asia/Almaty,sha256=87WNMKCF7W2V6tq5LvX5DXWoi9MuwjCAY3f9dgwui4s,618
+tzdata/zoneinfo/Asia/Amman,sha256=KOnKO4_1XRlQvLG61GTbfKImSthwBHMSnzV1ExW8i5Q,928
+tzdata/zoneinfo/Asia/Anadyr,sha256=30bdZurg4Q__lCpH509TE0U7pOcEY6qxjvuPF9ai5yc,743
+tzdata/zoneinfo/Asia/Aqtau,sha256=bRj27vG5HvGegFg5eIKNmq3dfteYmr7KmTs4JFO-7SM,606
+tzdata/zoneinfo/Asia/Aqtobe,sha256=Pm7yI5cmfzx8CGXR2mQJDjtH12KCpx8ezFKchiJVVJ4,615
+tzdata/zoneinfo/Asia/Ashgabat,sha256=OTLHdQ8jFPDvxu_IwKX_c3W3jdN6e7FGoCSEEb0XKuw,375
+tzdata/zoneinfo/Asia/Ashkhabad,sha256=OTLHdQ8jFPDvxu_IwKX_c3W3jdN6e7FGoCSEEb0XKuw,375
+tzdata/zoneinfo/Asia/Atyrau,sha256=1YG4QzLxPRZQeGHiOrbm0cRs8ERTNg1NF9dWEwW2Pi0,616
+tzdata/zoneinfo/Asia/Baghdad,sha256=zFe6LXSfuoJjGsmYTMGjJtBcAMLiKFkD7j7-VaqKwH8,630
+tzdata/zoneinfo/Asia/Bahrain,sha256=YWDWV1o3HHWxnmwlzwMWC53C84ZYPkK_gYn9-P0Xx4U,152
+tzdata/zoneinfo/Asia/Baku,sha256=_Wh6ONaRatMc9lpwGO6zB9pTE38NZ4oWg4_-sZl17mA,744
+tzdata/zoneinfo/Asia/Bangkok,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152
+tzdata/zoneinfo/Asia/Barnaul,sha256=UGFYJYvtgYVS8Tqsqvj6p0OQCmN3zdY9wITWg8ODG-k,753
+tzdata/zoneinfo/Asia/Beirut,sha256=FgM4gqbWFp6KuUnVn-H8UIXZgTydBeOxDdbebJ0GpUc,732
+tzdata/zoneinfo/Asia/Bishkek,sha256=RXdxVxaiE5zxX5atQl-7ZesEeZVjsCXBGZ6cJbVU9pE,618
+tzdata/zoneinfo/Asia/Brunei,sha256=3ajgII3xZ-Wc-dqXRTSMw8qQRDSjXlSBIxyE_sDRGTk,320
+tzdata/zoneinfo/Asia/Calcutta,sha256=OgC9vhvElZ5ydWfHMLpRsDRV7NRV98GQxa0UOG63mw0,220
+tzdata/zoneinfo/Asia/Chita,sha256=1Lme3ccO47R5gmTe5VCq1BSb0m_1opWibq21zvZlntg,750
+tzdata/zoneinfo/Asia/Choibalsan,sha256=hsakX_o0anB6tNBNp_FKGx4k57IcODYubf1u2G_2Vqk,619
+tzdata/zoneinfo/Asia/Chongqing,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393
+tzdata/zoneinfo/Asia/Chungking,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393
+tzdata/zoneinfo/Asia/Colombo,sha256=QAyjK7gtXUWfLuju1M0H3_ew6iTM-bwfzO5obgvaHy8,247
+tzdata/zoneinfo/Asia/Dacca,sha256=rCGmEwbW4qkUU2QfTj5zLrydVCq8HTWl1dsqEDQOvvo,231
+tzdata/zoneinfo/Asia/Damascus,sha256=AtZTDRzHEB7QnKxFXvtWsNUI1cCCe27sAfpDfQd0MwY,1234
+tzdata/zoneinfo/Asia/Dhaka,sha256=rCGmEwbW4qkUU2QfTj5zLrydVCq8HTWl1dsqEDQOvvo,231
+tzdata/zoneinfo/Asia/Dili,sha256=ByL6yx7Cuq6axUp5D1n8a9MtmAod_mw6JQP_ltYdOUg,170
+tzdata/zoneinfo/Asia/Dubai,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133
+tzdata/zoneinfo/Asia/Dushanbe,sha256=8qbn76rf9xu47NYVdfGvjnkf2KZxNN5J8ekFiXUz3AQ,366
+tzdata/zoneinfo/Asia/Famagusta,sha256=385fbaRnx-mdEaXqSyBKVBDDKPzCGKbynWYt75wwCug,940
+tzdata/zoneinfo/Asia/Gaza,sha256=-PC__gGODaDGgv5LLzH7ptNLbNdStPkxGY4LmebvcNU,2950
+tzdata/zoneinfo/Asia/Harbin,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393
+tzdata/zoneinfo/Asia/Hebron,sha256=4FujfuE-ECIXgKW4pv0lxq2ZkAj7jDwt0rezuA0fFzg,2968
+tzdata/zoneinfo/Asia/Ho_Chi_Minh,sha256=R-ReVMreMcETG0Sifjfe5z-PgQpUsKjT6dVbEKzT3sE,236
+tzdata/zoneinfo/Asia/Hong_Kong,sha256=9AaPcyRtuXQX9zRnRTVkxX1mRs5JCbn6JTaSPvzX608,775
+tzdata/zoneinfo/Asia/Hovd,sha256=eqAvD2RfuIfSDhtqk58MECIjz5X14OHZ7aO4z14kndk,594
+tzdata/zoneinfo/Asia/Irkutsk,sha256=sWxp8g_aSfFan4ZyF9s6-pEX5Vgwxi_jNv7vwN06XIo,760
+tzdata/zoneinfo/Asia/Istanbul,sha256=KnFjsWuUgG9pmRNI59CmDEbrYbHwMF9fS4P2E9sQgG8,1200
+tzdata/zoneinfo/Asia/Jakarta,sha256=4qCZ6kix9xZriNIZsyb3xENz0IkJzZcjtENGlG_Wo4Q,248
+tzdata/zoneinfo/Asia/Jayapura,sha256=BUa0kX1iOdf0E-v7415h7l0lQv4DBCYX_3dAbYmQ0xU,171
+tzdata/zoneinfo/Asia/Jerusalem,sha256=n83o1YTeoFhfXIcnqvNfSKFJ4NvTqDv2zvi8qcFAIeM,1074
+tzdata/zoneinfo/Asia/Kabul,sha256=pNIwTfiSG71BGKvrhKqo1xdxckAx9vfcx5nJanrL81Q,159
+tzdata/zoneinfo/Asia/Kamchatka,sha256=Qix8x3s-m8UTeiwzNPBy_ZQvAzX_aaihz_PzLfTiUac,727
+tzdata/zoneinfo/Asia/Karachi,sha256=ujo4wv-3oa9tfrFT5jsLcEYcjeGeBRgG2QwdXg_ijU4,266
+tzdata/zoneinfo/Asia/Kashgar,sha256=hJyv03dhHML8K0GJGrY8b7M0OUkEXblh_RYmdZMxWtQ,133
+tzdata/zoneinfo/Asia/Kathmandu,sha256=drjxv-ByIxodnn-FATEOJ8DQgEjEj3Qihgtkd8FCxDg,161
+tzdata/zoneinfo/Asia/Katmandu,sha256=drjxv-ByIxodnn-FATEOJ8DQgEjEj3Qihgtkd8FCxDg,161
+tzdata/zoneinfo/Asia/Khandyga,sha256=fdEDOsDJkLuENybqIXtTiI4k2e24dKHDfBTww9AtbSw,775
+tzdata/zoneinfo/Asia/Kolkata,sha256=OgC9vhvElZ5ydWfHMLpRsDRV7NRV98GQxa0UOG63mw0,220
+tzdata/zoneinfo/Asia/Krasnoyarsk,sha256=buNI5S1g7eedK-PpnrLkBFFZDUyCtHxcxXDQGF2ARos,741
+tzdata/zoneinfo/Asia/Kuala_Lumpur,sha256=CVSy2aMB2U9DSAJGBqcbvLL6JNPNNwn1vIvKYFA5eF0,256
+tzdata/zoneinfo/Asia/Kuching,sha256=3ajgII3xZ-Wc-dqXRTSMw8qQRDSjXlSBIxyE_sDRGTk,320
+tzdata/zoneinfo/Asia/Kuwait,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133
+tzdata/zoneinfo/Asia/Macao,sha256=mr89i_wpMoWhAtqZrF2SGcoILcUw6rYrDkIUNADes7E,791
+tzdata/zoneinfo/Asia/Macau,sha256=mr89i_wpMoWhAtqZrF2SGcoILcUw6rYrDkIUNADes7E,791
+tzdata/zoneinfo/Asia/Magadan,sha256=wAufMGWL_s1Aw2l3myAfBFtrROVPes3dMoNuDEoNwT8,751
+tzdata/zoneinfo/Asia/Makassar,sha256=NV9j_RTuiU47mvJvfKE8daXH5AFYJ8Ki4gvHBJSxyLc,190
+tzdata/zoneinfo/Asia/Manila,sha256=Vk8aVoXR_edPDnARFdmEui4pq4Q3yNuiPUCzeIAPLBI,238
+tzdata/zoneinfo/Asia/Muscat,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133
+tzdata/zoneinfo/Asia/Nicosia,sha256=TYYqWp8sK0AwBUHAp0wuuihZuQ19RXdt28bth33zOBI,597
+tzdata/zoneinfo/Asia/Novokuznetsk,sha256=aYW9rpcxpf_zrOZc2vmpcqgiuCRKMHB1lMrioI43KCw,726
+tzdata/zoneinfo/Asia/Novosibirsk,sha256=I2n4MCElad9sMcyJAAc4YdVT6ewbhR79OoAAuhEJfCY,753
+tzdata/zoneinfo/Asia/Omsk,sha256=y7u47EObB3wI8MxKHBRTFM-BEZZqhGpzDg7x5lcwJXY,741
+tzdata/zoneinfo/Asia/Oral,sha256=Q-Gf85NIvdAtU52Zkgf78rVHPlg85xyMe9Zm9ybh0po,625
+tzdata/zoneinfo/Asia/Phnom_Penh,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152
+tzdata/zoneinfo/Asia/Pontianak,sha256=o0x0jNTlwjiUqAzGX_HlzvCMru2zUURgQ4xzpS95xds,247
+tzdata/zoneinfo/Asia/Pyongyang,sha256=NxC5da8oTZ4StiFQnlhjlp9FTRuMM-Xwsq3Yg4y0xkA,183
+tzdata/zoneinfo/Asia/Qatar,sha256=YWDWV1o3HHWxnmwlzwMWC53C84ZYPkK_gYn9-P0Xx4U,152
+tzdata/zoneinfo/Asia/Qostanay,sha256=5tZkj1o0p4vaREsPO0YgIiw6eDf1cqO52x-0EMg_2L4,624
+tzdata/zoneinfo/Asia/Qyzylorda,sha256=JltKDEnuHmIQGYdFTAJMDDpdDA_HxjJOAHHaV7kFrlQ,624
+tzdata/zoneinfo/Asia/Rangoon,sha256=6J2DXIEdTaRKqLOGeCzogo3whaoO6PJWYamIHS8A6Qw,187
+tzdata/zoneinfo/Asia/Riyadh,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133
+tzdata/zoneinfo/Asia/Saigon,sha256=R-ReVMreMcETG0Sifjfe5z-PgQpUsKjT6dVbEKzT3sE,236
+tzdata/zoneinfo/Asia/Sakhalin,sha256=M_TBd-03j-3Yc9KwhGEoBTwSJxWO1lPBG7ndst16PGo,755
+tzdata/zoneinfo/Asia/Samarkand,sha256=KZ_q-6GMDVgJb8RFqcrbVcPC0WLczolClC4nZA1HVNU,366
+tzdata/zoneinfo/Asia/Seoul,sha256=ZKcLb7zJtl52Lb0l64m29AwTcUbtyNvU0IHq-s2reN4,415
+tzdata/zoneinfo/Asia/Shanghai,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393
+tzdata/zoneinfo/Asia/Singapore,sha256=CVSy2aMB2U9DSAJGBqcbvLL6JNPNNwn1vIvKYFA5eF0,256
+tzdata/zoneinfo/Asia/Srednekolymsk,sha256=06mojetFbDd4ag1p8NK0Fg6rF2OOnZMFRRC90N2ATZc,742
+tzdata/zoneinfo/Asia/Taipei,sha256=oEwscvT3aoMXjQNt2X0VfuHzLkeORN2npcEJI2h-5s8,511
+tzdata/zoneinfo/Asia/Tashkent,sha256=0vpN2gI9GY50z1nea6zCPFf2B6VCu6XQQHx4l6rhnTI,366
+tzdata/zoneinfo/Asia/Tbilisi,sha256=ON_Uzv2VTSk6mRefNU-aI-qkqtCoUX6oECVqpeS42eI,629
+tzdata/zoneinfo/Asia/Tehran,sha256=ozLlhNXzpJCZx7bc-VpcmNdgdtn6lPtF6f9qkaDEycI,812
+tzdata/zoneinfo/Asia/Tel_Aviv,sha256=n83o1YTeoFhfXIcnqvNfSKFJ4NvTqDv2zvi8qcFAIeM,1074
+tzdata/zoneinfo/Asia/Thimbu,sha256=N6d_vfFvYORfMnr1fHJjYSt4DBORSbLi_2T-r2dJBnI,154
+tzdata/zoneinfo/Asia/Thimphu,sha256=N6d_vfFvYORfMnr1fHJjYSt4DBORSbLi_2T-r2dJBnI,154
+tzdata/zoneinfo/Asia/Tokyo,sha256=WaOHFDDw07k-YZ-jCkOkHR6IvdSf8m8J0PQFpQBwb5Y,213
+tzdata/zoneinfo/Asia/Tomsk,sha256=Bf7GoFTcUeP2hYyuYpruJji33tcEoLP-80o38A6i4zU,753
+tzdata/zoneinfo/Asia/Ujung_Pandang,sha256=NV9j_RTuiU47mvJvfKE8daXH5AFYJ8Ki4gvHBJSxyLc,190
+tzdata/zoneinfo/Asia/Ulaanbaatar,sha256=--I8P6_e4BtRIe3wCSkPtwHOu_k9rPsw-KqQKHJC9vM,594
+tzdata/zoneinfo/Asia/Ulan_Bator,sha256=--I8P6_e4BtRIe3wCSkPtwHOu_k9rPsw-KqQKHJC9vM,594
+tzdata/zoneinfo/Asia/Urumqi,sha256=hJyv03dhHML8K0GJGrY8b7M0OUkEXblh_RYmdZMxWtQ,133
+tzdata/zoneinfo/Asia/Ust-Nera,sha256=6NkuV1zOms-4qHQhq-cGc-cqEVgKHk7qd3MLDM-e2BA,771
+tzdata/zoneinfo/Asia/Vientiane,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152
+tzdata/zoneinfo/Asia/Vladivostok,sha256=zkOXuEDgpxX8HQGgDlh9SbAQzHOaNxX2XSI6Y4gMD-k,742
+tzdata/zoneinfo/Asia/Yakutsk,sha256=xD6zA4E228dC1mIUQ7cMO-9LORSfE-Fok0awGDG6juk,741
+tzdata/zoneinfo/Asia/Yangon,sha256=6J2DXIEdTaRKqLOGeCzogo3whaoO6PJWYamIHS8A6Qw,187
+tzdata/zoneinfo/Asia/Yekaterinburg,sha256=q17eUyqOEK2LJYKXYLCJqylj-vmaCG2vSNMttqrQTRk,760
+tzdata/zoneinfo/Asia/Yerevan,sha256=pLEBdchA8H9l-9hdA6FjHmwaj5T1jupK0u-bor1KKa0,708
+tzdata/zoneinfo/Asia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Asia/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Atlantic/Azores,sha256=KmvA_G-yNl76C0A17JdtFg7ju9LHa5JIWh15GOzLxds,1453
+tzdata/zoneinfo/Atlantic/Bermuda,sha256=PuxqD2cD99Pzjb8hH99Dws053d_zXnZHjeH0kZ8LSLI,1024
+tzdata/zoneinfo/Atlantic/Canary,sha256=XMmxBlscPIWXhiauKy_d5bxX4xjNMM-5Vw84FwZkT00,478
+tzdata/zoneinfo/Atlantic/Cape_Verde,sha256=E5ss6xpIpD0g_VEDsFMFi-ltsebp98PBSpULoVxIAyU,175
+tzdata/zoneinfo/Atlantic/Faeroe,sha256=Iw0qB0mBuviH5w3Qy8jaxCOes07ZHh2wkW8MPUWJqj0,441
+tzdata/zoneinfo/Atlantic/Faroe,sha256=Iw0qB0mBuviH5w3Qy8jaxCOes07ZHh2wkW8MPUWJqj0,441
+tzdata/zoneinfo/Atlantic/Jan_Mayen,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705
+tzdata/zoneinfo/Atlantic/Madeira,sha256=IX1jlaiB-DaaGwjnfc5pYr8eEtX7_Wol-T50QNAs3qw,1453
+tzdata/zoneinfo/Atlantic/Reykjavik,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Atlantic/South_Georgia,sha256=kPGfCLQD2C6_Xc5TyAmqmXP-GYdLLPucpBn3S7ybWu8,132
+tzdata/zoneinfo/Atlantic/St_Helena,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Atlantic/Stanley,sha256=QqQd8IWklNapMKjN5vF7vvVn4K-yl3VKvM5zkCKabCM,789
+tzdata/zoneinfo/Atlantic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Atlantic/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Australia/ACT,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904
+tzdata/zoneinfo/Australia/Adelaide,sha256=Gk1SdGRVmB233I-WETXAMCZz7L7HVzoN4aUoIcgNr3g,921
+tzdata/zoneinfo/Australia/Brisbane,sha256=2kVWz9CI_qtfdb55g0iL59gUBC7lnO3GUalIQxtHADY,289
+tzdata/zoneinfo/Australia/Broken_Hill,sha256=dzk9LvGA_xRStnAIjAFuTJ8Uwz_s7qGWGQmiXPgDsLY,941
+tzdata/zoneinfo/Australia/Canberra,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904
+tzdata/zoneinfo/Australia/Currie,sha256=1IAVgf0AA3sBPXFhaxGfu9UQ_cpd4GNpsQ9xio2l4y0,1003
+tzdata/zoneinfo/Australia/Darwin,sha256=ZoexbhgdUlV4leV-dhBu6AxDVkJy43xrPb9UQ3EQCdI,234
+tzdata/zoneinfo/Australia/Eucla,sha256=3NqsFfMzR6-lSUPViNXBAOyJPqyokisse7uDXurURpk,314
+tzdata/zoneinfo/Australia/Hobart,sha256=1IAVgf0AA3sBPXFhaxGfu9UQ_cpd4GNpsQ9xio2l4y0,1003
+tzdata/zoneinfo/Australia/LHI,sha256=82i9JWWcApPQK7eex9rH1bc6kt_6_OFLTdL_uLoRqto,692
+tzdata/zoneinfo/Australia/Lindeman,sha256=iHkCc0QJ7iaQffiTTXQVJ2swsC7QJxLUMHQOGCFlkTk,325
+tzdata/zoneinfo/Australia/Lord_Howe,sha256=82i9JWWcApPQK7eex9rH1bc6kt_6_OFLTdL_uLoRqto,692
+tzdata/zoneinfo/Australia/Melbourne,sha256=X7JPMEj_SYWyfgWFMkp6FOmT6GfyjR-lF9hFGgTavnE,904
+tzdata/zoneinfo/Australia/NSW,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904
+tzdata/zoneinfo/Australia/North,sha256=ZoexbhgdUlV4leV-dhBu6AxDVkJy43xrPb9UQ3EQCdI,234
+tzdata/zoneinfo/Australia/Perth,sha256=ZsuelcBC1YfWugH2CrlOXQcSDD4gGUJCobB1W-aupHo,306
+tzdata/zoneinfo/Australia/Queensland,sha256=2kVWz9CI_qtfdb55g0iL59gUBC7lnO3GUalIQxtHADY,289
+tzdata/zoneinfo/Australia/South,sha256=Gk1SdGRVmB233I-WETXAMCZz7L7HVzoN4aUoIcgNr3g,921
+tzdata/zoneinfo/Australia/Sydney,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904
+tzdata/zoneinfo/Australia/Tasmania,sha256=1IAVgf0AA3sBPXFhaxGfu9UQ_cpd4GNpsQ9xio2l4y0,1003
+tzdata/zoneinfo/Australia/Victoria,sha256=X7JPMEj_SYWyfgWFMkp6FOmT6GfyjR-lF9hFGgTavnE,904
+tzdata/zoneinfo/Australia/West,sha256=ZsuelcBC1YfWugH2CrlOXQcSDD4gGUJCobB1W-aupHo,306
+tzdata/zoneinfo/Australia/Yancowinna,sha256=dzk9LvGA_xRStnAIjAFuTJ8Uwz_s7qGWGQmiXPgDsLY,941
+tzdata/zoneinfo/Australia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Australia/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Brazil/Acre,sha256=VjuQUr668phq5bcH40r94BPnZBKHzJf_MQBfM6Db96U,418
+tzdata/zoneinfo/Brazil/DeNoronha,sha256=Q0r3GtA5y2RGkOj56OTZG5tuBy1B6kfbhyrJqCgf27g,484
+tzdata/zoneinfo/Brazil/East,sha256=-izrIi8GXAKJ85l_8MVLoFp0pZm0Uihw-oapbiThiJE,952
+tzdata/zoneinfo/Brazil/West,sha256=9kgrhpryB94YOVoshJliiiDSf9mwjb3OZwX0HusNRrk,412
+tzdata/zoneinfo/Brazil/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Brazil/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/CET,sha256=9q70fJErxHX0_hfgu5Wk0oH5ZZLUWhBIHJI1z7gHgBI,621
+tzdata/zoneinfo/CST6CDT,sha256=ajbQjR1ESk2m3dg1sAR2slqafjcfIhw-SC4SC6F7VBY,951
+tzdata/zoneinfo/Canada/Atlantic,sha256=kO5ahBM2oTLfWS4KX15FbKXfo5wg-f9vw1_hMOISGig,1672
+tzdata/zoneinfo/Canada/Central,sha256=ANzwYGBU1PknQW4LR-H92i5c4Db95LU-UQhPhWZCjDo,1294
+tzdata/zoneinfo/Canada/Eastern,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717
+tzdata/zoneinfo/Canada/Mountain,sha256=Dq2mxcSNWZhMWRqxwwtMcaqwAIGMwkOzz-mW8fJscV8,970
+tzdata/zoneinfo/Canada/Newfoundland,sha256=v99q_AFMPll5MMxMp98aqY40cmis2wciTfTqs2_kb0k,1878
+tzdata/zoneinfo/Canada/Pacific,sha256=Epou71sUffvHB1rd7wT0krvo3okXAV45_TWcOFpy26Q,1330
+tzdata/zoneinfo/Canada/Saskatchewan,sha256=_JHuns225iE-THc9NFp-RBq4PWULAuGw2OLbpOB_UMw,638
+tzdata/zoneinfo/Canada/Yukon,sha256=CyY4jNd0fzNSdf1HlYGfaktApmH71tRNRlpOEO32DGs,1029
+tzdata/zoneinfo/Canada/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Canada/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Chile/Continental,sha256=_QBpU8K0QqLh5m2yqWfdkypIJDkPAc3dnIAc5jRQxxU,1354
+tzdata/zoneinfo/Chile/EasterIsland,sha256=EwVM74XjsboPVxK9bWmdd4nTrtvasP1zlLdxrMB_YaE,1174
+tzdata/zoneinfo/Chile/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Chile/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Cuba,sha256=ms5rCuq2yBM49VmTymMtFQN3c5aBN1lkd8jjzKdnNm8,1117
+tzdata/zoneinfo/EET,sha256=ftIfVTZNlKejEciANKFFxES2uv_Z4rTAgyjwvk1lLpE,497
+tzdata/zoneinfo/EST,sha256=Eqcp0sCDGh_NPbcYAbBhmUob540rIs8FUnkmkZDQ0go,111
+tzdata/zoneinfo/EST5EDT,sha256=RAPR1jPCcVa5nvibF24lGKApc2bRw3Y87RbesyI3BP4,951
+tzdata/zoneinfo/Egypt,sha256=icuaNiEvuC6TPc2fqhDv36lpop7IDDIGO7tFGMAz0b4,1309
+tzdata/zoneinfo/Eire,sha256=EcADNuAvExj-dkqylGfF8q_vv_-mRPqN0k9bCDtJW3E,1496
+tzdata/zoneinfo/Etc/GMT,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/Etc/GMT+0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/Etc/GMT+1,sha256=5L9o8TEUgtB11poIag85vRdq08LMDZmZ6DPn7UqPL_g,113
+tzdata/zoneinfo/Etc/GMT+10,sha256=IvBxiqQU76qzNbuxRo8Ah9rPQSRGQGKp_SRs5u1PPkM,114
+tzdata/zoneinfo/Etc/GMT+11,sha256=9MfFpFp_rt9PksMjQ23VOlir3hzTlnLz_5V2tfonhbU,114
+tzdata/zoneinfo/Etc/GMT+12,sha256=l26XCFp9IbgXGvMw7NHgHzIZbHry2B5qGYfhMDHFVrw,114
+tzdata/zoneinfo/Etc/GMT+2,sha256=YbbqH7B6jNoQEIjyV4-8a2cXD9lGC3vQKnEkY2ucDGI,113
+tzdata/zoneinfo/Etc/GMT+3,sha256=q3D9DLfmTBUAo4YMnNUNUUKrAkKSwM5Q-vesd9A6SZQ,113
+tzdata/zoneinfo/Etc/GMT+4,sha256=UghKME3laXSDZ7q74YDb4FcLnzNqXQydcZpQHvssP2k,113
+tzdata/zoneinfo/Etc/GMT+5,sha256=TZ5qaoELlszW_Z5FdqAEMKk8Y_xu5XhZBNZUco55SrM,113
+tzdata/zoneinfo/Etc/GMT+6,sha256=_2k3LZ5x8hVjMwwmCx6GqUwW-v1IvOkBrJjYH5bD6Qw,113
+tzdata/zoneinfo/Etc/GMT+7,sha256=Di8J430WGr98Ww95tdfIo8hGxkVQfJvlx55ansDuoeQ,113
+tzdata/zoneinfo/Etc/GMT+8,sha256=OIIlUFhZwL2ctx3fxINbY2HDDAmSQ7i2ZAUgX7Exjgw,113
+tzdata/zoneinfo/Etc/GMT+9,sha256=1vpkIoPqBiwDWzH-fLFxwNbmdKRY7mqdiJhYQImVxaw,113
+tzdata/zoneinfo/Etc/GMT-0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/Etc/GMT-1,sha256=S81S9Z0-V-0B5U-0S0Pnbx8fv2iHtwE1LrlZk-ckLto,114
+tzdata/zoneinfo/Etc/GMT-10,sha256=VvdG5IpXB_xJX4omzfrrHblkRUzkbCZXPhTrLngc7vk,115
+tzdata/zoneinfo/Etc/GMT-11,sha256=2sYLfVuDFSy7Kc1WOPiY1EqquHw5Xx4HbDA1QOL1hc4,115
+tzdata/zoneinfo/Etc/GMT-12,sha256=ifHVhk5fczZG3GDy_Nv7YsLNaxf8stB4MrzgWUCINlU,115
+tzdata/zoneinfo/Etc/GMT-13,sha256=CMkORdXsaSyL-4N0n37Cyc1lCr22ZsWyug9_QZVe0E0,115
+tzdata/zoneinfo/Etc/GMT-14,sha256=NK07ElwueU0OP8gORtcXUUug_3v4d04uxfVHMUnLM9U,115
+tzdata/zoneinfo/Etc/GMT-2,sha256=QMToMLcif1S4SNPOMxMtBLqc1skUYnIhbUAjKEdAf9w,114
+tzdata/zoneinfo/Etc/GMT-3,sha256=10GMvfulaJwDQiHiWEJiU_YURyjDfPcl5ugnYBugN3E,114
+tzdata/zoneinfo/Etc/GMT-4,sha256=c6Kx3v41GRkrvky8k71db_UJbpyyp2OZCsjDSvjkr6s,114
+tzdata/zoneinfo/Etc/GMT-5,sha256=94TvO8e_8t52bs8ry70nAquvgK8qJKQTI7lQnVCHX-U,114
+tzdata/zoneinfo/Etc/GMT-6,sha256=3fH8eX--0iDijmYAQHQ0IUXheezaj6-aadZsQNAB4fE,114
+tzdata/zoneinfo/Etc/GMT-7,sha256=DnsTJ3NUYYGLUwFb_L15U_GbaMF-acLVsPyTNySyH-M,114
+tzdata/zoneinfo/Etc/GMT-8,sha256=kvGQUwONDBG7nhEp_wESc4xl4xNXiXEivxAv09nkr_g,114
+tzdata/zoneinfo/Etc/GMT-9,sha256=U1WRFGWQAW91JXK99gY1K9d0rFZYDWHzDUR3z71Lh6Y,114
+tzdata/zoneinfo/Etc/GMT0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/Etc/Greenwich,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/Etc/UCT,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/Etc/UTC,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/Etc/Universal,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/Etc/Zulu,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/Etc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Etc/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Europe/Amsterdam,sha256=sQ-VQqhQnwpj68p449gEMt2GuOopZAAoD-vZz6dugog,1103
+tzdata/zoneinfo/Europe/Andorra,sha256=leuTyE4uduIBX0aHb_7PK_KlslpWSyS6e0SS84hKFrE,389
+tzdata/zoneinfo/Europe/Astrakhan,sha256=P3E5UDgQ4gqsMi-KdMAWwOSStogdcNl9rLMVUdpFLXI,726
+tzdata/zoneinfo/Europe/Athens,sha256=8f1niwVI4ymziTT2KBJV5pjfp2GtH_hB9sy3lgbGE0U,682
+tzdata/zoneinfo/Europe/Belfast,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599
+tzdata/zoneinfo/Europe/Belgrade,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478
+tzdata/zoneinfo/Europe/Berlin,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705
+tzdata/zoneinfo/Europe/Bratislava,sha256=pukw4zdc3LUffYp0iFr_if0UuGHrt1yzOdD5HBbBRpo,723
+tzdata/zoneinfo/Europe/Brussels,sha256=sQ-VQqhQnwpj68p449gEMt2GuOopZAAoD-vZz6dugog,1103
+tzdata/zoneinfo/Europe/Bucharest,sha256=iY74H96aaTMJvmqAhzUoSI8SjZUtPvv4PGF4ClwFm6U,661
+tzdata/zoneinfo/Europe/Budapest,sha256=qNr-valoDI1mevuQXqOMkOhIcT194EczOKIijxrDMV8,766
+tzdata/zoneinfo/Europe/Busingen,sha256=GZBiscMM_rI3XshMVt9SvlGJGYamKTt6Ek06YlCfRek,497
+tzdata/zoneinfo/Europe/Chisinau,sha256=5TPhkCtxxa0ByLCv7YxOrc5Vtdui2v2VX8vrSopPkPs,755
+tzdata/zoneinfo/Europe/Copenhagen,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705
+tzdata/zoneinfo/Europe/Dublin,sha256=EcADNuAvExj-dkqylGfF8q_vv_-mRPqN0k9bCDtJW3E,1496
+tzdata/zoneinfo/Europe/Gibraltar,sha256=t1hglDTLUIFqs91nY5lulN7oxkoAXHnh0zjyaKG2bG8,1220
+tzdata/zoneinfo/Europe/Guernsey,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599
+tzdata/zoneinfo/Europe/Helsinki,sha256=ccpK9ZmPCZkMXoddNQ_DyONPKAuub-FPNtRpL6znpWM,481
+tzdata/zoneinfo/Europe/Isle_of_Man,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599
+tzdata/zoneinfo/Europe/Istanbul,sha256=KnFjsWuUgG9pmRNI59CmDEbrYbHwMF9fS4P2E9sQgG8,1200
+tzdata/zoneinfo/Europe/Jersey,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599
+tzdata/zoneinfo/Europe/Kaliningrad,sha256=57ov9G8m25w1pPdJF8zoFWzq5I6UoBMVsk2eHPelbA8,904
+tzdata/zoneinfo/Europe/Kiev,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558
+tzdata/zoneinfo/Europe/Kirov,sha256=KqXGcIbMGTuOoKZYBG-5bj7kVzFbKyGMA99PA0414D0,735
+tzdata/zoneinfo/Europe/Kyiv,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558
+tzdata/zoneinfo/Europe/Lisbon,sha256=Nr-w4MM_s8Zhwdu1D4cNOQiTZMwZibYswSH1nB1GUKg,1454
+tzdata/zoneinfo/Europe/Ljubljana,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478
+tzdata/zoneinfo/Europe/London,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599
+tzdata/zoneinfo/Europe/Luxembourg,sha256=sQ-VQqhQnwpj68p449gEMt2GuOopZAAoD-vZz6dugog,1103
+tzdata/zoneinfo/Europe/Madrid,sha256=ylsyHdv8iOB-DQPtL6DIMs5dDdjn2QolIAqOJImMOyE,897
+tzdata/zoneinfo/Europe/Malta,sha256=irX_nDD-BXYObaduu_vhPe1F31xmgL364dSOaT_OVco,928
+tzdata/zoneinfo/Europe/Mariehamn,sha256=ccpK9ZmPCZkMXoddNQ_DyONPKAuub-FPNtRpL6znpWM,481
+tzdata/zoneinfo/Europe/Minsk,sha256=86iP_xDtidkUCqjkoKhH5_El3VI21fSgoIiXl_BzUaU,808
+tzdata/zoneinfo/Europe/Monaco,sha256=zViOd5xXN9cOTkcVja-reUWwJrK7NEVMxHdBgVRZsGg,1105
+tzdata/zoneinfo/Europe/Moscow,sha256=7S4KCZ-0RrJBZoNDjT9W-fxaYqFsdUmn9Zy8k1s2TIo,908
+tzdata/zoneinfo/Europe/Nicosia,sha256=TYYqWp8sK0AwBUHAp0wuuihZuQ19RXdt28bth33zOBI,597
+tzdata/zoneinfo/Europe/Oslo,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705
+tzdata/zoneinfo/Europe/Paris,sha256=zViOd5xXN9cOTkcVja-reUWwJrK7NEVMxHdBgVRZsGg,1105
+tzdata/zoneinfo/Europe/Podgorica,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478
+tzdata/zoneinfo/Europe/Prague,sha256=pukw4zdc3LUffYp0iFr_if0UuGHrt1yzOdD5HBbBRpo,723
+tzdata/zoneinfo/Europe/Riga,sha256=PU8amev-8XVvl4B_JUOOOM1ofSMbotp-3MPGPHpPoTw,694
+tzdata/zoneinfo/Europe/Rome,sha256=hr0moG_jBXs2zyndejOPJSSv-BFu8I0AWqIRTqYSKGk,947
+tzdata/zoneinfo/Europe/Samara,sha256=Vc60AJe-0-b8prNiFwZTUS1bCbWxxuEnnNcgp8YkQRY,732
+tzdata/zoneinfo/Europe/San_Marino,sha256=hr0moG_jBXs2zyndejOPJSSv-BFu8I0AWqIRTqYSKGk,947
+tzdata/zoneinfo/Europe/Sarajevo,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478
+tzdata/zoneinfo/Europe/Saratov,sha256=0fN3eVFVewG-DSVk9xJABDQB1S_Nyn37bHOjj5X8Bm0,726
+tzdata/zoneinfo/Europe/Simferopol,sha256=y2Nybf9LGVNqNdW_GPS-NIDRLriyH_pyxKpT0zmATK4,865
+tzdata/zoneinfo/Europe/Skopje,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478
+tzdata/zoneinfo/Europe/Sofia,sha256=LQjC-OJkL4TzZcqD-JUofDAg1-qJui_2Ri6Eoii2MuQ,592
+tzdata/zoneinfo/Europe/Stockholm,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705
+tzdata/zoneinfo/Europe/Tallinn,sha256=R6yRfPqESOYQWftlncDWo_fQak61eeiEQKwg_C-C7W8,675
+tzdata/zoneinfo/Europe/Tirane,sha256=I-alATWRd8mfSgvnr3dN_F9vbTB66alvz2GQo0LUbPc,604
+tzdata/zoneinfo/Europe/Tiraspol,sha256=5TPhkCtxxa0ByLCv7YxOrc5Vtdui2v2VX8vrSopPkPs,755
+tzdata/zoneinfo/Europe/Ulyanovsk,sha256=2vK0XahtB_dKjDDXccjMjbQ2bAOfKDe66uMDqtjzHm4,760
+tzdata/zoneinfo/Europe/Uzhgorod,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558
+tzdata/zoneinfo/Europe/Vaduz,sha256=GZBiscMM_rI3XshMVt9SvlGJGYamKTt6Ek06YlCfRek,497
+tzdata/zoneinfo/Europe/Vatican,sha256=hr0moG_jBXs2zyndejOPJSSv-BFu8I0AWqIRTqYSKGk,947
+tzdata/zoneinfo/Europe/Vienna,sha256=q8_UF23-KHqc2ay4ju0qT1TuBSpRTnlB7i6vElk4eJw,658
+tzdata/zoneinfo/Europe/Vilnius,sha256=hXvv1PaQndapT7hdywPO3738Y3ZqbW_hJx87khyaOPM,676
+tzdata/zoneinfo/Europe/Volgograd,sha256=v3P6iFJ-rThJprVNDxB7ZYDrimtsW7IvQi_gJpZiJOQ,753
+tzdata/zoneinfo/Europe/Warsaw,sha256=6I9aUfFoFXpBrC3YpO4OmoeUGchMYSK0dxsaKjPZOkw,923
+tzdata/zoneinfo/Europe/Zagreb,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478
+tzdata/zoneinfo/Europe/Zaporozhye,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558
+tzdata/zoneinfo/Europe/Zurich,sha256=GZBiscMM_rI3XshMVt9SvlGJGYamKTt6Ek06YlCfRek,497
+tzdata/zoneinfo/Europe/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Europe/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Factory,sha256=0ytXntCnQnMWvqJgue4mdUUQRr1YxXxnnCTyZxhgr3Y,113
+tzdata/zoneinfo/GB,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599
+tzdata/zoneinfo/GB-Eire,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599
+tzdata/zoneinfo/GMT,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/GMT+0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/GMT-0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/GMT0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/Greenwich,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111
+tzdata/zoneinfo/HST,sha256=up2TB-9E2uBD6IGaCSOnR96o_DENUVI9ZCE1zQS0SzY,112
+tzdata/zoneinfo/Hongkong,sha256=9AaPcyRtuXQX9zRnRTVkxX1mRs5JCbn6JTaSPvzX608,775
+tzdata/zoneinfo/Iceland,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130
+tzdata/zoneinfo/Indian/Antananarivo,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Indian/Chagos,sha256=J_aS7rs0ZG1dPTGeokXxNJpF4Pds8u1ct49cRtX7giY,152
+tzdata/zoneinfo/Indian/Christmas,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152
+tzdata/zoneinfo/Indian/Cocos,sha256=6J2DXIEdTaRKqLOGeCzogo3whaoO6PJWYamIHS8A6Qw,187
+tzdata/zoneinfo/Indian/Comoro,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Indian/Kerguelen,sha256=lEhfD1j4QnZ-wtuTU51fw6-yvc4WZz2eY8CYjMzWQ44,152
+tzdata/zoneinfo/Indian/Mahe,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133
+tzdata/zoneinfo/Indian/Maldives,sha256=lEhfD1j4QnZ-wtuTU51fw6-yvc4WZz2eY8CYjMzWQ44,152
+tzdata/zoneinfo/Indian/Mauritius,sha256=R6pdJalrHVK5LlGOmEsyD66_-c5a9ptJM-xE71Fo8hQ,179
+tzdata/zoneinfo/Indian/Mayotte,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191
+tzdata/zoneinfo/Indian/Reunion,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133
+tzdata/zoneinfo/Indian/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Indian/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Iran,sha256=ozLlhNXzpJCZx7bc-VpcmNdgdtn6lPtF6f9qkaDEycI,812
+tzdata/zoneinfo/Israel,sha256=n83o1YTeoFhfXIcnqvNfSKFJ4NvTqDv2zvi8qcFAIeM,1074
+tzdata/zoneinfo/Jamaica,sha256=pDexcAMzrv9TqLWGjVOHwIDcFMLT6Vqlzjb5AbNmkoQ,339
+tzdata/zoneinfo/Japan,sha256=WaOHFDDw07k-YZ-jCkOkHR6IvdSf8m8J0PQFpQBwb5Y,213
+tzdata/zoneinfo/Kwajalein,sha256=S-ZFi6idKzDaelLy7DRjGPeD0s7oVud3xLMxZKNlBk8,219
+tzdata/zoneinfo/Libya,sha256=zzMBLZZh4VQ4_ARe5k4L_rsuqKP7edKvVt8F6kvj5FM,431
+tzdata/zoneinfo/MET,sha256=EgkGCb0euba8FQGgUqAYFx4mRuKeRD6W5GIAyV6yDJ0,621
+tzdata/zoneinfo/MST,sha256=84AZayGFK2nfpYS0-u16q9QWrYYkCwUJcNdOnG7Ai1s,111
+tzdata/zoneinfo/MST7MDT,sha256=yt9ENOc1sfICs1yxJjiii6FhCQkEsEuw67zvs-EeBb4,951
+tzdata/zoneinfo/Mexico/BajaNorte,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025
+tzdata/zoneinfo/Mexico/BajaSur,sha256=C5CBj73KgB8vbDbDEgqMHfPeMeglQj156WNbwYSxux8,718
+tzdata/zoneinfo/Mexico/General,sha256=vhDy1hSceJyFa3bIqn2qRi1kgxtvrCCaaB7s65mljtY,773
+tzdata/zoneinfo/Mexico/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Mexico/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/NZ,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043
+tzdata/zoneinfo/NZ-CHAT,sha256=pnhY_Lb8V4eo6cK3yL6JZL086SI_etG6rCycppJfTHg,808
+tzdata/zoneinfo/Navajo,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042
+tzdata/zoneinfo/PRC,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393
+tzdata/zoneinfo/PST8PDT,sha256=8w8p5P18af0k8f2C3amKrvi4tSK83QUhUCV6QmyeTa8,951
+tzdata/zoneinfo/Pacific/Apia,sha256=3HDEfICrLIehq3VLq4_r_DhQgFniSd_lXnOjdZgI6hQ,407
+tzdata/zoneinfo/Pacific/Auckland,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043
+tzdata/zoneinfo/Pacific/Bougainville,sha256=rqdn1Y4HSarx-vjPk00lsHNfhj3IQgKCViAsumuN_IY,201
+tzdata/zoneinfo/Pacific/Chatham,sha256=pnhY_Lb8V4eo6cK3yL6JZL086SI_etG6rCycppJfTHg,808
+tzdata/zoneinfo/Pacific/Chuuk,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154
+tzdata/zoneinfo/Pacific/Easter,sha256=EwVM74XjsboPVxK9bWmdd4nTrtvasP1zlLdxrMB_YaE,1174
+tzdata/zoneinfo/Pacific/Efate,sha256=LiX_rTfipQh_Vnqb_m7OGxyBtyAUC9UANVKHUpLoCcU,342
+tzdata/zoneinfo/Pacific/Enderbury,sha256=ojOG-oqi25HOnY6BFhav_3bmWg1LDILT4v-kxOFVuqI,172
+tzdata/zoneinfo/Pacific/Fakaofo,sha256=Uf8zeML2X8doPg8CX-p0mMGP-IOj7aHAMe7ULD5khxA,153
+tzdata/zoneinfo/Pacific/Fiji,sha256=umCNhtTuBziTXne-WAxzvYvGKqZxTYOTwK-tJhYh4MQ,396
+tzdata/zoneinfo/Pacific/Funafuti,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134
+tzdata/zoneinfo/Pacific/Galapagos,sha256=Z1KJPZSvO8M_Pay9WLcNAxzjo8imPrQ7FnXNOXfZl8c,175
+tzdata/zoneinfo/Pacific/Gambier,sha256=yIh86hjpDk1wRWTVJROOGqn9tkc7e9_O6zNxqs-wBoM,132
+tzdata/zoneinfo/Pacific/Guadalcanal,sha256=Ui8PN0th4sb1-n0Z8ceszNCeSiE0Yu47QskNMr8r8Yw,134
+tzdata/zoneinfo/Pacific/Guam,sha256=i57eM6syriUFvAbrVALnziCw_I4lENyzBcJdOaH71yU,350
+tzdata/zoneinfo/Pacific/Honolulu,sha256=HapXKaoeDzLNRL4RLQGtTMVnqf522H3LuRgr6NLIj_A,221
+tzdata/zoneinfo/Pacific/Johnston,sha256=HapXKaoeDzLNRL4RLQGtTMVnqf522H3LuRgr6NLIj_A,221
+tzdata/zoneinfo/Pacific/Kanton,sha256=ojOG-oqi25HOnY6BFhav_3bmWg1LDILT4v-kxOFVuqI,172
+tzdata/zoneinfo/Pacific/Kiritimati,sha256=cUVGmMRBgllfuYJ3X0B0zg0Bf-LPo9l7Le5ju882dx4,174
+tzdata/zoneinfo/Pacific/Kosrae,sha256=pQMLJXilygPhlkm0jCo5JuVmpmYJgLIdiTVxeP59ZEg,242
+tzdata/zoneinfo/Pacific/Kwajalein,sha256=S-ZFi6idKzDaelLy7DRjGPeD0s7oVud3xLMxZKNlBk8,219
+tzdata/zoneinfo/Pacific/Majuro,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134
+tzdata/zoneinfo/Pacific/Marquesas,sha256=ilprkRvn-N1XjptSI_0ZwUjeuokP-5l64uKjRBp0kxw,139
+tzdata/zoneinfo/Pacific/Midway,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146
+tzdata/zoneinfo/Pacific/Nauru,sha256=wahZONjreNAmYwhQ2CWdKMAE3SVm4S2aYvMZqcAlSYc,183
+tzdata/zoneinfo/Pacific/Niue,sha256=8WWebtgCnrMBKjuLNEYEWlktNI2op2kkKgk0Vcz8GaM,154
+tzdata/zoneinfo/Pacific/Norfolk,sha256=vL8G6W5CScYqp76g0b15UPIYHw2Lt60qOktHUF7caDs,237
+tzdata/zoneinfo/Pacific/Noumea,sha256=ezUyn7AYWBblrZbStlItJYu7XINCLiihrCBZB-Bl-Qw,198
+tzdata/zoneinfo/Pacific/Pago_Pago,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146
+tzdata/zoneinfo/Pacific/Palau,sha256=VkLRsKUUVXo3zrhAXn9iM-pKySbGIVfzWoopDhmceMA,148
+tzdata/zoneinfo/Pacific/Pitcairn,sha256=AJh6olJxXQzCMWKOE5ye4jHfgg1VA-9-gCZ5MbrX_8E,153
+tzdata/zoneinfo/Pacific/Pohnpei,sha256=Ui8PN0th4sb1-n0Z8ceszNCeSiE0Yu47QskNMr8r8Yw,134
+tzdata/zoneinfo/Pacific/Ponape,sha256=Ui8PN0th4sb1-n0Z8ceszNCeSiE0Yu47QskNMr8r8Yw,134
+tzdata/zoneinfo/Pacific/Port_Moresby,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154
+tzdata/zoneinfo/Pacific/Rarotonga,sha256=J6a2mOrTp4bsZNovj3HjJK9AVJ89PhdEpQMMVD__i18,406
+tzdata/zoneinfo/Pacific/Saipan,sha256=i57eM6syriUFvAbrVALnziCw_I4lENyzBcJdOaH71yU,350
+tzdata/zoneinfo/Pacific/Samoa,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146
+tzdata/zoneinfo/Pacific/Tahiti,sha256=Ivcs04hthxEQj1I_6aACc70By0lmxlvhgGFYh843e14,133
+tzdata/zoneinfo/Pacific/Tarawa,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134
+tzdata/zoneinfo/Pacific/Tongatapu,sha256=mjGjNSUATfw0yLGB0zsLxz3_L1uWxPANML8K4HQQIMY,237
+tzdata/zoneinfo/Pacific/Truk,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154
+tzdata/zoneinfo/Pacific/Wake,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134
+tzdata/zoneinfo/Pacific/Wallis,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134
+tzdata/zoneinfo/Pacific/Yap,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154
+tzdata/zoneinfo/Pacific/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/Pacific/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/Poland,sha256=6I9aUfFoFXpBrC3YpO4OmoeUGchMYSK0dxsaKjPZOkw,923
+tzdata/zoneinfo/Portugal,sha256=Nr-w4MM_s8Zhwdu1D4cNOQiTZMwZibYswSH1nB1GUKg,1454
+tzdata/zoneinfo/ROC,sha256=oEwscvT3aoMXjQNt2X0VfuHzLkeORN2npcEJI2h-5s8,511
+tzdata/zoneinfo/ROK,sha256=ZKcLb7zJtl52Lb0l64m29AwTcUbtyNvU0IHq-s2reN4,415
+tzdata/zoneinfo/Singapore,sha256=CVSy2aMB2U9DSAJGBqcbvLL6JNPNNwn1vIvKYFA5eF0,256
+tzdata/zoneinfo/Turkey,sha256=KnFjsWuUgG9pmRNI59CmDEbrYbHwMF9fS4P2E9sQgG8,1200
+tzdata/zoneinfo/UCT,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/US/Alaska,sha256=d8oMIpYvBpmLzl5I2By4ZaFEZsg_9dxgfqpIM0QFi_Y,977
+tzdata/zoneinfo/US/Aleutian,sha256=q_sZgOINX4TsX9iBx1gNd6XGwBnzCjg6qpdAQhK0ieA,969
+tzdata/zoneinfo/US/Arizona,sha256=rhFFPCHQiYTedfLv7ATckxeKe04jxeUvIJi4vUXMtUc,240
+tzdata/zoneinfo/US/Central,sha256=wntzn_RqffBZThINcltDkhfhHkTqmlDNxJEwODtUguc,1754
+tzdata/zoneinfo/US/East-Indiana,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531
+tzdata/zoneinfo/US/Eastern,sha256=1_IgazpFmJ_JrWPVWJIlMvpzUigNX4cXa_HbecsdH6k,1744
+tzdata/zoneinfo/US/Hawaii,sha256=HapXKaoeDzLNRL4RLQGtTMVnqf522H3LuRgr6NLIj_A,221
+tzdata/zoneinfo/US/Indiana-Starke,sha256=KJCzXct8CTMItVLYLYeBqM6aT6b53gWCg6aDbsH58oI,1016
+tzdata/zoneinfo/US/Michigan,sha256=I4F8Mt9nx38AF6D-steYskBa_HHO6jKU1-W0yRFr50A,899
+tzdata/zoneinfo/US/Mountain,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042
+tzdata/zoneinfo/US/Pacific,sha256=IA0FdU9tg6Nxz0CNcIUSV5dlezsL6-uh5QjP_oaj5cg,1294
+tzdata/zoneinfo/US/Samoa,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146
+tzdata/zoneinfo/US/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/US/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/UTC,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/Universal,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/W-SU,sha256=7S4KCZ-0RrJBZoNDjT9W-fxaYqFsdUmn9Zy8k1s2TIo,908
+tzdata/zoneinfo/WET,sha256=pAiBtwIi4Sqi79_Ppm2V4VMiMrJKOUvMdCZTJeAizAc,494
+tzdata/zoneinfo/Zulu,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111
+tzdata/zoneinfo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+tzdata/zoneinfo/__pycache__/__init__.cpython-310.pyc,,
+tzdata/zoneinfo/iso3166.tab,sha256=oBpdFY8x1GrY5vjMKgbGQYEGgqk5fUYDIPaNVCG2XnE,4791
+tzdata/zoneinfo/leapseconds,sha256=fjC39Eu3wB6I4g7x_VL7HzvDVbiKbLUjfQAEgo7442I,3257
+tzdata/zoneinfo/tzdata.zi,sha256=q6xnElaYdX6HiShmu3FxIV9CeWaQHmnF9XLjV2-bYv0,109388
+tzdata/zoneinfo/zone.tab,sha256=qSLfeCWE3tsCDIIQbr71DMkmCUXTIUEgNZgfN-60d-Y,18846
+tzdata/zoneinfo/zone1970.tab,sha256=FJErvL9wggoFluO2WceYn8ZQ-nA9A073Lub1x2Pzg40,17582
+tzdata/zoneinfo/zonenow.tab,sha256=YoPd7huhHsKlJliOO-eMIBE5-bHBKpbfjkSJQFAto6I,8311
+tzdata/zones,sha256=W13GrYuma2VrkfW_VDfQpCt0Ivs2tvvYE4I63b0Z6jM,9084
diff --git a/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..4724c45738f6ac125bb3a21787855562e6870440
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0883ff0705bf11267d4d921b9bf48392b3e23889
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tzdata-2024.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+tzdata
diff --git a/venv/lib/python3.10/site-packages/word2number/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/word2number/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60c24d88b58c31035af23bfc4f5ce12a3acaec85
Binary files /dev/null and b/venv/lib/python3.10/site-packages/word2number/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/word2number/__pycache__/w2n.cpython-310.pyc b/venv/lib/python3.10/site-packages/word2number/__pycache__/w2n.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df338e6c497c9c74803b04963410c0c1d893d159
Binary files /dev/null and b/venv/lib/python3.10/site-packages/word2number/__pycache__/w2n.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/word2number/w2n.py b/venv/lib/python3.10/site-packages/word2number/w2n.py
new file mode 100644
index 0000000000000000000000000000000000000000..69abcd95e5b987d1dd3c4de235d6619c108971df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/word2number/w2n.py
@@ -0,0 +1,217 @@
+from __future__ import print_function
+
+
+american_number_system = {
+    'zero': 0,
+    'one': 1,
+    'two': 2,
+    'three': 3,
+    'four': 4,
+    'five': 5,
+    'six': 6,
+    'seven': 7,
+    'eight': 8,
+    'nine': 9,
+    'ten': 10,
+    'eleven': 11,
+    'twelve': 12,
+    'thirteen': 13,
+    'fourteen': 14,
+    'fifteen': 15,
+    'sixteen': 16,
+    'seventeen': 17,
+    'eighteen': 18,
+    'nineteen': 19,
+    'twenty': 20,
+    'thirty': 30,
+    'forty': 40,
+    'fifty': 50,
+    'sixty': 60,
+    'seventy': 70,
+    'eighty': 80,
+    'ninety': 90,
+    'hundred': 100,
+    'thousand': 1000,
+    'million': 1000000,
+    'billion': 1000000000,
+    'point': '.'
+}
+
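+# 'point' maps to the string '.' and marks the start of the decimal part;
+# only the single-digit words listed below are converted after a 'point'.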
+decimal_words = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
+
+"""
+#TODO
+indian_number_system = {
+ 'zero': 0,
+ 'one': 1,
+ 'two': 2,
+ 'three': 3,
+ 'four': 4,
+ 'five': 5,
+ 'six': 6,
+ 'seven': 7,
+ 'eight': 8,
+ 'nine': 9,
+ 'ten': 10,
+ 'eleven': 11,
+ 'twelve': 12,
+ 'thirteen': 13,
+ 'fourteen': 14,
+ 'fifteen': 15,
+ 'sixteen': 16,
+ 'seventeen': 17,
+ 'eighteen': 18,
+ 'nineteen': 19,
+ 'twenty': 20,
+ 'thirty': 30,
+ 'forty': 40,
+ 'fifty': 50,
+ 'sixty': 60,
+ 'seventy': 70,
+ 'eighty': 80,
+ 'ninety': 90,
+ 'hundred': 100,
+ 'thousand': 1000,
+ 'lac': 100000,
+ 'lakh': 100000,
+ 'crore': 10000000
+}
+"""
+
+
+"""
+function to form numeric multipliers for million, billion, thousand etc.
+
+input: list of strings
+return value: integer
+"""
+
+
+def number_formation(number_words):
+ numbers = []
+ for number_word in number_words:
+ numbers.append(american_number_system[number_word])
+ if len(numbers) == 4:
+ return (numbers[0] * numbers[1]) + numbers[2] + numbers[3]
+ elif len(numbers) == 3:
+ return numbers[0] * numbers[1] + numbers[2]
+ elif len(numbers) == 2:
+ if 100 in numbers:
+ return numbers[0] * numbers[1]
+ else:
+ return numbers[0] + numbers[1]
+ else:
+ return numbers[0]
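+
+# Worked examples for number_formation (illustrative comments, not part of
+# the upstream word2number source):
+#   number_formation(['two', 'hundred', 'forty', 'five']) -> (2 * 100) + 40 + 5 = 245
+#   number_formation(['nineteen', 'hundred']) -> 19 * 100 = 1900
+#   number_formation(['sixty', 'nine']) -> 60 + 9 = 69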
+
+
+"""
+function to convert post decimal digit words to numerial digits
+input: list of strings
+output: double
+"""
+
+
+def get_decimal_sum(decimal_digit_words):
+ decimal_number_str = []
+ for dec_word in decimal_digit_words:
+ if(dec_word not in decimal_words):
+ return 0
+ else:
+ decimal_number_str.append(american_number_system[dec_word])
+ final_decimal_string = '0.' + ''.join(map(str,decimal_number_str))
+ return float(final_decimal_string)
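+
+# Worked example for get_decimal_sum (illustrative comment, not upstream):
+#   get_decimal_sum(['four', 'two']) -> float('0.42') = 0.42
+# Any word that is not a single digit (e.g. 'twenty') makes it return 0.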
+
+
+"""
+function to return integer for an input `number_sentence` string
+input: string
+output: int or double or None
+"""
+
+
+def word_to_num(number_sentence):
+ if type(number_sentence) is not str:
+ raise ValueError("Type of input is not string! Please enter a valid number word (eg. \'two million twenty three thousand and forty nine\')")
+
+ number_sentence = number_sentence.replace('-', ' ')
+ number_sentence = number_sentence.lower() # converting input to lowercase
+
+ if(number_sentence.isdigit()): # return the number if user enters a number string
+ return int(number_sentence)
+
+ split_words = number_sentence.strip().split() # strip extra spaces and split sentence into words
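+
+    # Expected behaviour (illustrative comments, not part of the upstream source):
+    #   word_to_num('two million twenty three thousand and forty nine') -> 2023049
+    #   word_to_num('two point three') -> 2.3
+    #   word_to_num('112') -> 112 (via the isdigit() shortcut above)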
+
+    clean_numbers = []
+    clean_decimal_numbers = []
+
+    # removing and, & etc.
+    for word in split_words:
+        if word in american_number_system:
+            clean_numbers.append(word)
+
+    # Error message if the user enters invalid input!
+    if len(clean_numbers) == 0:
+        raise ValueError("No valid number words found! Please enter a valid number word (e.g. two million twenty three thousand and forty nine)")
+
+    # Error if user enters million, billion, thousand or decimal point twice
+    if clean_numbers.count('thousand') > 1 or clean_numbers.count('million') > 1 or clean_numbers.count('billion') > 1 or clean_numbers.count('point') > 1:
+        raise ValueError("Redundant number word! Please enter a valid number word (e.g. two million twenty three thousand and forty nine)")
+
+    # separate decimal part of number (if exists)
+    if clean_numbers.count('point') == 1:
+        clean_decimal_numbers = clean_numbers[clean_numbers.index('point')+1:]
+        clean_numbers = clean_numbers[:clean_numbers.index('point')]
+
+    billion_index = clean_numbers.index('billion') if 'billion' in clean_numbers else -1
+    million_index = clean_numbers.index('million') if 'million' in clean_numbers else -1
+    thousand_index = clean_numbers.index('thousand') if 'thousand' in clean_numbers else -1
+
+    if (thousand_index > -1 and (thousand_index < million_index or thousand_index < billion_index)) or (million_index > -1 and million_index < billion_index):
+        raise ValueError("Malformed number! Please enter a valid number word (e.g. two million twenty three thousand and forty nine)")
+
+    total_sum = 0  # storing the number to be returned
+
+    if len(clean_numbers) > 0:
+        # hack for now, better way TODO
+        if len(clean_numbers) == 1:
+            total_sum += american_number_system[clean_numbers[0]]
+
+        else:
+            if billion_index > -1:
+                billion_multiplier = number_formation(clean_numbers[0:billion_index])
+                total_sum += billion_multiplier * 1000000000
+
+            if million_index > -1:
+                if billion_index > -1:
+                    million_multiplier = number_formation(clean_numbers[billion_index+1:million_index])
+                else:
+                    million_multiplier = number_formation(clean_numbers[0:million_index])
+                total_sum += million_multiplier * 1000000
+
+            if thousand_index > -1:
+                if million_index > -1:
+                    thousand_multiplier = number_formation(clean_numbers[million_index+1:thousand_index])
+                elif billion_index > -1 and million_index == -1:
+                    thousand_multiplier = number_formation(clean_numbers[billion_index+1:thousand_index])
+                else:
+                    thousand_multiplier = number_formation(clean_numbers[0:thousand_index])
+                total_sum += thousand_multiplier * 1000
+
+            if thousand_index > -1 and thousand_index != len(clean_numbers)-1:
+                hundreds = number_formation(clean_numbers[thousand_index+1:])
+            elif million_index > -1 and million_index != len(clean_numbers)-1:
+                hundreds = number_formation(clean_numbers[million_index+1:])
+            elif billion_index > -1 and billion_index != len(clean_numbers)-1:
+                hundreds = number_formation(clean_numbers[billion_index+1:])
+            elif thousand_index == -1 and million_index == -1 and billion_index == -1:
+                hundreds = number_formation(clean_numbers)
+            else:
+                hundreds = 0
+            total_sum += hundreds
+
+    # adding decimal part to total_sum (if exists)
+    if len(clean_decimal_numbers) > 0:
+        decimal_sum = get_decimal_sum(clean_decimal_numbers)
+        total_sum += decimal_sum
+
+    return total_sum
\ No newline at end of file