applied-ai-018 commited on
Commit
1472e57
·
verified ·
1 Parent(s): 2154cf2

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/INSTALLER +1 -0
  4. venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/LICENSE +29 -0
  5. venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/METADATA +167 -0
  6. venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/RECORD +104 -0
  7. venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/WHEEL +5 -0
  8. venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/top_level.txt +1 -0
  9. venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/INSTALLER +1 -0
  10. venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/LICENSE.txt +81 -0
  11. venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/METADATA +125 -0
  12. venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/WHEEL +5 -0
  13. venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/entry_points.txt +2 -0
  14. venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/top_level.txt +1 -0
  15. venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER +1 -0
  16. venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/License.txt +1568 -0
  17. venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA +35 -0
  18. venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/RECORD +50 -0
  19. venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/WHEEL +5 -0
  20. venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/top_level.txt +1 -0
  21. venv/lib/python3.10/site-packages/packaging-24.0.dist-info/INSTALLER +1 -0
  22. venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE +3 -0
  23. venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.APACHE +177 -0
  24. venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.BSD +23 -0
  25. venv/lib/python3.10/site-packages/packaging-24.0.dist-info/METADATA +102 -0
  26. venv/lib/python3.10/site-packages/packaging-24.0.dist-info/RECORD +36 -0
  27. venv/lib/python3.10/site-packages/packaging-24.0.dist-info/WHEEL +4 -0
  28. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/apis.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/config.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/deprecated.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/futures/__init__.py +318 -0
  32. venv/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/jit/__init__.py +294 -0
  34. venv/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/jit/__pycache__/_monkeytype_config.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/jit/_async.py +101 -0
  40. venv/lib/python3.10/site-packages/torch/jit/_await.py +26 -0
  41. venv/lib/python3.10/site-packages/torch/jit/_builtins.py +187 -0
  42. venv/lib/python3.10/site-packages/torch/jit/_check.py +248 -0
  43. venv/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py +189 -0
  44. venv/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py +11 -0
  45. venv/lib/python3.10/site-packages/torch/jit/_decompositions.py +127 -0
  46. venv/lib/python3.10/site-packages/torch/jit/_freeze.py +227 -0
  47. venv/lib/python3.10/site-packages/torch/jit/_fuser.py +160 -0
  48. venv/lib/python3.10/site-packages/torch/jit/_ir_utils.py +25 -0
  49. venv/lib/python3.10/site-packages/torch/jit/_logging.py +10 -0
  50. venv/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py +192 -0
ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70e4dcc64eeea313e32c8e1851f202027890fa5ac11678574603256981e1d814
3
+ size 50332843
ckpts/universal/global_step120/zero/22.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d49a710942fc3e1cbd6e72d9796e19901058eb79fca810807b3d34cc3a819410
3
+ size 50332843
venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/LICENSE ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ BSD 3-Clause License
2
+
3
+ Copyright (c) 2018, Martin Durant
4
+ All rights reserved.
5
+
6
+ Redistribution and use in source and binary forms, with or without
7
+ modification, are permitted provided that the following conditions are met:
8
+
9
+ * Redistributions of source code must retain the above copyright notice, this
10
+ list of conditions and the following disclaimer.
11
+
12
+ * Redistributions in binary form must reproduce the above copyright notice,
13
+ this list of conditions and the following disclaimer in the documentation
14
+ and/or other materials provided with the distribution.
15
+
16
+ * Neither the name of the copyright holder nor the names of its
17
+ contributors may be used to endorse or promote products derived from
18
+ this software without specific prior written permission.
19
+
20
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/METADATA ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: fsspec
3
+ Version: 2024.3.1
4
+ Summary: File-system specification
5
+ Home-page: https://github.com/fsspec/filesystem_spec
6
+ Maintainer: Martin Durant
7
+ Maintainer-email: [email protected]
8
+ License: BSD
9
+ Project-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html
10
+ Project-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/
11
+ Keywords: file
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: BSD License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3.8
17
+ Classifier: Programming Language :: Python :: 3.9
18
+ Classifier: Programming Language :: Python :: 3.10
19
+ Classifier: Programming Language :: Python :: 3.11
20
+ Requires-Python: >=3.8
21
+ Description-Content-Type: text/markdown
22
+ License-File: LICENSE
23
+ Provides-Extra: abfs
24
+ Requires-Dist: adlfs ; extra == 'abfs'
25
+ Provides-Extra: adl
26
+ Requires-Dist: adlfs ; extra == 'adl'
27
+ Provides-Extra: arrow
28
+ Requires-Dist: pyarrow >=1 ; extra == 'arrow'
29
+ Provides-Extra: dask
30
+ Requires-Dist: dask ; extra == 'dask'
31
+ Requires-Dist: distributed ; extra == 'dask'
32
+ Provides-Extra: devel
33
+ Requires-Dist: pytest ; extra == 'devel'
34
+ Requires-Dist: pytest-cov ; extra == 'devel'
35
+ Provides-Extra: dropbox
36
+ Requires-Dist: dropboxdrivefs ; extra == 'dropbox'
37
+ Requires-Dist: requests ; extra == 'dropbox'
38
+ Requires-Dist: dropbox ; extra == 'dropbox'
39
+ Provides-Extra: entrypoints
40
+ Provides-Extra: full
41
+ Requires-Dist: adlfs ; extra == 'full'
42
+ Requires-Dist: aiohttp !=4.0.0a0,!=4.0.0a1 ; extra == 'full'
43
+ Requires-Dist: dask ; extra == 'full'
44
+ Requires-Dist: distributed ; extra == 'full'
45
+ Requires-Dist: dropbox ; extra == 'full'
46
+ Requires-Dist: dropboxdrivefs ; extra == 'full'
47
+ Requires-Dist: fusepy ; extra == 'full'
48
+ Requires-Dist: gcsfs ; extra == 'full'
49
+ Requires-Dist: libarchive-c ; extra == 'full'
50
+ Requires-Dist: ocifs ; extra == 'full'
51
+ Requires-Dist: panel ; extra == 'full'
52
+ Requires-Dist: paramiko ; extra == 'full'
53
+ Requires-Dist: pyarrow >=1 ; extra == 'full'
54
+ Requires-Dist: pygit2 ; extra == 'full'
55
+ Requires-Dist: requests ; extra == 'full'
56
+ Requires-Dist: s3fs ; extra == 'full'
57
+ Requires-Dist: smbprotocol ; extra == 'full'
58
+ Requires-Dist: tqdm ; extra == 'full'
59
+ Provides-Extra: fuse
60
+ Requires-Dist: fusepy ; extra == 'fuse'
61
+ Provides-Extra: gcs
62
+ Requires-Dist: gcsfs ; extra == 'gcs'
63
+ Provides-Extra: git
64
+ Requires-Dist: pygit2 ; extra == 'git'
65
+ Provides-Extra: github
66
+ Requires-Dist: requests ; extra == 'github'
67
+ Provides-Extra: gs
68
+ Requires-Dist: gcsfs ; extra == 'gs'
69
+ Provides-Extra: gui
70
+ Requires-Dist: panel ; extra == 'gui'
71
+ Provides-Extra: hdfs
72
+ Requires-Dist: pyarrow >=1 ; extra == 'hdfs'
73
+ Provides-Extra: http
74
+ Requires-Dist: aiohttp !=4.0.0a0,!=4.0.0a1 ; extra == 'http'
75
+ Provides-Extra: libarchive
76
+ Requires-Dist: libarchive-c ; extra == 'libarchive'
77
+ Provides-Extra: oci
78
+ Requires-Dist: ocifs ; extra == 'oci'
79
+ Provides-Extra: s3
80
+ Requires-Dist: s3fs ; extra == 's3'
81
+ Provides-Extra: sftp
82
+ Requires-Dist: paramiko ; extra == 'sftp'
83
+ Provides-Extra: smb
84
+ Requires-Dist: smbprotocol ; extra == 'smb'
85
+ Provides-Extra: ssh
86
+ Requires-Dist: paramiko ; extra == 'ssh'
87
+ Provides-Extra: tqdm
88
+ Requires-Dist: tqdm ; extra == 'tqdm'
89
+
90
+ # filesystem_spec
91
+
92
+ [![PyPI version](https://badge.fury.io/py/fsspec.svg)](https://pypi.python.org/pypi/fsspec/)
93
+ [![Anaconda-Server Badge](https://anaconda.org/conda-forge/fsspec/badges/version.svg)](https://anaconda.org/conda-forge/fsspec)
94
+ ![Build](https://github.com/fsspec/filesystem_spec/workflows/CI/badge.svg)
95
+ [![Docs](https://readthedocs.org/projects/filesystem-spec/badge/?version=latest)](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)
96
+ [![PyPi downloads](https://img.shields.io/pypi/dm/fsspec?label=pypi%20downloads&style=flat)](https://pepy.tech/project/fsspec)
97
+
98
+ A specification for pythonic filesystems.
99
+
100
+ ## Install
101
+
102
+ ```bash
103
+ pip install fsspec
104
+ ```
105
+
106
+ would install the base fsspec. Various optionally supported features might require specification of custom
107
+ extra require, e.g. `pip install fsspec[ssh]` will install dependencies for `ssh` backends support.
108
+ Use `pip install fsspec[full]` for installation of all known extra dependencies.
109
+
110
+ Up-to-date package also provided through conda-forge distribution:
111
+
112
+ ```bash
113
+ conda install -c conda-forge fsspec
114
+ ```
115
+
116
+
117
+ ## Purpose
118
+
119
+ To produce a template or specification for a file-system interface, that specific implementations should follow,
120
+ so that applications making use of them can rely on a common behaviour and not have to worry about the specific
121
+ internal implementation decisions with any given backend. Many such implementations are included in this package,
122
+ or in sister projects such as `s3fs` and `gcsfs`.
123
+
124
+ In addition, if this is well-designed, then additional functionality, such as a key-value store or FUSE
125
+ mounting of the file-system implementation may be available for all implementations "for free".
126
+
127
+ ## Documentation
128
+
129
+ Please refer to [RTD](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)
130
+
131
+ ## Develop
132
+
133
+ fsspec uses GitHub Actions for CI. Environment files can be found
134
+ in the "ci/" directory. Note that the main environment is called "py38",
135
+ but it is expected that the version of python installed be adjustable at
136
+ CI runtime. For local use, pick a version suitable for you.
137
+
138
+ ### Testing
139
+
140
+ Tests can be run in the dev environment, if activated, via ``pytest fsspec``.
141
+
142
+ The full fsspec suite requires a system-level docker, docker-compose, and fuse
143
+ installation. If only making changes to one backend implementation, it is
144
+ not generally necessary to run all tests locally.
145
+
146
+ It is expected that contributors ensure that any change to fsspec does not
147
+ cause issues or regressions for either other fsspec-related packages such
148
+ as gcsfs and s3fs, nor for downstream users of fsspec. The "downstream" CI
149
+ run and corresponding environment file run a set of tests from the dask
150
+ test suite, and very minimal tests against pandas and zarr from the
151
+ test_downstream.py module in this repo.
152
+
153
+ ### Code Formatting
154
+
155
+ fsspec uses [Black](https://black.readthedocs.io/en/stable) to ensure
156
+ a consistent code format throughout the project.
157
+ Run ``black fsspec`` from the root of the filesystem_spec repository to
158
+ auto-format your code. Additionally, many editors have plugins that will apply
159
+ ``black`` as you edit files. ``black`` is included in the ``tox`` environments.
160
+
161
+ Optionally, you may wish to setup [pre-commit hooks](https://pre-commit.com) to
162
+ automatically run ``black`` when you make a git commit.
163
+ Run ``pre-commit install --install-hooks`` from the root of the
164
+ filesystem_spec repository to setup pre-commit hooks. ``black`` will now be run
165
+ before you commit, reformatting any changed files. You can format without
166
+ committing via ``pre-commit run`` or skip these checks with ``git commit
167
+ --no-verify``.
venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fsspec-2024.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ fsspec-2024.3.1.dist-info/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513
3
+ fsspec-2024.3.1.dist-info/METADATA,sha256=Wv4QVGqB4lYfHfgP-Cfby1Nce57WYXXAhH0f6Ju5FUM,6786
4
+ fsspec-2024.3.1.dist-info/RECORD,,
5
+ fsspec-2024.3.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
6
+ fsspec-2024.3.1.dist-info/top_level.txt,sha256=blt2pDrQDwN3Gklcw13CSPLQRd6aaOgJ8AxqrW395MI,7
7
+ fsspec/__init__.py,sha256=C8G5rqeNAes0NHYFhdxcw-ZMg4yDipqzSNc4NIR7uoc,2010
8
+ fsspec/__pycache__/__init__.cpython-310.pyc,,
9
+ fsspec/__pycache__/_version.cpython-310.pyc,,
10
+ fsspec/__pycache__/archive.cpython-310.pyc,,
11
+ fsspec/__pycache__/asyn.cpython-310.pyc,,
12
+ fsspec/__pycache__/caching.cpython-310.pyc,,
13
+ fsspec/__pycache__/callbacks.cpython-310.pyc,,
14
+ fsspec/__pycache__/compression.cpython-310.pyc,,
15
+ fsspec/__pycache__/config.cpython-310.pyc,,
16
+ fsspec/__pycache__/conftest.cpython-310.pyc,,
17
+ fsspec/__pycache__/core.cpython-310.pyc,,
18
+ fsspec/__pycache__/dircache.cpython-310.pyc,,
19
+ fsspec/__pycache__/exceptions.cpython-310.pyc,,
20
+ fsspec/__pycache__/fuse.cpython-310.pyc,,
21
+ fsspec/__pycache__/generic.cpython-310.pyc,,
22
+ fsspec/__pycache__/gui.cpython-310.pyc,,
23
+ fsspec/__pycache__/mapping.cpython-310.pyc,,
24
+ fsspec/__pycache__/parquet.cpython-310.pyc,,
25
+ fsspec/__pycache__/registry.cpython-310.pyc,,
26
+ fsspec/__pycache__/spec.cpython-310.pyc,,
27
+ fsspec/__pycache__/transaction.cpython-310.pyc,,
28
+ fsspec/__pycache__/utils.cpython-310.pyc,,
29
+ fsspec/_version.py,sha256=wHq_BWmvVWba6IeFL8lWxmbw1fXQhCR4NJHH8b77Nxs,500
30
+ fsspec/archive.py,sha256=S__DzfZj-urAN3tp2W6jJ6YDiXG1fAl7FjvWUN73qIE,2386
31
+ fsspec/asyn.py,sha256=AOd2SXH2YPCaQL5jA6IegYevdMFkAnGD7Seh9DC2gSE,36404
32
+ fsspec/caching.py,sha256=TrZqKo3drK9Afujg7grZRiLNcmgUr84rnvMcojzURnI,28819
33
+ fsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210
34
+ fsspec/compression.py,sha256=Yyd8FXw2rwWRtVoRVah_yguv-J7BUcBo4yDu6Qt52a0,4859
35
+ fsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279
36
+ fsspec/conftest.py,sha256=fVfx-NLrH_OZS1TIpYNoPzM7efEcMoL62reHOdYeFCA,1245
37
+ fsspec/core.py,sha256=kkwJ7IR3-i1C9SAZ_oGrPpbM5hqpBZ2OLkuHU5a1sYE,22471
38
+ fsspec/dircache.py,sha256=YzogWJrhEastHU7vWz-cJiJ7sdtLXFXhEpInGKd4EcM,2717
39
+ fsspec/exceptions.py,sha256=xcS7LiRrQ748kvOB9mrUR14kpjNztrHgEkZWi9M-VaI,330
40
+ fsspec/fuse.py,sha256=66amOa6wdIbS0DMhhfAPUoOB37HPorfXD1izV0prmTY,10145
41
+ fsspec/generic.py,sha256=jIA7wBwtUzJhTth78PTzWbOBjGom2e4IjmQ_KBSlHPg,13575
42
+ fsspec/gui.py,sha256=XKoXZpUhRE7jOhRCJH4-jRbKhVu56aS8h9tecvPD3nc,13932
43
+ fsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
44
+ fsspec/implementations/__pycache__/__init__.cpython-310.pyc,,
45
+ fsspec/implementations/__pycache__/arrow.cpython-310.pyc,,
46
+ fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc,,
47
+ fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc,,
48
+ fsspec/implementations/__pycache__/cached.cpython-310.pyc,,
49
+ fsspec/implementations/__pycache__/dask.cpython-310.pyc,,
50
+ fsspec/implementations/__pycache__/data.cpython-310.pyc,,
51
+ fsspec/implementations/__pycache__/dbfs.cpython-310.pyc,,
52
+ fsspec/implementations/__pycache__/dirfs.cpython-310.pyc,,
53
+ fsspec/implementations/__pycache__/ftp.cpython-310.pyc,,
54
+ fsspec/implementations/__pycache__/git.cpython-310.pyc,,
55
+ fsspec/implementations/__pycache__/github.cpython-310.pyc,,
56
+ fsspec/implementations/__pycache__/http.cpython-310.pyc,,
57
+ fsspec/implementations/__pycache__/jupyter.cpython-310.pyc,,
58
+ fsspec/implementations/__pycache__/libarchive.cpython-310.pyc,,
59
+ fsspec/implementations/__pycache__/local.cpython-310.pyc,,
60
+ fsspec/implementations/__pycache__/memory.cpython-310.pyc,,
61
+ fsspec/implementations/__pycache__/reference.cpython-310.pyc,,
62
+ fsspec/implementations/__pycache__/sftp.cpython-310.pyc,,
63
+ fsspec/implementations/__pycache__/smb.cpython-310.pyc,,
64
+ fsspec/implementations/__pycache__/tar.cpython-310.pyc,,
65
+ fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc,,
66
+ fsspec/implementations/__pycache__/zip.cpython-310.pyc,,
67
+ fsspec/implementations/arrow.py,sha256=_7TLuV6ZzNlpmUU_v6ud56u2wadzsKmY5qugPBxgMEs,8649
68
+ fsspec/implementations/cache_mapper.py,sha256=iHgBA6gjzDJ7_mBboHFzpLTf55HP3UEwUOZ43xyUK4M,2429
69
+ fsspec/implementations/cache_metadata.py,sha256=ZvyA7Y3KK-5Ct4E5pELzD6mH_5T03XqaKVT96qYDADU,8576
70
+ fsspec/implementations/cached.py,sha256=CuxQXQ6f-MRnLvsRzvauEhpmXEgicZZCfViKjIu1kn4,33029
71
+ fsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466
72
+ fsspec/implementations/data.py,sha256=LDLczxRh8h7x39Zjrd-GgzdQHr78yYxDlrv2C9Uxb5E,1658
73
+ fsspec/implementations/dbfs.py,sha256=cix9OYUveuSOx5UO5uRUwNUkYqjzyY0fkKnca1kTgZ0,15014
74
+ fsspec/implementations/dirfs.py,sha256=inDIRSDPhI1_ud1MMBFrpZQ11VIAMJ_dZQtbE4V08Ng,11384
75
+ fsspec/implementations/ftp.py,sha256=rp6cTog8xqjDPlKdSLKcsyP7K593_ByMabxGbNSEpTo,11655
76
+ fsspec/implementations/git.py,sha256=vKGI-Vd5q4H2RrvhebkPc9NwlfkZ980OUGhebeCw-M0,4034
77
+ fsspec/implementations/github.py,sha256=0kIiKkeAaROuHgdWBHVQFrzJ2ZfoDgymCehL_kJXHYA,7565
78
+ fsspec/implementations/http.py,sha256=yr6t0OhLwZx_pvNQ05detAehcQjRw0Pg6XdwWv81jxk,29601
79
+ fsspec/implementations/jupyter.py,sha256=B2uj7OEm7yIk-vRSsO37_ND0t0EBvn4B-Su43ibN4Pg,3811
80
+ fsspec/implementations/libarchive.py,sha256=5_I2DiLXwQ1JC8x-K7jXu-tBwhO9dj7tFLnb0bTnVMQ,7102
81
+ fsspec/implementations/local.py,sha256=XLsBoG4lf92w5ZddmbHXcStThSYxHgeoJEd2Mp5Uo0Y,14327
82
+ fsspec/implementations/memory.py,sha256=tlaGCekgZ2Z_9n8B6hsSpo6_V89PwjugkOmD445QrqE,9778
83
+ fsspec/implementations/reference.py,sha256=3dPi55riD_cROCafpeoUm2Xbb1vpXpyQijl09f5jTsE,43871
84
+ fsspec/implementations/sftp.py,sha256=fMY9XZcmpjszQ2tCqO_TPaJesaeD_Dv7ptYzgUPGoO0,5631
85
+ fsspec/implementations/smb.py,sha256=_hR5MKwDUCi3u5zEzvnenIWRIGPMhuUryWRYvNSp0WE,10804
86
+ fsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111
87
+ fsspec/implementations/webhdfs.py,sha256=wqVfno7z0TY1HepaIvKTUUcl_bi5NkV6qWsST8t_s7Y,16745
88
+ fsspec/implementations/zip.py,sha256=vc1fNz-yO8uWQ9bQUqBFYpTcgsfZQq9vDwwg4Aufs9Y,4417
89
+ fsspec/mapping.py,sha256=hSsiRo-dgAOj6oHf67bF3i11U4xREglXToHGUX4GhRY,8261
90
+ fsspec/parquet.py,sha256=qVxDhwc960SGOt5etcYAJxCr-7HQKP01687KpDR02Gw,19463
91
+ fsspec/registry.py,sha256=VjnfGUg39lvlCDxOoGSeYm8UL12wZC5yQ5n8-W92eD0,11173
92
+ fsspec/spec.py,sha256=3t96RgizRN_slIuHXnuR0bXjVUfBS1TfuDrEua4oQvE,66277
93
+ fsspec/tests/abstract/__init__.py,sha256=i1wcFixV6QhOwdoB24c8oXjzobISNqiKVz9kl2DvAY8,10028
94
+ fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc,,
95
+ fsspec/tests/abstract/__pycache__/common.cpython-310.pyc,,
96
+ fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc,,
97
+ fsspec/tests/abstract/__pycache__/get.cpython-310.pyc,,
98
+ fsspec/tests/abstract/__pycache__/put.cpython-310.pyc,,
99
+ fsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973
100
+ fsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967
101
+ fsspec/tests/abstract/get.py,sha256=vNR4HztvTR7Cj56AMo7_tx7TeYz1Jgr_2Wb8Lv-UiBY,20755
102
+ fsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201
103
+ fsspec/transaction.py,sha256=xliRG6U2Zf3khG4xcw9WiB-yAoqJSHEGK_VjHOdtgo0,2398
104
+ fsspec/utils.py,sha256=kYBiiw_0AEFJPCYwYlbZ7QxiGwuXdaaxrK6KM3mcFak,23052
venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.43.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
venv/lib/python3.10/site-packages/fsspec-2024.3.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ fsspec
venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ hjson (based on simplejson) is dual-licensed software. It is available under the terms
2
+ of the MIT license, or the Academic Free License version 2.1. The full
3
+ text of each license agreement is included below. This code is also
4
+ licensed to the Python Software Foundation (PSF) under a Contributor
5
+ Agreement.
6
+
7
+ MIT License
8
+ ===========
9
+
10
+ Copyright (c) 2006 Bob Ippolito
11
+ Copyright (c) 2015 Christian Zangl
12
+
13
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
14
+ this software and associated documentation files (the "Software"), to deal in
15
+ the Software without restriction, including without limitation the rights to
16
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
17
+ of the Software, and to permit persons to whom the Software is furnished to do
18
+ so, subject to the following conditions:
19
+
20
+ The above copyright notice and this permission notice shall be included in all
21
+ copies or substantial portions of the Software.
22
+
23
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29
+ SOFTWARE.
30
+
31
+ Academic Free License v. 2.1
32
+ ============================
33
+
34
+ Copyright (c) 2006 Bob Ippolito. All rights reserved.
35
+ Copyright (c) 2015 Christian Zangl
36
+
37
+ This Academic Free License (the "License") applies to any original work of authorship (the "Original Work") whose owner (the "Licensor") has placed the following notice immediately following the copyright notice for the Original Work:
38
+
39
+ Licensed under the Academic Free License version 2.1
40
+
41
+ 1) Grant of Copyright License. Licensor hereby grants You a world-wide, royalty-free, non-exclusive, perpetual, sublicenseable license to do the following:
42
+
43
+ a) to reproduce the Original Work in copies;
44
+
45
+ b) to prepare derivative works ("Derivative Works") based upon the Original Work;
46
+
47
+ c) to distribute copies of the Original Work and Derivative Works to the public;
48
+
49
+ d) to perform the Original Work publicly; and
50
+
51
+ e) to display the Original Work publicly.
52
+
53
+ 2) Grant of Patent License. Licensor hereby grants You a world-wide, royalty-free, non-exclusive, perpetual, sublicenseable license, under patent claims owned or controlled by the Licensor that are embodied in the Original Work as furnished by the Licensor, to make, use, sell and offer for sale the Original Work and Derivative Works.
54
+
55
+ 3) Grant of Source Code License. The term "Source Code" means the preferred form of the Original Work for making modifications to it and all available documentation describing how to modify the Original Work. Licensor hereby agrees to provide a machine-readable copy of the Source Code of the Original Work along with each copy of the Original Work that Licensor distributes. Licensor reserves the right to satisfy this obligation by placing a machine-readable copy of the Source Code in an information repository reasonably calculated to permit inexpensive and convenient access by You for as long as Licensor continues to distribute the Original Work, and by publishing the address of that information repository in a notice immediately following the copyright notice that applies to the Original Work.
56
+
57
+ 4) Exclusions From License Grant. Neither the names of Licensor, nor the names of any contributors to the Original Work, nor any of their trademarks or service marks, may be used to endorse or promote products derived from this Original Work without express prior written permission of the Licensor. Nothing in this License shall be deemed to grant any rights to trademarks, copyrights, patents, trade secrets or any other intellectual property of Licensor except as expressly stated herein. No patent license is granted to make, use, sell or offer to sell embodiments of any patent claims other than the licensed claims defined in Section 2. No right is granted to the trademarks of Licensor even if such marks are included in the Original Work. Nothing in this License shall be interpreted to prohibit Licensor from licensing under different terms from this License any Original Work that Licensor otherwise would have a right to license.
58
+
59
+ 5) This section intentionally omitted.
60
+
61
+ 6) Attribution Rights. You must retain, in the Source Code of any Derivative Works that You create, all copyright, patent or trademark notices from the Source Code of the Original Work, as well as any notices of licensing and any descriptive text identified therein as an "Attribution Notice." You must cause the Source Code for any Derivative Works that You create to carry a prominent Attribution Notice reasonably calculated to inform recipients that You have modified the Original Work.
62
+
63
+ 7) Warranty of Provenance and Disclaimer of Warranty. Licensor warrants that the copyright in and to the Original Work and the patent rights granted herein by Licensor are owned by the Licensor or are sublicensed to You under the terms of this License with the permission of the contributor(s) of those copyrights and patent rights. Except as expressly stated in the immediately proceeding sentence, the Original Work is provided under this License on an "AS IS" BASIS and WITHOUT WARRANTY, either express or implied, including, without limitation, the warranties of NON-INFRINGEMENT, MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. This DISCLAIMER OF WARRANTY constitutes an essential part of this License. No license to Original Work is granted hereunder except under this disclaimer.
64
+
65
+ 8) Limitation of Liability. Under no circumstances and under no legal theory, whether in tort (including negligence), contract, or otherwise, shall the Licensor be liable to any person for any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or the use of the Original Work including, without limitation, damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses. This limitation of liability shall not apply to liability for death or personal injury resulting from Licensor's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.
66
+
67
+ 9) Acceptance and Termination. If You distribute copies of the Original Work or a Derivative Work, You must make a reasonable effort under the circumstances to obtain the express assent of recipients to the terms of this License. Nothing else but this License (or another written agreement between Licensor and You) grants You permission to create Derivative Works based upon the Original Work or to exercise any of the rights granted in Section 1 herein, and any attempt to do so except under the terms of this License (or another written agreement between Licensor and You) is expressly prohibited by U.S. copyright law, the equivalent laws of other countries, and by international treaty. Therefore, by exercising any of the rights granted to You in Section 1 herein, You indicate Your acceptance of this License and all of its terms and conditions.
68
+
69
+ 10) Termination for Patent Action. This License shall terminate automatically and You may no longer exercise any of the rights granted to You by this License as of the date You commence an action, including a cross-claim or counterclaim, against Licensor or any licensee alleging that the Original Work infringes a patent. This termination provision shall not apply for an action alleging patent infringement by combinations of the Original Work with other software or hardware.
70
+
71
+ 11) Jurisdiction, Venue and Governing Law. Any action or suit relating to this License may be brought only in the courts of a jurisdiction wherein the Licensor resides or in which Licensor conducts its primary business, and under the laws of that jurisdiction excluding its conflict-of-law provisions. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any use of the Original Work outside the scope of this License or after its termination shall be subject to the requirements and penalties of the U.S. Copyright Act, 17 U.S.C. § 101 et seq., the equivalent laws of other countries, and international treaty. This section shall survive the termination of this License.
72
+
73
+ 12) Attorneys Fees. In any action to enforce the terms of this License or seeking damages relating thereto, the prevailing party shall be entitled to recover its costs and expenses, including, without limitation, reasonable attorneys' fees and costs incurred in connection with such action, including any appeal of such action. This section shall survive the termination of this License.
74
+
75
+ 13) Miscellaneous. This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable.
76
+
77
+ 14) Definition of "You" in This License. "You" throughout this License, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with you. For purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
78
+
79
+ 15) Right to Use. You may use the Original Work in all ways not otherwise restricted or conditioned by this License or by law, and Licensor promises not to interfere with or be responsible for such uses by You.
80
+
81
+ This license is Copyright (C) 2003-2004 Lawrence E. Rosen. All rights reserved. Permission is hereby granted to copy and distribute this license without modification. This license may not be modified without the express written permission of its copyright owner.
venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: hjson
3
+ Version: 3.1.0
4
+ Summary: Hjson, a user interface for JSON.
5
+ Home-page: http://github.com/hjson/hjson-py
6
+ Author: Christian Zangl
7
+ Author-email: [email protected]
8
+ License: MIT License
9
+ Keywords: json comments configuration
10
+ Platform: any
11
+ Classifier: Development Status :: 5 - Production/Stable
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: License :: OSI Approved :: Academic Free License (AFL)
15
+ Classifier: Programming Language :: Python
16
+ Classifier: Programming Language :: Python :: 2
17
+ Classifier: Programming Language :: Python :: 2.6
18
+ Classifier: Programming Language :: Python :: 2.7
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.3
21
+ Classifier: Programming Language :: Python :: 3.4
22
+ Classifier: Programming Language :: Python :: 3.5
23
+ Classifier: Programming Language :: Python :: Implementation :: CPython
24
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
25
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
26
+ License-File: LICENSE.txt
27
+
28
+ hjson-py
29
+ ========
30
+
31
+ `Hjson`_, a user interface for JSON
32
+
33
+ Hjson works with Python 2.5+ and Python 3.3+ (based on `simplejson`_)
34
+
35
+ Installation
36
+ ============
37
+
38
+ - ``pip install hjson``
39
+
40
+ - or download from https://pypi.python.org/pypi/hjson
41
+
42
+ Commandline
43
+ -----------
44
+
45
+ ::
46
+
47
+ Usage:
48
+ hjson [options]
49
+ hjson [options] <input>
50
+ hjson (-h | --help)
51
+ hjson (-V | --version)
52
+
53
+ Options:
54
+ -h --help Show this screen.
55
+ -j Output as formatted JSON.
56
+ -c Output as JSON.
57
+ -V --version Show version.
58
+
59
+ E.g. ``echo '{"json":"obj"}' | hjson``
60
+
61
+
62
+ Usage
63
+ =====
64
+
65
+ .. code-block:: python
66
+
67
+ import hjson
68
+
69
+ Decoding Hjson
70
+ --------------
71
+
72
+ .. code-block:: python
73
+
74
+ text = """{
75
+ foo: a
76
+ bar: 1
77
+ }"""
78
+
79
+ hjson.loads(text)
80
+
81
+ Result:
82
+
83
+ .. code-block:: python
84
+
85
+ OrderedDict([('foo', 'a'), ('bar', 1)])
86
+
87
+ Encoding Python object hierarchies
88
+ ----------------------------------
89
+
90
+ .. code-block:: python
91
+
92
+ hjson.dumps({'foo': 'text', 'bar': (1, 2)})
93
+
94
+ Result:
95
+
96
+ ::
97
+
98
+ {
99
+ foo: text
100
+ bar:
101
+ [
102
+ 1
103
+ 2
104
+ ]
105
+ }
106
+
107
+ Encoding as JSON
108
+ ----------------
109
+
110
+ Note that this is probably not as performant as the simplejson version.
111
+
112
+ .. code-block:: python
113
+
114
+ hjson.dumpsJSON(['foo', {'bar': ('baz', None, 1.0, 2)}])
115
+
116
+ Result: ``'["foo", {"bar": ["baz", null, 1.0, 2]}]'``
117
+
118
+ API
119
+ ===
120
+
121
+ `hjson-py`_
122
+
123
+ .. _Hjson: https://hjson.github.io
124
+ .. _simplejson: https://github.com/simplejson/simplejson
125
+ .. _hjson-py: http://hjson.github.io/hjson-py/
venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [console_scripts]
2
+ hjson = hjson.tool:main
venv/lib/python3.10/site-packages/hjson-3.1.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ hjson
venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ End User License Agreement
2
+ --------------------------
3
+
4
+
5
+ Preface
6
+ -------
7
+
8
+ The Software License Agreement in Chapter 1 and the Supplement
9
+ in Chapter 2 contain license terms and conditions that govern
10
+ the use of NVIDIA software. By accepting this agreement, you
11
+ agree to comply with all the terms and conditions applicable
12
+ to the product(s) included herein.
13
+
14
+
15
+ NVIDIA Driver
16
+
17
+
18
+ Description
19
+
20
+ This package contains the operating system driver and
21
+ fundamental system software components for NVIDIA GPUs.
22
+
23
+
24
+ NVIDIA CUDA Toolkit
25
+
26
+
27
+ Description
28
+
29
+ The NVIDIA CUDA Toolkit provides command-line and graphical
30
+ tools for building, debugging and optimizing the performance
31
+ of applications accelerated by NVIDIA GPUs, runtime and math
32
+ libraries, and documentation including programming guides,
33
+ user manuals, and API references.
34
+
35
+
36
+ Default Install Location of CUDA Toolkit
37
+
38
+ Windows platform:
39
+
40
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
41
+
42
+ Linux platform:
43
+
44
+ /usr/local/cuda-#.#
45
+
46
+ Mac platform:
47
+
48
+ /Developer/NVIDIA/CUDA-#.#
49
+
50
+
51
+ NVIDIA CUDA Samples
52
+
53
+
54
+ Description
55
+
56
+ This package includes over 100+ CUDA examples that demonstrate
57
+ various CUDA programming principles, and efficient CUDA
58
+ implementation of algorithms in specific application domains.
59
+
60
+
61
+ Default Install Location of CUDA Samples
62
+
63
+ Windows platform:
64
+
65
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
66
+
67
+ Linux platform:
68
+
69
+ /usr/local/cuda-#.#/samples
70
+
71
+ and
72
+
73
+ $HOME/NVIDIA_CUDA-#.#_Samples
74
+
75
+ Mac platform:
76
+
77
+ /Developer/NVIDIA/CUDA-#.#/samples
78
+
79
+
80
+ NVIDIA Nsight Visual Studio Edition (Windows only)
81
+
82
+
83
+ Description
84
+
85
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
86
+ development environment integrated into Microsoft Visual
87
+ Studio that provides tools for debugging, profiling, analyzing
88
+ and optimizing your GPU computing and graphics applications.
89
+
90
+
91
+ Default Install Location of Nsight Visual Studio Edition
92
+
93
+ Windows platform:
94
+
95
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
96
+
97
+
98
+ 1. License Agreement for NVIDIA Software Development Kits
99
+ ---------------------------------------------------------
100
+
101
+
102
+ Release Date: July 26, 2018
103
+ ---------------------------
104
+
105
+
106
+ Important Notice — Read before downloading,
107
+ copying or using the licensed software:
108
+ -------------------------------------------------------
109
+
110
+ This license agreement, including exhibits attached
111
+ (“Agreement”) is a legal agreement between you and NVIDIA
112
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
113
+ software development kit (“SDK”).
114
+
115
+ Each SDK has its own set of software and materials, but here
116
+ is a description of the types of items that may be included in
117
+ a SDK: source code, header files, APIs, data sets and assets
118
+ (examples include images, textures, models, scenes, videos,
119
+ native API input/output files), binary software, sample code,
120
+ libraries, utility programs, programming code and
121
+ documentation.
122
+
123
+ This Agreement can be accepted only by an adult of legal age
124
+ of majority in the country in which the SDK is used.
125
+
126
+ If you are entering into this Agreement on behalf of a company
127
+ or other legal entity, you represent that you have the legal
128
+ authority to bind the entity to this Agreement, in which case
129
+ “you” will mean the entity you represent.
130
+
131
+ If you don’t have the required age or authority to accept
132
+ this Agreement, or if you don’t accept all the terms and
133
+ conditions of this Agreement, do not download, install or use
134
+ the SDK.
135
+
136
+ You agree to use the SDK only for purposes that are permitted
137
+ by (a) this Agreement, and (b) any applicable law, regulation
138
+ or generally accepted practices or guidelines in the relevant
139
+ jurisdictions.
140
+
141
+
142
+ 1.1. License
143
+
144
+
145
+ 1.1.1. License Grant
146
+
147
+ Subject to the terms of this Agreement, NVIDIA hereby grants
148
+ you a non-exclusive, non-transferable license, without the
149
+ right to sublicense (except as expressly provided in this
150
+ Agreement) to:
151
+
152
+ 1. Install and use the SDK,
153
+
154
+ 2. Modify and create derivative works of sample source code
155
+ delivered in the SDK, and
156
+
157
+ 3. Distribute those portions of the SDK that are identified
158
+ in this Agreement as distributable, as incorporated in
159
+ object code format into a software application that meets
160
+ the distribution requirements indicated in this Agreement.
161
+
162
+
163
+ 1.1.2. Distribution Requirements
164
+
165
+ These are the distribution requirements for you to exercise
166
+ the distribution grant:
167
+
168
+ 1. Your application must have material additional
169
+ functionality, beyond the included portions of the SDK.
170
+
171
+ 2. The distributable portions of the SDK shall only be
172
+ accessed by your application.
173
+
174
+ 3. The following notice shall be included in modifications
175
+ and derivative works of sample source code distributed:
176
+ “This software contains source code provided by NVIDIA
177
+ Corporation.”
178
+
179
+ 4. Unless a developer tool is identified in this Agreement
180
+ as distributable, it is delivered for your internal use
181
+ only.
182
+
183
+ 5. The terms under which you distribute your application
184
+ must be consistent with the terms of this Agreement,
185
+ including (without limitation) terms relating to the
186
+ license grant and license restrictions and protection of
187
+ NVIDIA’s intellectual property rights. Additionally, you
188
+ agree that you will protect the privacy, security and
189
+ legal rights of your application users.
190
+
191
+ 6. You agree to notify NVIDIA in writing of any known or
192
+ suspected distribution or use of the SDK not in compliance
193
+ with the requirements of this Agreement, and to enforce
194
+ the terms of your agreements with respect to distributed
195
+ SDK.
196
+
197
+
198
+ 1.1.3. Authorized Users
199
+
200
+ You may allow employees and contractors of your entity or of
201
+ your subsidiary(ies) to access and use the SDK from your
202
+ secure network to perform work on your behalf.
203
+
204
+ If you are an academic institution you may allow users
205
+ enrolled or employed by the academic institution to access and
206
+ use the SDK from your secure network.
207
+
208
+ You are responsible for the compliance with the terms of this
209
+ Agreement by your authorized users. If you become aware that
210
+ your authorized users didn’t follow the terms of this
211
+ Agreement, you agree to take reasonable steps to resolve the
212
+ non-compliance and prevent new occurrences.
213
+
214
+
215
+ 1.1.4. Pre-Release SDK
216
+
217
+ The SDK versions identified as alpha, beta, preview or
218
+ otherwise as pre-release, may not be fully functional, may
219
+ contain errors or design flaws, and may have reduced or
220
+ different security, privacy, accessibility, availability, and
221
+ reliability standards relative to commercial versions of
222
+ NVIDIA software and materials. Use of a pre-release SDK may
223
+ result in unexpected results, loss of data, project delays or
224
+ other unpredictable damage or loss.
225
+
226
+ You may use a pre-release SDK at your own risk, understanding
227
+ that pre-release SDKs are not intended for use in production
228
+ or business-critical systems.
229
+
230
+ NVIDIA may choose not to make available a commercial version
231
+ of any pre-release SDK. NVIDIA may also choose to abandon
232
+ development and terminate the availability of a pre-release
233
+ SDK at any time without liability.
234
+
235
+
236
+ 1.1.5. Updates
237
+
238
+ NVIDIA may, at its option, make available patches, workarounds
239
+ or other updates to this SDK. Unless the updates are provided
240
+ with their separate governing terms, they are deemed part of
241
+ the SDK licensed to you as provided in this Agreement. You
242
+ agree that the form and content of the SDK that NVIDIA
243
+ provides may change without prior notice to you. While NVIDIA
244
+ generally maintains compatibility between versions, NVIDIA may
245
+ in some cases make changes that introduce incompatibilities in
246
+ future versions of the SDK.
247
+
248
+
249
+ 1.1.6. Third Party Licenses
250
+
251
+ The SDK may come bundled with, or otherwise include or be
252
+ distributed with, third party software licensed by a NVIDIA
253
+ supplier and/or open source software provided under an open
254
+ source license. Use of third party software is subject to the
255
+ third-party license terms, or in the absence of third party
256
+ terms, the terms of this Agreement. Copyright to third party
257
+ software is held by the copyright holders indicated in the
258
+ third-party software or license.
259
+
260
+
261
+ 1.1.7. Reservation of Rights
262
+
263
+ NVIDIA reserves all rights, title, and interest in and to the
264
+ SDK, not expressly granted to you under this Agreement.
265
+
266
+
267
+ 1.2. Limitations
268
+
269
+ The following license limitations apply to your use of the
270
+ SDK:
271
+
272
+ 1. You may not reverse engineer, decompile or disassemble,
273
+ or remove copyright or other proprietary notices from any
274
+ portion of the SDK or copies of the SDK.
275
+
276
+ 2. Except as expressly provided in this Agreement, you may
277
+ not copy, sell, rent, sublicense, transfer, distribute,
278
+ modify, or create derivative works of any portion of the
279
+ SDK. For clarity, you may not distribute or sublicense the
280
+ SDK as a stand-alone product.
281
+
282
+ 3. Unless you have an agreement with NVIDIA for this
283
+ purpose, you may not indicate that an application created
284
+ with the SDK is sponsored or endorsed by NVIDIA.
285
+
286
+ 4. You may not bypass, disable, or circumvent any
287
+ encryption, security, digital rights management or
288
+ authentication mechanism in the SDK.
289
+
290
+ 5. You may not use the SDK in any manner that would cause it
291
+ to become subject to an open source software license. As
292
+ examples, licenses that require as a condition of use,
293
+ modification, and/or distribution that the SDK be:
294
+
295
+ a. Disclosed or distributed in source code form;
296
+
297
+ b. Licensed for the purpose of making derivative works;
298
+ or
299
+
300
+ c. Redistributable at no charge.
301
+
302
+ 6. Unless you have an agreement with NVIDIA for this
303
+ purpose, you may not use the SDK with any system or
304
+ application where the use or failure of the system or
305
+ application can reasonably be expected to threaten or
306
+ result in personal injury, death, or catastrophic loss.
307
+ Examples include use in avionics, navigation, military,
308
+ medical, life support or other life critical applications.
309
+ NVIDIA does not design, test or manufacture the SDK for
310
+ these critical uses and NVIDIA shall not be liable to you
311
+ or any third party, in whole or in part, for any claims or
312
+ damages arising from such uses.
313
+
314
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
315
+ and its affiliates, and their respective employees,
316
+ contractors, agents, officers and directors, from and
317
+ against any and all claims, damages, obligations, losses,
318
+ liabilities, costs or debt, fines, restitutions and
319
+ expenses (including but not limited to attorney’s fees
320
+ and costs incident to establishing the right of
321
+ indemnification) arising out of or related to your use of
322
+ the SDK outside of the scope of this Agreement, or not in
323
+ compliance with its terms.
324
+
325
+
326
+ 1.3. Ownership
327
+
328
+ 1. NVIDIA or its licensors hold all rights, title and
329
+ interest in and to the SDK and its modifications and
330
+ derivative works, including their respective intellectual
331
+ property rights, subject to your rights described in this
332
+ section. This SDK may include software and materials from
333
+ NVIDIA’s licensors, and these licensors are intended
334
+ third party beneficiaries that may enforce this Agreement
335
+ with respect to their intellectual property rights.
336
+
337
+ 2. You hold all rights, title and interest in and to your
338
+ applications and your derivative works of the sample
339
+ source code delivered in the SDK, including their
340
+ respective intellectual property rights, subject to
341
+ NVIDIA’s rights described in this section.
342
+
343
+ 3. You may, but don’t have to, provide to NVIDIA
344
+ suggestions, feature requests or other feedback regarding
345
+ the SDK, including possible enhancements or modifications
346
+ to the SDK. For any feedback that you voluntarily provide,
347
+ you hereby grant NVIDIA and its affiliates a perpetual,
348
+ non-exclusive, worldwide, irrevocable license to use,
349
+ reproduce, modify, license, sublicense (through multiple
350
+ tiers of sublicensees), and distribute (through multiple
351
+ tiers of distributors) it without the payment of any
352
+ royalties or fees to you. NVIDIA will use feedback at its
353
+ choice. NVIDIA is constantly looking for ways to improve
354
+ its products, so you may send feedback to NVIDIA through
355
+ the developer portal at https://developer.nvidia.com.
356
+
357
+
358
+ 1.4. No Warranties
359
+
360
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
361
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
362
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
363
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
364
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
365
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
366
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
367
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
368
+ DEALING OR COURSE OF TRADE.
369
+
370
+
371
+ 1.5. Limitation of Liability
372
+
373
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
374
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
375
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
376
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
377
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
378
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
379
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
380
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
381
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
382
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
383
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
384
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
385
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
386
+ LIMIT.
387
+
388
+ These exclusions and limitations of liability shall apply
389
+ regardless if NVIDIA or its affiliates have been advised of
390
+ the possibility of such damages, and regardless of whether a
391
+ remedy fails its essential purpose. These exclusions and
392
+ limitations of liability form an essential basis of the
393
+ bargain between the parties, and, absent any of these
394
+ exclusions or limitations of liability, the provisions of this
395
+ Agreement, including, without limitation, the economic terms,
396
+ would be substantially different.
397
+
398
+
399
+ 1.6. Termination
400
+
401
+ 1. This Agreement will continue to apply until terminated by
402
+ either you or NVIDIA as described below.
403
+
404
+ 2. If you want to terminate this Agreement, you may do so by
405
+ stopping to use the SDK.
406
+
407
+ 3. NVIDIA may, at any time, terminate this Agreement if:
408
+
409
+ a. (i) you fail to comply with any term of this
410
+ Agreement and the non-compliance is not fixed within
411
+ thirty (30) days following notice from NVIDIA (or
412
+ immediately if you violate NVIDIA’s intellectual
413
+ property rights);
414
+
415
+ b. (ii) you commence or participate in any legal
416
+ proceeding against NVIDIA with respect to the SDK; or
417
+
418
+ c. (iii) NVIDIA decides to no longer provide the SDK in
419
+ a country or, in NVIDIA’s sole discretion, the
420
+ continued use of it is no longer commercially viable.
421
+
422
+ 4. Upon any termination of this Agreement, you agree to
423
+ promptly discontinue use of the SDK and destroy all copies
424
+ in your possession or control. Your prior distributions in
425
+ accordance with this Agreement are not affected by the
426
+ termination of this Agreement. Upon written request, you
427
+ will certify in writing that you have complied with your
428
+ commitments under this section. Upon any termination of
429
+ this Agreement all provisions survive except for the
430
+ license grant provisions.
431
+
432
+
433
+ 1.7. General
434
+
435
+ If you wish to assign this Agreement or your rights and
436
+ obligations, including by merger, consolidation, dissolution
437
+ or operation of law, contact NVIDIA to ask for permission. Any
438
+ attempted assignment not approved by NVIDIA in writing shall
439
+ be void and of no effect. NVIDIA may assign, delegate or
440
+ transfer this Agreement and its rights and obligations, and if
441
+ to a non-affiliate you will be notified.
442
+
443
+ You agree to cooperate with NVIDIA and provide reasonably
444
+ requested information to verify your compliance with this
445
+ Agreement.
446
+
447
+ This Agreement will be governed in all respects by the laws of
448
+ the United States and of the State of Delaware as those laws
449
+ are applied to contracts entered into and performed entirely
450
+ within Delaware by Delaware residents, without regard to the
451
+ conflicts of laws principles. The United Nations Convention on
452
+ Contracts for the International Sale of Goods is specifically
453
+ disclaimed. You agree to all terms of this Agreement in the
454
+ English language.
455
+
456
+ The state or federal courts residing in Santa Clara County,
457
+ California shall have exclusive jurisdiction over any dispute
458
+ or claim arising out of this Agreement. Notwithstanding this,
459
+ you agree that NVIDIA shall still be allowed to apply for
460
+ injunctive remedies or an equivalent type of urgent legal
461
+ relief in any jurisdiction.
462
+
463
+ If any court of competent jurisdiction determines that any
464
+ provision of this Agreement is illegal, invalid or
465
+ unenforceable, such provision will be construed as limited to
466
+ the extent necessary to be consistent with and fully
467
+ enforceable under the law and the remaining provisions will
468
+ remain in full force and effect. Unless otherwise specified,
469
+ remedies are cumulative.
470
+
471
+ Each party acknowledges and agrees that the other is an
472
+ independent contractor in the performance of this Agreement.
473
+
474
+ The SDK has been developed entirely at private expense and is
475
+ “commercial items” consisting of “commercial computer
476
+ software” and “commercial computer software
477
+ documentation” provided with RESTRICTED RIGHTS. Use,
478
+ duplication or disclosure by the U.S. Government or a U.S.
479
+ Government subcontractor is subject to the restrictions in
480
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
481
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
482
+ Software - Restricted Rights clause at FAR 52.227-19, as
483
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
484
+ Expressway, Santa Clara, CA 95051.
485
+
486
+ The SDK is subject to United States export laws and
487
+ regulations. You agree that you will not ship, transfer or
488
+ export the SDK into any country, or use the SDK in any manner,
489
+ prohibited by the United States Bureau of Industry and
490
+ Security or economic sanctions regulations administered by the
491
+ U.S. Department of Treasury’s Office of Foreign Assets
492
+ Control (OFAC), or any applicable export laws, restrictions or
493
+ regulations. These laws include restrictions on destinations,
494
+ end users and end use. By accepting this Agreement, you
495
+ confirm that you are not a resident or citizen of any country
496
+ currently embargoed by the U.S. and that you are not otherwise
497
+ prohibited from receiving the SDK.
498
+
499
+ Any notice delivered by NVIDIA to you under this Agreement
500
+ will be delivered via mail, email or fax. You agree that any
501
+ notices that NVIDIA sends you electronically will satisfy any
502
+ legal communication requirements. Please direct your legal
503
+ notices or other correspondence to NVIDIA Corporation, 2788
504
+ San Tomas Expressway, Santa Clara, California 95051, United
505
+ States of America, Attention: Legal Department.
506
+
507
+ This Agreement and any exhibits incorporated into this
508
+ Agreement constitute the entire agreement of the parties with
509
+ respect to the subject matter of this Agreement and supersede
510
+ all prior negotiations or documentation exchanged between the
511
+ parties relating to this SDK license. Any additional and/or
512
+ conflicting terms on documents issued by you are null, void,
513
+ and invalid. Any amendment or waiver under this Agreement
514
+ shall be in writing and signed by representatives of both
515
+ parties.
516
+
517
+
518
+ 2. CUDA Toolkit Supplement to Software License Agreement for
519
+ NVIDIA Software Development Kits
520
+ ------------------------------------------------------------
521
+
522
+
523
+ Release date: August 16, 2018
524
+ -----------------------------
525
+
526
+ The terms in this supplement govern your use of the NVIDIA
527
+ CUDA Toolkit SDK under the terms of your license agreement
528
+ (“Agreement”) as modified by this supplement. Capitalized
529
+ terms used but not defined below have the meaning assigned to
530
+ them in the Agreement.
531
+
532
+ This supplement is an exhibit to the Agreement and is
533
+ incorporated as an integral part of the Agreement. In the
534
+ event of conflict between the terms in this supplement and the
535
+ terms in the Agreement, the terms in this supplement govern.
536
+
537
+
538
+ 2.1. License Scope
539
+
540
+ The SDK is licensed for you to develop applications only for
541
+ use in systems with NVIDIA GPUs.
542
+
543
+
544
+ 2.2. Distribution
545
+
546
+ The portions of the SDK that are distributable under the
547
+ Agreement are listed in Attachment A.
548
+
549
+
550
+ 2.3. Operating Systems
551
+
552
+ Those portions of the SDK designed exclusively for use on the
553
+ Linux or FreeBSD operating systems, or other operating systems
554
+ derived from the source code to these operating systems, may
555
+ be copied and redistributed for use in accordance with this
556
+ Agreement, provided that the object code files are not
557
+ modified in any way (except for unzipping of compressed
558
+ files).
559
+
560
+
561
+ 2.4. Audio and Video Encoders and Decoders
562
+
563
+ You acknowledge and agree that it is your sole responsibility
564
+ to obtain any additional third-party licenses required to
565
+ make, have made, use, have used, sell, import, and offer for
566
+ sale your products or services that include or incorporate any
567
+ third-party software and content relating to audio and/or
568
+ video encoders and decoders from, including but not limited
569
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
570
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
571
+ under this Agreement any necessary patent or other rights with
572
+ respect to any audio and/or video encoders and decoders.
573
+
574
+
575
+ 2.5. Licensing
576
+
577
+ If the distribution terms in this Agreement are not suitable
578
+ for your organization, or for any questions regarding this
579
+ Agreement, please contact NVIDIA at
580
581
+
582
+
583
+ 2.6. Attachment A
584
+
585
+ The following portions of the SDK are distributable under the
586
+ Agreement:
587
+
588
+ Component
589
+
590
+ CUDA Runtime
591
+
592
+ Windows
593
+
594
+ cudart.dll, cudart_static.lib, cudadevrt.lib
595
+
596
+ Mac OSX
597
+
598
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
599
+
600
+ Linux
601
+
602
+ libcudart.so, libcudart_static.a, libcudadevrt.a
603
+
604
+ Android
605
+
606
+ libcudart.so, libcudart_static.a, libcudadevrt.a
607
+
608
+ Component
609
+
610
+ CUDA FFT Library
611
+
612
+ Windows
613
+
614
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
615
+
616
+ Mac OSX
617
+
618
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
619
+ libcufftw_static.a
620
+
621
+ Linux
622
+
623
+ libcufft.so, libcufft_static.a, libcufftw.so,
624
+ libcufftw_static.a
625
+
626
+ Android
627
+
628
+ libcufft.so, libcufft_static.a, libcufftw.so,
629
+ libcufftw_static.a
630
+
631
+ Component
632
+
633
+ CUDA BLAS Library
634
+
635
+ Windows
636
+
637
+ cublas.dll, cublasLt.dll
638
+
639
+ Mac OSX
640
+
641
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
642
+ libcublasLt_static.a
643
+
644
+ Linux
645
+
646
+ libcublas.so, libcublasLt.so, libcublas_static.a,
647
+ libcublasLt_static.a
648
+
649
+ Android
650
+
651
+ libcublas.so, libcublasLt.so, libcublas_static.a,
652
+ libcublasLt_static.a
653
+
654
+ Component
655
+
656
+ NVIDIA "Drop-in" BLAS Library
657
+
658
+ Windows
659
+
660
+ nvblas.dll
661
+
662
+ Mac OSX
663
+
664
+ libnvblas.dylib
665
+
666
+ Linux
667
+
668
+ libnvblas.so
669
+
670
+ Component
671
+
672
+ CUDA Sparse Matrix Library
673
+
674
+ Windows
675
+
676
+ cusparse.dll, cusparse.lib
677
+
678
+ Mac OSX
679
+
680
+ libcusparse.dylib, libcusparse_static.a
681
+
682
+ Linux
683
+
684
+ libcusparse.so, libcusparse_static.a
685
+
686
+ Android
687
+
688
+ libcusparse.so, libcusparse_static.a
689
+
690
+ Component
691
+
692
+ CUDA Linear Solver Library
693
+
694
+ Windows
695
+
696
+ cusolver.dll, cusolver.lib
697
+
698
+ Mac OSX
699
+
700
+ libcusolver.dylib, libcusolver_static.a
701
+
702
+ Linux
703
+
704
+ libcusolver.so, libcusolver_static.a
705
+
706
+ Android
707
+
708
+ libcusolver.so, libcusolver_static.a
709
+
710
+ Component
711
+
712
+ CUDA Random Number Generation Library
713
+
714
+ Windows
715
+
716
+ curand.dll, curand.lib
717
+
718
+ Mac OSX
719
+
720
+ libcurand.dylib, libcurand_static.a
721
+
722
+ Linux
723
+
724
+ libcurand.so, libcurand_static.a
725
+
726
+ Android
727
+
728
+ libcurand.so, libcurand_static.a
729
+
730
+ Component
731
+
732
+ CUDA Accelerated Graph Library
733
+
734
+ Component
735
+
736
+ NVIDIA Performance Primitives Library
737
+
738
+ Windows
739
+
740
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
741
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
742
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
743
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
744
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
745
+
746
+ Mac OSX
747
+
748
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
749
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
750
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
751
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
752
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
753
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
754
+ libnpps.dylib, libnpps_static.a
755
+
756
+ Linux
757
+
758
+ libnppc.so, libnppc_static.a, libnppial.so,
759
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
760
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
761
+ libnppidei_static.a, libnppif.so, libnppif_static.a
762
+ libnppig.so, libnppig_static.a, libnppim.so,
763
+ libnppim_static.a, libnppist.so, libnppist_static.a,
764
+ libnppisu.so, libnppisu_static.a, libnppitc.so
765
+ libnppitc_static.a, libnpps.so, libnpps_static.a
766
+
767
+ Android
768
+
769
+ libnppc.so, libnppc_static.a, libnppial.so,
770
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
771
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
772
+ libnppidei_static.a, libnppif.so, libnppif_static.a
773
+ libnppig.so, libnppig_static.a, libnppim.so,
774
+ libnppim_static.a, libnppist.so, libnppist_static.a,
775
+ libnppisu.so, libnppisu_static.a, libnppitc.so
776
+ libnppitc_static.a, libnpps.so, libnpps_static.a
777
+
778
+ Component
779
+
780
+ NVIDIA JPEG Library
781
+
782
+ Linux
783
+
784
+ libnvjpeg.so, libnvjpeg_static.a
785
+
786
+ Component
787
+
788
+ Internal common library required for statically linking to
789
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
790
+
791
+ Mac OSX
792
+
793
+ libculibos.a
794
+
795
+ Linux
796
+
797
+ libculibos.a
798
+
799
+ Component
800
+
801
+ NVIDIA Runtime Compilation Library and Header
802
+
803
+ All
804
+
805
+ nvrtc.h
806
+
807
+ Windows
808
+
809
+ nvrtc.dll, nvrtc-builtins.dll
810
+
811
+ Mac OSX
812
+
813
+ libnvrtc.dylib, libnvrtc-builtins.dylib
814
+
815
+ Linux
816
+
817
+ libnvrtc.so, libnvrtc-builtins.so
818
+
819
+ Component
820
+
821
+ NVIDIA Optimizing Compiler Library
822
+
823
+ Windows
824
+
825
+ nvvm.dll
826
+
827
+ Mac OSX
828
+
829
+ libnvvm.dylib
830
+
831
+ Linux
832
+
833
+ libnvvm.so
834
+
835
+ Component
836
+
837
+ NVIDIA Common Device Math Functions Library
838
+
839
+ Windows
840
+
841
+ libdevice.10.bc
842
+
843
+ Mac OSX
844
+
845
+ libdevice.10.bc
846
+
847
+ Linux
848
+
849
+ libdevice.10.bc
850
+
851
+ Component
852
+
853
+ CUDA Occupancy Calculation Header Library
854
+
855
+ All
856
+
857
+ cuda_occupancy.h
858
+
859
+ Component
860
+
861
+ CUDA Half Precision Headers
862
+
863
+ All
864
+
865
+ cuda_fp16.h, cuda_fp16.hpp
866
+
867
+ Component
868
+
869
+ CUDA Profiling Tools Interface (CUPTI) Library
870
+
871
+ Windows
872
+
873
+ cupti.dll
874
+
875
+ Mac OSX
876
+
877
+ libcupti.dylib
878
+
879
+ Linux
880
+
881
+ libcupti.so
882
+
883
+ Component
884
+
885
+ NVIDIA Tools Extension Library
886
+
887
+ Windows
888
+
889
+ nvToolsExt.dll, nvToolsExt.lib
890
+
891
+ Mac OSX
892
+
893
+ libnvToolsExt.dylib
894
+
895
+ Linux
896
+
897
+ libnvToolsExt.so
898
+
899
+ Component
900
+
901
+ NVIDIA CUDA Driver Libraries
902
+
903
+ Linux
904
+
905
+ libcuda.so, libnvidia-fatbinaryloader.so,
906
+ libnvidia-ptxjitcompiler.so
907
+
908
+ The NVIDIA CUDA Driver Libraries are only distributable in
909
+ applications that meet this criteria:
910
+
911
+ 1. The application was developed starting from a NVIDIA CUDA
912
+ container obtained from Docker Hub or the NVIDIA GPU
913
+ Cloud, and
914
+
915
+ 2. The resulting application is packaged as a Docker
916
+ container and distributed to users on Docker Hub or the
917
+ NVIDIA GPU Cloud only.
918
+
919
+
920
+ 2.7. Attachment B
921
+
922
+
923
+ Additional Licensing Obligations
924
+
925
+ The following third party components included in the SOFTWARE
926
+ are licensed to Licensee pursuant to the following terms and
927
+ conditions:
928
+
929
+ 1. Licensee's use of the GDB third party component is
930
+ subject to the terms and conditions of GNU GPL v3:
931
+
932
+ This product includes copyrighted third-party software licensed
933
+ under the terms of the GNU General Public License v3 ("GPL v3").
934
+ All third-party software packages are copyright by their respective
935
+ authors. GPL v3 terms and conditions are hereby incorporated into
936
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
937
+
938
+ Consistent with these licensing requirements, the software
939
+ listed below is provided under the terms of the specified
940
+ open source software licenses. To obtain source code for
941
+ software provided under licenses that require
942
+ redistribution of source code, including the GNU General
943
+ Public License (GPL) and GNU Lesser General Public License
944
+ (LGPL), contact [email protected]. This offer is
945
+ valid for a period of three (3) years from the date of the
946
+ distribution of this product by NVIDIA CORPORATION.
947
+
948
+ Component License
949
+ CUDA-GDB GPL v3
950
+
951
+ 2. Licensee represents and warrants that any and all third
952
+ party licensing and/or royalty payment obligations in
953
+ connection with Licensee's use of the H.264 video codecs
954
+ are solely the responsibility of Licensee.
955
+
956
+ 3. Licensee's use of the Thrust library is subject to the
957
+ terms and conditions of the Apache License Version 2.0.
958
+ All third-party software packages are copyright by their
959
+ respective authors. Apache License Version 2.0 terms and
960
+ conditions are hereby incorporated into the Agreement by
961
+ this reference.
962
+ http://www.apache.org/licenses/LICENSE-2.0.html
963
+
964
+ In addition, Licensee acknowledges the following notice:
965
+ Thrust includes source code from the Boost Iterator,
966
+ Tuple, System, and Random Number libraries.
967
+
968
+ Boost Software License - Version 1.0 - August 17th, 2003
969
+ . . . .
970
+
971
+ Permission is hereby granted, free of charge, to any person or
972
+ organization obtaining a copy of the software and accompanying
973
+ documentation covered by this license (the "Software") to use,
974
+ reproduce, display, distribute, execute, and transmit the Software,
975
+ and to prepare derivative works of the Software, and to permit
976
+ third-parties to whom the Software is furnished to do so, all
977
+ subject to the following:
978
+
979
+ The copyright notices in the Software and this entire statement,
980
+ including the above license grant, this restriction and the following
981
+ disclaimer, must be included in all copies of the Software, in whole
982
+ or in part, and all derivative works of the Software, unless such
983
+ copies or derivative works are solely in the form of machine-executable
984
+ object code generated by a source language processor.
985
+
986
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
987
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
988
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
989
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
990
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
991
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
992
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
993
+ OTHER DEALINGS IN THE SOFTWARE.
994
+
995
+ 4. Licensee's use of the LLVM third party component is
996
+ subject to the following terms and conditions:
997
+
998
+ ======================================================
999
+ LLVM Release License
1000
+ ======================================================
1001
+ University of Illinois/NCSA
1002
+ Open Source License
1003
+
1004
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
1005
+ All rights reserved.
1006
+
1007
+ Developed by:
1008
+
1009
+ LLVM Team
1010
+
1011
+ University of Illinois at Urbana-Champaign
1012
+
1013
+ http://llvm.org
1014
+
1015
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1016
+ of this software and associated documentation files (the "Software"), to
1017
+ deal with the Software without restriction, including without limitation the
1018
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1019
+ sell copies of the Software, and to permit persons to whom the Software is
1020
+ furnished to do so, subject to the following conditions:
1021
+
1022
+ * Redistributions of source code must retain the above copyright notice,
1023
+ this list of conditions and the following disclaimers.
1024
+
1025
+ * Redistributions in binary form must reproduce the above copyright
1026
+ notice, this list of conditions and the following disclaimers in the
1027
+ documentation and/or other materials provided with the distribution.
1028
+
1029
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
1030
+ Champaign, nor the names of its contributors may be used to endorse or
1031
+ promote products derived from this Software without specific prior
1032
+ written permission.
1033
+
1034
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1035
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1036
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1037
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1038
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1039
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1040
+ DEALINGS WITH THE SOFTWARE.
1041
+
1042
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
1043
+ component is subject to the following terms and
1044
+ conditions:
1045
+
1046
+ ------------
1047
+ PCRE LICENCE
1048
+ ------------
1049
+ PCRE is a library of functions to support regular expressions whose syntax
1050
+ and semantics are as close as possible to those of the Perl 5 language.
1051
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
1052
+ specified below. The documentation for PCRE, supplied in the "doc"
1053
+ directory, is distributed under the same terms as the software itself. The
1054
+ basic library functions are written in C and are freestanding. Also
1055
+ included in the distribution is a set of C++ wrapper functions, and a just-
1056
+ in-time compiler that can be used to optimize pattern matching. These are
1057
+ both optional features that can be omitted when the library is built.
1058
+
1059
+ THE BASIC LIBRARY FUNCTIONS
1060
+ ---------------------------
1061
+ Written by: Philip Hazel
1062
+ Email local part: ph10
1063
+ Email domain: cam.ac.uk
1064
+ University of Cambridge Computing Service,
1065
+ Cambridge, England.
1066
+ Copyright (c) 1997-2012 University of Cambridge
1067
+ All rights reserved.
1068
+
1069
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
1070
+ -------------------------------------
1071
+ Written by: Zoltan Herczeg
1072
+ Email local part: hzmester
1073
+ Emain domain: freemail.hu
1074
+ Copyright(c) 2010-2012 Zoltan Herczeg
1075
+ All rights reserved.
1076
+
1077
+ STACK-LESS JUST-IN-TIME COMPILER
1078
+ --------------------------------
1079
+ Written by: Zoltan Herczeg
1080
+ Email local part: hzmester
1081
+ Emain domain: freemail.hu
1082
+ Copyright(c) 2009-2012 Zoltan Herczeg
1083
+ All rights reserved.
1084
+
1085
+ THE C++ WRAPPER FUNCTIONS
1086
+ -------------------------
1087
+ Contributed by: Google Inc.
1088
+ Copyright (c) 2007-2012, Google Inc.
1089
+ All rights reserved.
1090
+
1091
+ THE "BSD" LICENCE
1092
+ -----------------
1093
+ Redistribution and use in source and binary forms, with or without
1094
+ modification, are permitted provided that the following conditions are met:
1095
+
1096
+ * Redistributions of source code must retain the above copyright notice,
1097
+ this list of conditions and the following disclaimer.
1098
+
1099
+ * Redistributions in binary form must reproduce the above copyright
1100
+ notice, this list of conditions and the following disclaimer in the
1101
+ documentation and/or other materials provided with the distribution.
1102
+
1103
+ * Neither the name of the University of Cambridge nor the name of Google
1104
+ Inc. nor the names of their contributors may be used to endorse or
1105
+ promote products derived from this software without specific prior
1106
+ written permission.
1107
+
1108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1109
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1110
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1111
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
1112
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1113
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1114
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1115
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1116
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1117
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1118
+ POSSIBILITY OF SUCH DAMAGE.
1119
+
1120
+ 6. Some of the cuBLAS library routines were written by or
1121
+ derived from code written by Vasily Volkov and are subject
1122
+ to the Modified Berkeley Software Distribution License as
1123
+ follows:
1124
+
1125
+ Copyright (c) 2007-2009, Regents of the University of California
1126
+
1127
+ All rights reserved.
1128
+
1129
+ Redistribution and use in source and binary forms, with or without
1130
+ modification, are permitted provided that the following conditions are
1131
+ met:
1132
+ * Redistributions of source code must retain the above copyright
1133
+ notice, this list of conditions and the following disclaimer.
1134
+ * Redistributions in binary form must reproduce the above
1135
+ copyright notice, this list of conditions and the following
1136
+ disclaimer in the documentation and/or other materials provided
1137
+ with the distribution.
1138
+ * Neither the name of the University of California, Berkeley nor
1139
+ the names of its contributors may be used to endorse or promote
1140
+ products derived from this software without specific prior
1141
+ written permission.
1142
+
1143
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1144
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1145
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1146
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1147
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1148
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1149
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1150
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1151
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1152
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1153
+ POSSIBILITY OF SUCH DAMAGE.
1154
+
1155
+ 7. Some of the cuBLAS library routines were written by or
1156
+ derived from code written by Davide Barbieri and are
1157
+ subject to the Modified Berkeley Software Distribution
1158
+ License as follows:
1159
+
1160
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
1161
+
1162
+ All rights reserved.
1163
+
1164
+ Redistribution and use in source and binary forms, with or without
1165
+ modification, are permitted provided that the following conditions are
1166
+ met:
1167
+ * Redistributions of source code must retain the above copyright
1168
+ notice, this list of conditions and the following disclaimer.
1169
+ * Redistributions in binary form must reproduce the above
1170
+ copyright notice, this list of conditions and the following
1171
+ disclaimer in the documentation and/or other materials provided
1172
+ with the distribution.
1173
+ * The name of the author may not be used to endorse or promote
1174
+ products derived from this software without specific prior
1175
+ written permission.
1176
+
1177
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1178
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1179
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1180
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1181
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1182
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1183
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1184
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1185
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1186
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1187
+ POSSIBILITY OF SUCH DAMAGE.
1188
+
1189
+ 8. Some of the cuBLAS library routines were derived from
1190
+ code developed by the University of Tennessee and are
1191
+ subject to the Modified Berkeley Software Distribution
1192
+ License as follows:
1193
+
1194
+ Copyright (c) 2010 The University of Tennessee.
1195
+
1196
+ All rights reserved.
1197
+
1198
+ Redistribution and use in source and binary forms, with or without
1199
+ modification, are permitted provided that the following conditions are
1200
+ met:
1201
+ * Redistributions of source code must retain the above copyright
1202
+ notice, this list of conditions and the following disclaimer.
1203
+ * Redistributions in binary form must reproduce the above
1204
+ copyright notice, this list of conditions and the following
1205
+ disclaimer listed in this license in the documentation and/or
1206
+ other materials provided with the distribution.
1207
+ * Neither the name of the copyright holders nor the names of its
1208
+ contributors may be used to endorse or promote products derived
1209
+ from this software without specific prior written permission.
1210
+
1211
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1212
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1213
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1214
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1215
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1216
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1217
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1218
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1219
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1220
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1221
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1222
+
1223
+ 9. Some of the cuBLAS library routines were written by or
1224
+ derived from code written by Jonathan Hogg and are subject
1225
+ to the Modified Berkeley Software Distribution License as
1226
+ follows:
1227
+
1228
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
1229
+
1230
+ All rights reserved.
1231
+
1232
+ Redistribution and use in source and binary forms, with or without
1233
+ modification, are permitted provided that the following conditions are
1234
+ met:
1235
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah ([email protected])
1265
+ David Keyes ([email protected])
1266
+ Hatem Ltaief ([email protected])
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition is subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines uses code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from lunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license .
1567
+
1568
+ -----------------
venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: nvidia-cuda-cupti-cu12
3
+ Version: 12.1.105
4
+ Summary: CUDA profiling tools runtime libs.
5
+ Home-page: https://developer.nvidia.com/cuda-zone
6
+ Author: Nvidia CUDA Installer Team
7
+ Author-email: [email protected]
8
+ License: NVIDIA Proprietary Software
9
+ Keywords: cuda,nvidia,runtime,machine learning,deep learning
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: Other/Proprietary License
15
+ Classifier: Natural Language :: English
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.5
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3 :: Only
25
+ Classifier: Topic :: Scientific/Engineering
26
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
27
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Software Development
29
+ Classifier: Topic :: Software Development :: Libraries
30
+ Classifier: Operating System :: Microsoft :: Windows
31
+ Classifier: Operating System :: POSIX :: Linux
32
+ Requires-Python: >=3
33
+ License-File: License.txt
34
+
35
+ Provides libraries to enable third party tools using GPU profiling APIs.
venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/RECORD ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
3
+ nvidia/cuda_cupti/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc,,
5
+ nvidia/cuda_cupti/include/Openacc/cupti_openacc.h,sha256=Z0OM5e_hbd3cxdXyn3SCHqBBQawLg4QORnlm57Cr2-M,3513
6
+ nvidia/cuda_cupti/include/Openmp/cupti_openmp.h,sha256=E1WNmeb_7HaUSmBegtUNe4IV1i7pXeNxgzIlyKn1zrM,3491
7
+ nvidia/cuda_cupti/include/Openmp/omp-tools.h,sha256=AmuC_xPC7VPu3B-W4PmXuCNufFawhY8PjNXePaQFAOg,37403
8
+ nvidia/cuda_cupti/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ nvidia/cuda_cupti/include/__pycache__/__init__.cpython-310.pyc,,
10
+ nvidia/cuda_cupti/include/cuda_stdint.h,sha256=XbFOk9CtJjKqk7PpYNqbSVsDxAsVM8avA4rWpPi0BjQ,4093
11
+ nvidia/cuda_cupti/include/cupti.h,sha256=JkVyAGTIMYzwm62dfVqas3nMcILhgP_Wdz6fh4_NED0,4697
12
+ nvidia/cuda_cupti/include/cupti_activity.h,sha256=qVVazvOJZbDMzvbqgS8nmaHN4gaxAWO2HA_8D7-Vpiw,311866
13
+ nvidia/cuda_cupti/include/cupti_callbacks.h,sha256=-a47AoM4HoU5IuCCB_L-6lZRdrkDAC4XXLJuoUqojeY,26587
14
+ nvidia/cuda_cupti/include/cupti_checkpoint.h,sha256=rTz8JoWxqESBXyZWUhZJGm4xeYcx4OJOtJ7Ld13T_b0,5264
15
+ nvidia/cuda_cupti/include/cupti_driver_cbid.h,sha256=Uc74JDlJN_3qI04l4gkGzYbB3Ki0l0IgZILZO0WXtVs,70346
16
+ nvidia/cuda_cupti/include/cupti_events.h,sha256=oHIOKSsE5ZAot5tZK-sbS2K9xcgiXBXTZZDkPQuiaNw,52639
17
+ nvidia/cuda_cupti/include/cupti_metrics.h,sha256=iLAOlDrcbHEsIIUmgq0Tp1ZOY9O3Ot3wj2-bI8iYbSs,32148
18
+ nvidia/cuda_cupti/include/cupti_nvtx_cbid.h,sha256=_azPtR1g4qivvX7qbvHRUg0RHCWF7iEOJyHMN9qZe9E,5912
19
+ nvidia/cuda_cupti/include/cupti_pcsampling.h,sha256=uT_DtFN0Bye6ADtxfKXUAc8BcrFefotf-VtTuKQGJx0,32395
20
+ nvidia/cuda_cupti/include/cupti_pcsampling_util.h,sha256=gEiMBes3mtpDJqauxqUtfe0csY4J31qpdg2Cp8On95E,13060
21
+ nvidia/cuda_cupti/include/cupti_profiler_target.h,sha256=LWNFuYyotgGhCKY7TS48uVGxjeuOAuANWSnB8yfOfvo,31596
22
+ nvidia/cuda_cupti/include/cupti_result.h,sha256=sOBZCRuRVHvcbIyDlzyLeina5YXwIQH21rVr3FPoB6M,12026
23
+ nvidia/cuda_cupti/include/cupti_runtime_cbid.h,sha256=ZpomdRK7Fhn_NZYiiq5b3AyNZX3gznot-aX4dk-tsZI,44182
24
+ nvidia/cuda_cupti/include/cupti_target.h,sha256=x4Vz1Upb6m9ixmVpmGaKQldDWYQI3OZ-ocEXGzNK0EE,1263
25
+ nvidia/cuda_cupti/include/cupti_version.h,sha256=7XDJSIWpeJU8lrp0cOyma7dXXSGK4bdT1G8akxu8D_Q,4344
26
+ nvidia/cuda_cupti/include/generated_cudaGL_meta.h,sha256=dfd2QuaRdEjbStOKvaQLi1Md_qrpRQh8PfyZznJ8bWY,3115
27
+ nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h,sha256=fAedsoQxaU3hIAApAWDOKsa9kgcuQw4tdyf8klLm-3k,1453
28
+ nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h,sha256=LXOqvQCej0sCgAT1LUKKYZ466EFxN4hIwf9oIhXOLF0,2250
29
+ nvidia/cuda_cupti/include/generated_cuda_meta.h,sha256=qZhsMxL-CURycqC2YkkioSDiD5pA8q22GOje2bOeviU,87152
30
+ nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h,sha256=YCkUMRP93XtDGLEH7DOJCUuhdRcVsO1vQwF_K9AuDfI,64332
31
+ nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h,sha256=8OLqWN26aEYpTWUXtbHJvA5GYhVv3ybYVOTW7yK37z8,1367
32
+ nvidia/cuda_cupti/include/generated_cudart_removed_meta.h,sha256=X3I5WXmhtsJNNlgY7coJ5vg4t11G5FRR6Xo7MboIeck,5172
33
+ nvidia/cuda_cupti/include/generated_nvtx_meta.h,sha256=YHb_RD8g3s4m8PJn7Z0wnxvUHarl7BOAX5ADr-BL3HI,7513
34
+ nvidia/cuda_cupti/include/nvperf_common.h,sha256=MMZrDvDdtG2DSS0h2B8AR1aPyt6UmeWwH-Dc_XsxaHo,10422
35
+ nvidia/cuda_cupti/include/nvperf_cuda_host.h,sha256=xEapxwvdl96uV-On-c8LY2lvwVNfIjq-rAgj9_dYbqo,8299
36
+ nvidia/cuda_cupti/include/nvperf_host.h,sha256=3mcgAEbB9uaDfWheRqC8gLlTiTggc_auV8PE9dTShx4,66289
37
+ nvidia/cuda_cupti/include/nvperf_target.h,sha256=jVR2zEO2KmMta0C-qTGuS9V6rhVyMNnRnOU4QJSiPrc,21476
38
+ nvidia/cuda_cupti/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
39
+ nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-310.pyc,,
40
+ nvidia/cuda_cupti/lib/libcheckpoint.so,sha256=Fib_EZWCvKRmBbxtSXaat1MUuZk91ke9ZKkN7HR7yEM,1534104
41
+ nvidia/cuda_cupti/lib/libcupti.so.12,sha256=q8YxAOnPUWuO0folNUrlPb_o30g4rFJdjXODMsIZjcI,7419504
42
+ nvidia/cuda_cupti/lib/libnvperf_host.so,sha256=lc7EKudwwfIlHSBLA-EtVv2y5VYeSJjAe0A4L-JHRYk,28636664
43
+ nvidia/cuda_cupti/lib/libnvperf_target.so,sha256=-iWHyNIR-8hei4jMoLzr54yMxAyBsMN2POV6yeY_Bmk,5895416
44
+ nvidia/cuda_cupti/lib/libpcsamplingutil.so,sha256=XGCctMdV5khc1HtLdK_imh8aepM88GJz0q6CcPJtb3k,912728
45
+ nvidia_cuda_cupti_cu12-12.1.105.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
46
+ nvidia_cuda_cupti_cu12-12.1.105.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
47
+ nvidia_cuda_cupti_cu12-12.1.105.dist-info/METADATA,sha256=xrOx7eliZP6--5Pla2AJW0e8XI3H0XDb9ZEN7DXghPs,1553
48
+ nvidia_cuda_cupti_cu12-12.1.105.dist-info/RECORD,,
49
+ nvidia_cuda_cupti_cu12-12.1.105.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
50
+ nvidia_cuda_cupti_cu12-12.1.105.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-manylinux1_x86_64
5
+
venv/lib/python3.10/site-packages/nvidia_cuda_cupti_cu12-12.1.105.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ nvidia
venv/lib/python3.10/site-packages/packaging-24.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ This software is made available under the terms of *either* of the licenses
2
+ found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made
3
+ under the terms of *both* these licenses.
venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.APACHE ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
venv/lib/python3.10/site-packages/packaging-24.0.dist-info/LICENSE.BSD ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) Donald Stufft and individual contributors.
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright notice,
8
+ this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+
14
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
venv/lib/python3.10/site-packages/packaging-24.0.dist-info/METADATA ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: packaging
3
+ Version: 24.0
4
+ Summary: Core utilities for Python packages
5
+ Author-email: Donald Stufft <[email protected]>
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/x-rst
8
+ Classifier: Development Status :: 5 - Production/Stable
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: Apache Software License
11
+ Classifier: License :: OSI Approved :: BSD License
12
+ Classifier: Programming Language :: Python
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3 :: Only
15
+ Classifier: Programming Language :: Python :: 3.7
16
+ Classifier: Programming Language :: Python :: 3.8
17
+ Classifier: Programming Language :: Python :: 3.9
18
+ Classifier: Programming Language :: Python :: 3.10
19
+ Classifier: Programming Language :: Python :: 3.11
20
+ Classifier: Programming Language :: Python :: 3.12
21
+ Classifier: Programming Language :: Python :: Implementation :: CPython
22
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
23
+ Classifier: Typing :: Typed
24
+ Project-URL: Documentation, https://packaging.pypa.io/
25
+ Project-URL: Source, https://github.com/pypa/packaging
26
+
27
+ packaging
28
+ =========
29
+
30
+ .. start-intro
31
+
32
+ Reusable core utilities for various Python Packaging
33
+ `interoperability specifications <https://packaging.python.org/specifications/>`_.
34
+
35
+ This library provides utilities that implement the interoperability
36
+ specifications which have clearly one correct behaviour (eg: :pep:`440`)
37
+ or benefit greatly from having a single shared implementation (eg: :pep:`425`).
38
+
39
+ .. end-intro
40
+
41
+ The ``packaging`` project includes the following: version handling, specifiers,
42
+ markers, requirements, tags, utilities.
43
+
44
+ Documentation
45
+ -------------
46
+
47
+ The `documentation`_ provides information and the API for the following:
48
+
49
+ - Version Handling
50
+ - Specifiers
51
+ - Markers
52
+ - Requirements
53
+ - Tags
54
+ - Utilities
55
+
56
+ Installation
57
+ ------------
58
+
59
+ Use ``pip`` to install these utilities::
60
+
61
+ pip install packaging
62
+
63
+ The ``packaging`` library uses calendar-based versioning (``YY.N``).
64
+
65
+ Discussion
66
+ ----------
67
+
68
+ If you run into bugs, you can file them in our `issue tracker`_.
69
+
70
+ You can also join ``#pypa`` on Freenode to ask questions or get involved.
71
+
72
+
73
+ .. _`documentation`: https://packaging.pypa.io/
74
+ .. _`issue tracker`: https://github.com/pypa/packaging/issues
75
+
76
+
77
+ Code of Conduct
78
+ ---------------
79
+
80
+ Everyone interacting in the packaging project's codebases, issue trackers, chat
81
+ rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
82
+
83
+ .. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
84
+
85
+ Contributing
86
+ ------------
87
+
88
+ The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as
89
+ well as how to report a potential security issue. The documentation for this
90
+ project also covers information about `project development`_ and `security`_.
91
+
92
+ .. _`project development`: https://packaging.pypa.io/en/latest/development/
93
+ .. _`security`: https://packaging.pypa.io/en/latest/security/
94
+
95
+ Project History
96
+ ---------------
97
+
98
+ Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for
99
+ recent changes and project history.
100
+
101
+ .. _`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/
102
+
venv/lib/python3.10/site-packages/packaging-24.0.dist-info/RECORD ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ packaging-24.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ packaging-24.0.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197
3
+ packaging-24.0.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
4
+ packaging-24.0.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344
5
+ packaging-24.0.dist-info/METADATA,sha256=0dESdhY_wHValuOrbgdebiEw04EbX4dkujlxPdEsFus,3203
6
+ packaging-24.0.dist-info/RECORD,,
7
+ packaging-24.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
8
+ packaging/__init__.py,sha256=UzotcV07p8vcJzd80S-W0srhgY8NMVD_XvJcZ7JN-tA,496
9
+ packaging/__pycache__/__init__.cpython-310.pyc,,
10
+ packaging/__pycache__/_elffile.cpython-310.pyc,,
11
+ packaging/__pycache__/_manylinux.cpython-310.pyc,,
12
+ packaging/__pycache__/_musllinux.cpython-310.pyc,,
13
+ packaging/__pycache__/_parser.cpython-310.pyc,,
14
+ packaging/__pycache__/_structures.cpython-310.pyc,,
15
+ packaging/__pycache__/_tokenizer.cpython-310.pyc,,
16
+ packaging/__pycache__/markers.cpython-310.pyc,,
17
+ packaging/__pycache__/metadata.cpython-310.pyc,,
18
+ packaging/__pycache__/requirements.cpython-310.pyc,,
19
+ packaging/__pycache__/specifiers.cpython-310.pyc,,
20
+ packaging/__pycache__/tags.cpython-310.pyc,,
21
+ packaging/__pycache__/utils.cpython-310.pyc,,
22
+ packaging/__pycache__/version.cpython-310.pyc,,
23
+ packaging/_elffile.py,sha256=hbmK8OD6Z7fY6hwinHEUcD1by7czkGiNYu7ShnFEk2k,3266
24
+ packaging/_manylinux.py,sha256=1ng_TqyH49hY6s3W_zVHyoJIaogbJqbIF1jJ0fAehc4,9590
25
+ packaging/_musllinux.py,sha256=kgmBGLFybpy8609-KTvzmt2zChCPWYvhp5BWP4JX7dE,2676
26
+ packaging/_parser.py,sha256=zlsFB1FpMRjkUdQb6WLq7xON52ruQadxFpYsDXWhLb4,10347
27
+ packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
28
+ packaging/_tokenizer.py,sha256=alCtbwXhOFAmFGZ6BQ-wCTSFoRAJ2z-ysIf7__MTJ_k,5292
29
+ packaging/markers.py,sha256=eH-txS2zq1HdNpTd9LcZUcVIwewAiNU0grmq5wjKnOk,8208
30
+ packaging/metadata.py,sha256=w7jPEg6mDf1FTZMn79aFxFuk4SKtynUJtxr2InTxlV4,33036
31
+ packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
32
+ packaging/requirements.py,sha256=dgoBeVprPu2YE6Q8nGfwOPTjATHbRa_ZGLyXhFEln6Q,2933
33
+ packaging/specifiers.py,sha256=dB2DwbmvSbEuVilEyiIQ382YfW5JfwzXTfRRPVtaENY,39784
34
+ packaging/tags.py,sha256=fedHXiOHkBxNZTXotXv8uXPmMFU9ae-TKBujgYHigcA,18950
35
+ packaging/utils.py,sha256=XgdmP3yx9-wQEFjO7OvMj9RjEf5JlR5HFFR69v7SQ9E,5268
36
+ packaging/version.py,sha256=XjRBLNK17UMDgLeP8UHnqwiY3TdSi03xFQURtec211A,16236
venv/lib/python3.10/site-packages/packaging-24.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: flit 3.9.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/apis.cpython-310.pyc ADDED
Binary file (18.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/config.cpython-310.pyc ADDED
Binary file (739 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/_functorch/__pycache__/deprecated.cpython-310.pyc ADDED
Binary file (4.47 kB). View file
 
venv/lib/python3.10/site-packages/torch/futures/__init__.py ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union
4
+
5
+ import torch
6
+
7
# Public API of torch.futures.
__all__ = ['Future', 'collect_all', 'wait_all']

# T is the value type held by a Future; S is the value type produced by a
# ``then`` callback (and therefore held by the chained Future).
T = TypeVar("T")
S = TypeVar("S")
11
+
12
+
13
class _PyFutureMeta(type(torch._C.Future), type(Generic)):  # type: ignore[misc, no-redef]
    # ``Future`` below inherits from both the pybind-defined ``torch._C.Future``
    # and ``Generic[T]``, whose metaclasses differ. Deriving from both
    # metaclasses here resolves the would-be metaclass conflict.
    pass
15
+
16
+
17
class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
    r"""
    Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous
    execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It
    also exposes a set of APIs to add callback functions and set results.

    .. warning:: GPU support is a beta feature, subject to changes.
    """

    def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None):
        r"""
        Create an empty unset ``Future``. If the future is intended to hold
        values containing CUDA tensors, (a superset of) their CUDA devices must
        be specified at construction. (This is only supported if
        ``torch.cuda.is_available()`` returns ``True``). This is needed to
        ensure proper CUDA stream synchronization. The child futures, returned
        by the ``then`` method, will inherit these devices.

        Args:
            devices(``List[Union[int, str, torch.device]]``, optional): the set
                of devices on which tensors contained in this future's value are
                allowed to reside and on which callbacks are allowed to operate.
        """
        if devices is None:
            devices = []
        # Normalize every entry (int index, string, or device) to torch.device
        # before handing the list to the C++ Future constructor.
        super().__init__([torch.device(d) for d in devices])

    def done(self) -> bool:
        r"""
        Return ``True`` if this ``Future`` is done. A ``Future`` is done if it
        has a result or an exception.

        If the value contains tensors that reside on GPUs, ``Future.done()``
        will return ``True`` even if the asynchronous kernels that are
        populating those tensors haven't yet completed running on the device,
        because at such stage the result is already usable, provided one
        performs the appropriate synchronizations (see :meth:`wait`).
        """
        return super().done()

    def wait(self) -> T:
        r"""
        Block until the value of this ``Future`` is ready.

        If the value contains tensors that reside on GPUs, then an additional
        synchronization is performed with the kernels (executing on the device)
        which may be asynchronously populating those tensors. Such sync is
        non-blocking, which means that ``wait()`` will insert the necessary
        instructions in the current streams to ensure that further operations
        enqueued on those streams will be properly scheduled after the async
        kernels but, once that is done, ``wait()`` will return, even if those
        kernels are still running. No further synchronization is required when
        accessing and using the values, as long as one doesn't change streams.

        Returns:
            The value held by this ``Future``. If the function (callback or RPC)
            creating the value has thrown an error, this ``wait`` method will
            also throw an error.
        """
        return super().wait()

    def value(self) -> T:
        r"""
        Obtain the value of an already-completed future.

        This method should only be called after a call to :meth:`wait` has
        completed, or inside a callback function passed to :meth:`then`. In
        other cases this ``Future`` may not yet hold a value and calling
        ``value()`` could fail.

        If the value contains tensors that reside on GPUs, then this method will
        *not* perform any additional synchronization. This should be done
        beforehand, separately, through a call to :meth:`wait` (except within
        callbacks, for which it's already being taken care of by :meth:`then`).

        Returns:
            The value held by this ``Future``. If the function (callback or RPC)
            creating the value has thrown an error, this ``value()`` method will
            also throw an error.
        """
        return super().value()

    def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
        r"""
        Append the given callback function to this ``Future``, which will be run
        when the ``Future`` is completed. Multiple callbacks can be added to
        the same ``Future``, but the order in which they will be executed cannot
        be guaranteed (to enforce a certain order consider chaining:
        ``fut.then(cb1).then(cb2)``). The callback must take one argument, which
        is the reference to this ``Future``. The callback function can use the
        :meth:`value` method to get the value. Note that if this ``Future`` is
        already completed, the given callback will be run immediately inline.

        If the ``Future``'s value contains tensors that reside on GPUs, the
        callback might be invoked while the async kernels that are populating
        those tensors haven't yet finished executing on the device. However, the
        callback will be invoked with some dedicated streams set as current
        (fetched from a global pool) which will be synchronized with those
        kernels. Hence any operation performed by the callback on these tensors
        will be scheduled on the device after the kernels complete. In other
        words, as long as the callback doesn't switch streams, it can safely
        manipulate the result without any additional synchronization. This is
        similar to the non-blocking behavior of :meth:`wait`.

        Similarly, if the callback returns a value that contains tensors that
        reside on a GPU, it can do so even if the kernels that are producing
        these tensors are still running on the device, as long as the callback
        didn't change streams during its execution. If one wants to change
        streams, one must be careful to re-synchronize them with the original
        streams, that is, those that were current when the callback was invoked.

        Args:
            callback(``Callable``): a ``Callable`` that takes this ``Future`` as
                                    the only argument.

        Returns:
            A new ``Future`` object that holds the return value of the
            ``callback`` and will be marked as completed when the given
            ``callback`` finishes.

        .. note:: Note that if the callback function throws, either
            through the original future being completed with an exception and
            calling ``fut.wait()``, or through other code in the callback, the
            future returned by ``then`` will be marked appropriately with the
            encountered error. However, if this callback later completes
            additional futures, those futures are not marked as completed with
            an error and the user is responsible for handling completion/waiting
            on those futures independently.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> def callback(fut):
            ...     print(f"RPC return value is {fut.wait()}.")
            >>> fut = torch.futures.Future()
            >>> # The inserted callback will print the return value when
            >>> # receiving the response from "worker1"
            >>> cb_fut = fut.then(callback)
            >>> chain_cb_fut = cb_fut.then(
            ...     lambda x : print(f"Chained cb done. {x.wait()}")
            ... )
            >>> fut.set_result(5)
            RPC return value is 5.
            Chained cb done. None
        """
        # The C++ ``then`` returns a plain torch._C.Future; cast only refines
        # the static type to the typed Python wrapper.
        return cast(Future[S], super().then(callback))

    def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
        r"""
        Append the given callback function to this ``Future``, which will be run
        when the ``Future`` is completed. Multiple callbacks can be added to
        the same ``Future``, but the order in which they will be executed cannot
        be guaranteed. The callback must take one argument, which is the
        reference to this ``Future``. The callback function can use the
        :meth:`value` method to get the value. Note that if this ``Future`` is
        already completed, the given callback will be run inline.

        We recommend that you use the :meth:`then` method as it provides a way
        to synchronize after your callback has completed. ``add_done_callback``
        can be cheaper if your callback does not return anything. But both
        :meth:`then` and ``add_done_callback`` use the same callback
        registration API under the hood.

        With respect to GPU tensors, this method behaves in the same way as
        :meth:`then`.

        Args:
            callback(``Future``): a ``Callable`` that takes in one argument,
                which is the reference to this ``Future``.

        .. note:: Note that if the callback function throws, either
            through the original future being completed with an exception and
            calling ``fut.wait()``, or through other code in the callback,
            error handling must be carefully taken care of. For example, if
            this callback later completes additional futures, those futures are
            not marked as completed with an error and the user is responsible
            for handling completion/waiting on those futures independently.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> def callback(fut):
            ...     print("This will run after the future has finished.")
            ...     print(fut.wait())
            >>> fut = torch.futures.Future()
            >>> fut.add_done_callback(callback)
            >>> fut.set_result(5)
            This will run after the future has finished.
            5
        """
        super().add_done_callback(callback)

    def set_result(self, result: T) -> None:
        r"""
        Set the result for this ``Future``, which will mark this ``Future`` as
        completed and trigger all attached callbacks. Note that a ``Future``
        cannot be marked completed twice.

        If the result contains tensors that reside on GPUs, this method can be
        called even if the asynchronous kernels that are populating those
        tensors haven't yet completed running on the device, provided that the
        streams on which those kernels were enqueued are set as the current ones
        when this method is called. Put simply, it's safe to call this method
        immediately after launching those kernels, without any additional
        synchronization, as long as one doesn't change streams in between. This
        method will record events on all the relevant current streams and will
        use them to ensure proper scheduling for all the consumers of this
        ``Future``.

        Args:
            result (object): the result object of this ``Future``.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> import threading
            >>> import time
            >>> def slow_set_future(fut, value):
            ...     time.sleep(0.5)
            ...     fut.set_result(value)
            >>> fut = torch.futures.Future()
            >>> t = threading.Thread(
            ...     target=slow_set_future,
            ...     args=(fut, torch.ones(2) * 3)
            ... )
            >>> t.start()
            >>> print(fut.wait())
            tensor([3., 3.])
            >>> t.join()
        """
        super().set_result(result)

    def set_exception(self, result: T) -> None:
        r"""
        Set an exception for this ``Future``, which will mark this ``Future`` as
        completed with an error and trigger all attached callbacks. Note that
        when calling wait()/value() on this ``Future``, the exception set here
        will be raised inline.

        Args:
            result (BaseException): the exception for this ``Future``.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> fut = torch.futures.Future()
            >>> fut.set_exception(ValueError("foo"))
            >>> fut.wait()
            Traceback (most recent call last):
            ...
            ValueError: foo
        """
        assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."

        # The exception is stored as the future's *result*; installing this
        # unwrap function makes wait()/value() re-raise it instead of
        # returning it.
        def raise_error(fut_result):
            raise fut_result

        super()._set_unwrap_func(raise_error)
        self.set_result(result)  # type: ignore[arg-type]
272
+
273
+
274
def collect_all(futures: List[Future]) -> Future[List[Future]]:
    r"""
    Combine the given :class:`~torch.futures.Future` objects into a single
    :class:`~torch.futures.Future` that completes once every sub-future has
    completed.

    Args:
        futures (list): the :class:`~torch.futures.Future` objects to combine.

    Returns:
        A :class:`~torch.futures.Future` whose value is the list of the
        passed-in Futures.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
        >>> fut0 = torch.futures.Future()
        >>> fut1 = torch.futures.Future()
        >>> fut = torch.futures.collect_all([fut0, fut1])
        >>> fut0.set_result(0)
        >>> fut1.set_result(1)
        >>> fut_list = fut.wait()
        >>> print(f"fut0 result = {fut_list[0].wait()}")
        fut0 result = 0
        >>> print(f"fut1 result = {fut_list[1].wait()}")
        fut1 result = 1
    """
    # The casts only adjust the static types between the Python wrapper and
    # the underlying torch._C.Future; no conversion happens at runtime.
    c_futures = cast(List[torch._C.Future], futures)
    combined = torch._C._collect_all(c_futures)
    return cast(Future[List[Future]], combined)
301
+
302
+
303
def wait_all(futures: List[Future]) -> List:
    r"""
    Block until every provided future is complete and return their values.

    If any of the futures holds an error, this function stops early and
    reports that error without waiting for the remaining futures.

    Args:
        futures (list): a list of :class:`~torch.futures.Future` objects.

    Returns:
        A list with the completed value of each :class:`~torch.futures.Future`.
        This function raises if ``wait`` on any of them raises.
    """
    combined = torch._C._collect_all(cast(List[torch._C.Future], futures))
    results = []
    for sub_future in combined.wait():
        results.append(sub_future.wait())
    return results
venv/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/jit/__init__.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ from contextlib import contextmanager
4
+ from typing import Any, Iterator
5
+
6
+ import torch._C
7
+
8
+ # These are imported so users can access them from the `torch.jit` module
9
+ from torch._jit_internal import (
10
+ _Await,
11
+ _drop,
12
+ _IgnoreContextManager,
13
+ _isinstance,
14
+ _overload,
15
+ _overload_method,
16
+ export,
17
+ Final,
18
+ Future,
19
+ ignore,
20
+ is_scripting,
21
+ unused,
22
+ )
23
+ from torch.jit._async import fork, wait
24
+ from torch.jit._await import _awaitable, _awaitable_nowait, _awaitable_wait
25
+ from torch.jit._decomposition_utils import _register_decomposition
26
+ from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations
27
+ from torch.jit._fuser import (
28
+ fuser,
29
+ last_executed_optimized_graph,
30
+ optimized_execution,
31
+ set_fusion_strategy,
32
+ )
33
+ from torch.jit._ir_utils import _InsertPoint
34
+ from torch.jit._script import (
35
+ _ScriptProfile,
36
+ _unwrap_optional,
37
+ Attribute,
38
+ CompilationUnit,
39
+ interface,
40
+ RecursiveScriptClass,
41
+ RecursiveScriptModule,
42
+ script,
43
+ script_method,
44
+ ScriptFunction,
45
+ ScriptModule,
46
+ ScriptWarning,
47
+ )
48
+ from torch.jit._serialization import (
49
+ jit_module_from_flatbuffer,
50
+ load,
51
+ save,
52
+ save_jit_module_to_flatbuffer,
53
+ )
54
+ from torch.jit._trace import (
55
+ _flatten,
56
+ _get_trace_graph,
57
+ _script_if_tracing,
58
+ _unique_state_dict,
59
+ is_tracing,
60
+ ONNXTracedModule,
61
+ TopLevelTracedModule,
62
+ trace,
63
+ trace_module,
64
+ TracedModule,
65
+ TracerWarning,
66
+ TracingCheckError,
67
+ )
68
+
69
+ from torch.utils import set_module
70
+
71
# Public torch.jit API surface; everything listed here is either defined below
# or re-exported from the submodule imports above.
__all__ = [
    "Attribute",
    "CompilationUnit",
    "Error",
    "Future",
    "ScriptFunction",
    "ScriptModule",
    "annotate",
    "enable_onednn_fusion",
    "export",
    "export_opnames",
    "fork",
    "freeze",
    "ignore",
    "isinstance",
    "load",
    "onednn_fusion_enabled",
    "optimize_for_inference",
    "save",
    "script",
    "script_if_tracing",
    "set_fusion_strategy",
    "strict_fusion",
    "trace",
    "trace_module",
    "unused",
    "wait",
]
99
+
100
# For backwards compatibility: underscore-prefixed aliases of the public names,
# kept so existing callers of the old spellings continue to work.
_fork = fork
_wait = wait
_set_fusion_strategy = set_fusion_strategy
104
+
105
+
106
def export_opnames(m):
    r"""
    Generate new bytecode for a Script module.

    Returns what the op list would be for a Script Module based off the current code base.

    If you have a LiteScriptModule and want to get the currently present
    list of ops call _export_operator_list instead.

    Args:
        m: a scripted module; it must expose its underlying C++ module as
            ``m._c``, which is handed to the C++ exporter.
    """
    return torch._C._export_opnames(m._c)
116
+
117
+
118
# torch.jit.Error: expose the C++ JITException under this module so users can
# catch it as ``torch.jit.Error``.
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases: rebrand the pybind class so
# its name and qualified name read as ``Error`` rather than ``JITException``.
Error.__name__ = "Error"
Error.__qualname__ = "Error"
124
+
125
+
126
# for use in python if using annotate
def annotate(the_type, the_value):
    """Use to give type of `the_value` in TorchScript compiler.

    This method is a pass-through function that returns `the_value`, used to hint TorchScript
    compiler the type of `the_value`. It is a no-op when running outside of TorchScript.

    Though TorchScript can infer correct type for most Python expressions, there are some cases where
    type inference can be wrong, including:

    - Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor`
    - Optional types like `Optional[T]` but assigned a valid value of type `T`, TorchScript would assume
      it is type `T` rather than `Optional[T]`

    Note that `annotate()` does not help in `__init__` method of `torch.nn.Module` subclasses because it
    is executed in eager mode. To annotate types of `torch.nn.Module` attributes,
    use :meth:`~torch.jit.Attribute` instead.

    Example:

    .. testcode::

        import torch
        from typing import Dict

        @torch.jit.script
        def fn():
            # Telling TorchScript that this empty dictionary is a (str -> int) dictionary
            # instead of default dictionary type of (str -> Tensor).
            d = torch.jit.annotate(Dict[str, int], {})

            # Without `torch.jit.annotate` above, following statement would fail because of
            # type mismatch.
            d["name"] = 20

    .. testcleanup::

        del fn

    Args:
        the_type: Python type that should be passed to TorchScript compiler as type hint for `the_value`
        the_value: Value or expression to hint type for.

    Returns:
        `the_value` is passed back as return value.
    """
    # In eager mode this is deliberately a no-op; the TorchScript compiler
    # recognizes calls to this function and uses `the_type` during compilation.
    return the_value
173
+
174
+
175
def script_if_tracing(fn):
    """
    Compiles ``fn`` when it is first called during tracing.

    ``torch.jit.script`` has a non-negligible start up time when it is first called due to
    lazy-initializations of many compiler builtins. Therefore you should not use
    it in library code. However, you may want to have parts of your library work
    in tracing even if they use control flow. In these cases, you should use
    ``@torch.jit.script_if_tracing`` to substitute for
    ``torch.jit.script``.

    Args:
        fn: A function to compile.

    Returns:
        If called during tracing, a :class:`ScriptFunction` created by `torch.jit.script` is returned.
        Otherwise, the original function `fn` is returned.
    """
    # Thin public wrapper over the internal implementation in torch.jit._trace.
    return _script_if_tracing(fn)
194
+
195
+
196
# for torch.jit.isinstance
def isinstance(obj, target_type):
    """
    Provide container type refinement in TorchScript.

    It can refine parameterized containers of the List, Dict, Tuple, and Optional types. E.g. ``List[str]``,
    ``Dict[str, List[torch.Tensor]]``, ``Optional[Tuple[int,str,int]]``. It can also
    refine basic types such as bools and ints that are available in TorchScript.

    Args:
        obj: object to refine the type of
        target_type: type to try to refine obj to
    Returns:
        ``bool``: True if obj was successfully refined to the type of target_type,
            False otherwise with no new type refinement


    Example (using ``torch.jit.isinstance`` for type refinement):
    .. testcode::

        import torch
        from typing import Any, Dict, List

        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, input: Any): # note the Any type
                if torch.jit.isinstance(input, List[torch.Tensor]):
                    for t in input:
                        y = t.clamp(0, 0.5)
                elif torch.jit.isinstance(input, Dict[str, str]):
                    for val in input.values():
                        print(val)

        m = torch.jit.script(MyModule())
        x = [torch.rand(3,3), torch.rand(4,3)]
        m(x)
        y = {"key1":"val1","key2":"val2"}
        m(y)
    """
    # NOTE: this intentionally shadows the builtin `isinstance` within this
    # module; the real check is implemented in torch._jit_internal.
    return _isinstance(obj, target_type)
238
+
239
+
240
class strict_fusion:
    """
    Error out when nodes are not all fused in inference, or not all
    symbolically differentiated in training.

    Example:
        Forcing fusion of additions.

    .. code-block:: python

        @torch.jit.script
        def foo(x):
            with torch.jit.strict_fusion():
                return x + x + x

    """

    def __init__(self):
        # The strict-fusion check only takes effect under TorchScript; warn
        # when constructed in eager mode so the no-op is not silent.
        if not torch._jit_internal.is_scripting():
            warnings.warn("Only works in script mode")

    def __enter__(self):
        return None

    def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> None:
        return None
266
+
267
+
268
+ # Context manager for globally hiding source ranges when printing graphs.
269
+ # Note that these functions are exposed to Python as static members of the
270
+ # Graph class, so mypy checks need to be skipped.
271
+ @contextmanager
272
+ def _hide_source_ranges() -> Iterator[None]:
273
+ old_enable_source_ranges = torch._C.Graph.global_print_source_ranges # type: ignore[attr-defined]
274
+ try:
275
+ torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
276
+ yield
277
+ finally:
278
+ torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
279
+
280
+
281
def enable_onednn_fusion(enabled: bool) -> None:
    """Enable or disables onednn JIT fusion based on the parameter `enabled`."""
    torch._C._jit_set_llga_enabled(enabled)
284
+
285
+
286
def onednn_fusion_enabled():
    """Return whether onednn JIT fusion is enabled (mirrors :func:`enable_onednn_fusion`)."""
    return torch._C._jit_llga_enabled()
289
+
290
+
291
# Drop ``Any`` from the module namespace; it was only needed for the
# annotations above and should not leak as ``torch.jit.Any``.
del Any

# Initialize the JIT bindings eagerly at import time; a failure here means
# TorchScript is unusable, so fail loudly.
if not torch._C._jit_init():
    raise RuntimeError("JIT initialization failed")
venv/lib/python3.10/site-packages/torch/jit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (8.66 kB). View file
 
venv/lib/python3.10/site-packages/torch/jit/__pycache__/_logging.cpython-310.pyc ADDED
Binary file (403 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/jit/__pycache__/_monkeytype_config.cpython-310.pyc ADDED
Binary file (6.96 kB). View file
 
venv/lib/python3.10/site-packages/torch/jit/__pycache__/_pickle.cpython-310.pyc ADDED
Binary file (873 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/jit/__pycache__/_recursive.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/jit/_async.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Async API.
2
+
3
+ This module contains the API for parallelism in TorchScript, notably:
4
+ * torch.jit.fork
5
+ * torch.jit.wait
6
+
7
+ This is not intended to be imported directly; please use the exposed
8
+ functionalities in `torch.jit`.
9
+ """
10
+
11
+ import torch
12
+ from torch._jit_internal import Future
13
+ from torch.jit._builtins import _register_builtin
14
+
15
+ from torch.utils import set_module
16
+
17
+ set_module(Future, "torch.jit")
18
+
19
+
20
def fork(func, *args, **kwargs):
    r"""
    Create an asynchronous task executing `func` and a reference to the value of the result of this execution.

    `fork` will return immediately, so the return value of `func` may not have been computed yet. To force completion
    of the task and access the return value invoke `torch.jit.wait` on the Future. `fork` invoked
    with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
    nested, and may be invoked with positional and keyword arguments.
    Asynchronous execution will only occur when run in TorchScript. If run in pure python,
    `fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
    while tracing, however the `fork` and `wait` calls will be captured in the exported IR Graph.

    .. warning::
        `fork` tasks will execute non-deterministically. We recommend only spawning
        parallel fork tasks for pure functions that do not modify their inputs,
        module attributes, or global state.

    Args:
        func (callable or torch.nn.Module):  A Python function or `torch.nn.Module`
            that will be invoked. If executed in TorchScript, it will execute asynchronously,
            otherwise it will not. Traced invocations of fork will be captured in the IR.
        ``*args``, ``**kwargs``: arguments to invoke `func` with.
    Returns:
        `torch.jit.Future[T]`: a reference to the execution of `func`. The value `T`
        can only be accessed by forcing completion of `func` through `torch.jit.wait`.

    Example (fork a free function):

    .. code-block:: python

        import torch
        from torch import Tensor
        def foo(a : Tensor, b : int) -> Tensor:
            return a + b
        def bar(a):
            fut : torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
            return torch.jit.wait(fut)
        script_bar = torch.jit.script(bar)
        input = torch.tensor(2)
        # only the scripted version executes asynchronously
        assert script_bar(input) == bar(input)
        # trace is not run asynchronously, but fork is captured in IR
        graph = torch.jit.trace(bar, (input,)).graph
        assert "fork" in str(graph)

    Example (fork a module method):

    .. code-block:: python

        import torch
        from torch import Tensor
        class AddMod(torch.nn.Module):
            def forward(self, a: Tensor, b : int):
                return a + b
        class Mod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.mod = AddMod()
            def forward(self, input):
                fut = torch.jit.fork(self.mod, input, b=2)
                return torch.jit.wait(fut)
        input = torch.tensor(2)
        mod = Mod()
        assert mod(input) == torch.jit.script(mod).forward(input)
    """
    # All of the scheduling semantics described above live in the C++ binding;
    # this wrapper only forwards the call.
    return torch._C.fork(func, *args, **kwargs)
86
+
87
+
88
def wait(future):
    r"""
    Force completion of a `torch.jit.Future[T]` asynchronous task, returning the result of the task.

    See :func:`~fork` for docs and examples.
    Args:
        future (torch.jit.Future[T]): an asynchronous task reference, created through `torch.jit.fork`
    Returns:
        `T`: the return value of the completed task
    """
    return torch._C.wait(future)


# Register ``wait`` as the implementation of the ``aten::wait`` builtin so
# TorchScript resolves calls to this Python function to that op.
_register_builtin(wait, "aten::wait")
venv/lib/python3.10/site-packages/torch/jit/_await.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch._jit_internal import _Await
3
+ from torch.jit._builtins import _register_builtin
4
+
5
+ from torch.utils import set_module
6
+
7
+ set_module(_Await, "torch.jit")
8
+
9
+
10
def _awaitable(func, *args, **kwargs):
    r"""Create an Await object that calls `func` with the given arguments when its result is first requested."""
    aw = torch._C._awaitable(func, *args, **kwargs)
    return aw
13
+
14
+
15
def _awaitable_wait(aw):
    r"""Return the result of an Await, running the delayed function now if it has not completed yet."""
    result = torch._C._awaitable_wait(aw)
    return result
18
+
19
+
20
def _awaitable_nowait(o):
    r"""Wrap the already-computed value `o` in a completed Await."""
    completed = torch._C._awaitable_nowait(o)
    return completed
23
+
24
+
25
+ _register_builtin(_awaitable_wait, "prim::awaitable_wait")
26
+ _register_builtin(_awaitable_nowait, "prim::awaitable_nowait")
venv/lib/python3.10/site-packages/torch/jit/_builtins.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cmath
2
+ import math
3
+ import warnings
4
+
5
+ from collections import OrderedDict
6
+ from typing import Dict, Optional
7
+
8
+ import torch
9
+ import torch.backends.cudnn as cudnn
10
+
11
+ from ..nn.modules.utils import _list_with_default, _pair, _quadruple, _single, _triple
12
+
13
# Lazily populated by _get_builtin_table(); maps id(python_fn) -> builtin schema name.
_builtin_table: Optional[Dict[int, str]] = None

# Modules whose public callables are registered wholesale as "aten::<name>" builtins.
_modules_containing_builtins = (torch, torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._nested, torch._C._sparse, torch._C._special)  # type: ignore[attr-defined] # noqa: B950

# NOTE: duplicate (math.asinh, "aten::asinh") and (math.atanh, "aten::atanh")
# entries were removed from this list; the first occurrence already covers them
# and the table is keyed by id(fn), so the duplicates were pure redundancy.
_builtin_ops = [
    # Pairs of (function, op_name)
    (_pair, "aten::_pair"),
    (_quadruple, "aten::_quadruple"),
    (_single, "aten::_single"),
    (_triple, "aten::_triple"),
    (_list_with_default, "aten::list_with_default"),
    (OrderedDict, "aten::dict"),
    (dict, "aten::dict"),
    (cudnn.is_acceptable, "aten::cudnn_is_acceptable"),
    (math.ceil, "aten::ceil"),
    (math.copysign, "aten::copysign"),
    (math.erf, "aten::erf"),
    (math.erfc, "aten::erfc"),
    (math.exp, "aten::exp"),
    (math.expm1, "aten::expm1"),
    (math.fabs, "aten::fabs"),
    (math.floor, "aten::floor"),
    (math.gamma, "aten::gamma"),
    (math.lgamma, "aten::lgamma"),
    (math.log, "aten::log"),
    (math.log10, "aten::log10"),
    (math.log1p, "aten::log1p"),
    (math.pow, "aten::pow"),
    (math.sqrt, "aten::sqrt"),
    (math.isnan, "aten::isnan"),
    (math.asinh, "aten::asinh"),
    (math.atanh, "aten::atanh"),
    (math.cosh, "aten::cosh"),
    (math.sinh, "aten::sinh"),
    (math.tanh, "aten::tanh"),
    (math.acos, "aten::acos"),
    (math.asin, "aten::asin"),
    (math.atan, "aten::atan"),
    (math.atan2, "aten::atan2"),
    (math.cos, "aten::cos"),
    (math.sin, "aten::sin"),
    (math.tan, "aten::tan"),
    (math.acosh, "aten::acosh"),
    (math.fmod, "aten::fmod"),
    (math.modf, "aten::modf"),
    (math.factorial, "aten::factorial"),
    (math.frexp, "aten::frexp"),
    (math.isinf, "aten::isinf"),
    (math.degrees, "aten::degrees"),
    (math.radians, "aten::radians"),
    (cmath.isnan, "aten::isnan"),
    (cmath.isfinite, "aten::isfinite"),
    (cmath.isinf, "aten::isinf"),
    (cmath.phase, "aten::angle"),
    (cmath.rect, "aten::polar"),
    (cmath.log, "aten::log"),
    (cmath.log10, "aten::log10"),
    (cmath.sqrt, "aten::sqrt"),
    (cmath.exp, "aten::exp"),
    (cmath.sin, "aten::sin"),
    (cmath.tan, "aten::tan"),
    (cmath.cos, "aten::cos"),
    (cmath.asin, "aten::asin"),
    (cmath.acos, "aten::acos"),
    (cmath.atan, "aten::atan"),
    (cmath.sinh, "aten::sinh"),
    (cmath.cosh, "aten::cosh"),
    (cmath.tanh, "aten::tanh"),
    (cmath.asinh, "aten::asinh"),
    (cmath.acosh, "aten::acosh"),
    (cmath.atanh, "aten::atanh"),
    (math.ldexp, "aten::ldexp"),
    (torch._assert, "aten::_assert"),
    (torch.autograd.grad, "aten::grad"),
    (torch.autograd.backward, "aten::backward"),
    (torch._C._infer_size, "aten::_infer_size"),
    (torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"),  # type: ignore[attr-defined]
    (torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"),
    (torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"),
    (torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"),
    (torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"),
    (torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"),
    (torch._C._get_tracing_state, "aten::_get_tracing_state"),
    (torch._C._get_cpu_capability, "aten::_get_cpu_capability"),
    (warnings.warn, "aten::warn"),
    (torch._VF.stft, "aten::stft"),  # type: ignore[attr-defined]
    (torch._VF.istft, "aten::istft"),  # type: ignore[attr-defined]
    (torch._VF.cdist, "aten::cdist"),  # type: ignore[attr-defined]
    (torch._VF.norm, "aten::norm"),  # type: ignore[attr-defined]
    (torch._VF.unique_dim, "aten::unique_dim"),
    (torch._VF.unique_consecutive, "aten::unique_consecutive"),  # type: ignore[attr-defined]
    (torch._VF.nuclear_norm, "aten::nuclear_norm"),
    (torch._VF.frobenius_norm, "aten::frobenius_norm"),
    (torch._VF.tensordot, "aten::tensordot"),  # type: ignore[attr-defined]
]
110
+
111
+ # ops in torch.functional are bound to torch
112
+ # in these cases, we want to resolve the function to their python implementation
113
+ # instead of looking up a builtin "aten::" schema
114
+
115
+
116
def _gen_torch_functional_registered_ops():
    """Collect the torch.functional ops that must resolve to their Python source.

    Eventually this should encompass all of torch/functional.py
    (torch.functional.__all__), but we are currently only able to compile some
    of the functions; additionally, some functions directly map to their
    aten:: implementations.
    """
    # TODO: add support for more ops
    op_names = (
        "stft",
        "istft",
        "lu",
        "cdist",
        "norm",
        "unique",
        "unique_consecutive",
        "tensordot",
    )
    registered = set()
    for op_name in op_names:
        registered.add(getattr(torch.functional, op_name))
    return registered


_functional_registered_ops = _gen_torch_functional_registered_ops()


def _is_special_functional_bound_op(fn):
    """Return True if `fn` is one of the torch.functional ops compiled from Python source."""
    return fn in _functional_registered_ops
139
+
140
+
141
# lazily built to ensure the correct initialization order
def _get_builtin_table():
    """Build (on first call) and return the id(fn) -> builtin-schema-name table.

    The result is memoized in the module-level `_builtin_table`; construction
    is deferred so that every module listed in `_modules_containing_builtins`
    is fully initialized before its callables are enumerated.
    """
    global _builtin_table
    if _builtin_table is not None:
        # Already built once; reuse the memoized table.
        return _builtin_table
    _builtin_table = {}

    def register_all(mod):
        # Register every callable attribute of `mod` under "aten::<name>",
        # skipping the torch.functional ops that are compiled from Python
        # source and the context managers torch.no_grad / torch.autocast.
        for name in dir(mod):
            v = getattr(mod, name)
            if (
                callable(v)
                and not _is_special_functional_bound_op(v)
                and v is not torch.no_grad
                and v is not torch.autocast
            ):
                # Fixup inconsistency in segment_reduce
                if name == "_segment_reduce":
                    name = name[1:]
                _builtin_ops.append((v, "aten::" + name))

    for mod in _modules_containing_builtins:
        register_all(mod)

    # A few math builtins registered individually on top of the module sweep.
    _builtin_ops.append((math.gcd, "aten::gcd"))
    _builtin_ops.append((math.isfinite, "aten::isfinite"))
    _builtin_ops.append((math.remainder, "aten::mathremainder"))  # type: ignore[attr-defined]

    # Imported here (not at module top) to keep distributed support optional.
    import torch.distributed.autograd as dist_autograd

    if dist_autograd.is_available():
        _builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
        _builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))

    # populate the _builtin_table from _builtin_ops
    for builtin, aten_op in _builtin_ops:
        _builtin_table[id(builtin)] = aten_op

    return _builtin_table
180
+
181
+
182
def _register_builtin(fn, op):
    """Manually associate the Python callable `fn` with the builtin schema name `op`."""
    table = _get_builtin_table()
    table[id(fn)] = op
184
+
185
+
186
def _find_builtin(fn):
    """Return the builtin schema name registered for `fn`, or None if there is none."""
    table = _get_builtin_table()
    return table.get(id(fn))
venv/lib/python3.10/site-packages/torch/jit/_check.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import inspect
3
+ import textwrap
4
+ import warnings
5
+
6
+ import torch
7
+
8
+
9
class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
    """Check the ``__init__`` method of a given ``nn.Module``.

    It ensures that all instance-level attributes can be properly initialized.

    Specifically, we do type inference based on attribute values...even
    if the attribute in question has already been typed using
    Python3-style annotations or ``torch.jit.annotate``. This means that
    setting an instance-level attribute to ``[]`` (for ``List``),
    ``{}`` for ``Dict``), or ``None`` (for ``Optional``) isn't enough
    information for us to properly initialize that attribute.

    An object of this class can walk a given ``nn.Module``'s AST and
    determine if it meets our requirements or not.

    Known limitations
    1. We can only check the AST nodes for certain constructs; we can't
    ``eval`` arbitrary expressions. This means that function calls,
    class instantiations, and complex expressions that resolve to one of
    the "empty" values specified above will NOT be flagged as
    problematic.
    2. We match on string literals, so if the user decides to use a
    non-standard import (e.g. `from typing import List as foo`), we
    won't catch it.

    Example:
        .. code-block:: python

            class M(torch.nn.Module):
                def fn(self):
                    return []

                def __init__(self):
                    super().__init__()
                    self.x: List[int] = []

                def forward(self, x: List[int]):
                    self.x = x
                    return 1

        The above code will pass the ``AttributeTypeIsSupportedChecker``
        check since we have a function call in ``__init__``. However,
        it will still fail later with the ``RuntimeError`` "Tried to set
        nonexistent attribute: x. Did you forget to initialize it in
        __init__()?".

    Args:
        nn_module - The instance of ``torch.nn.Module`` whose
            ``__init__`` method we wish to check
    """

    def check(self, nn_module: torch.nn.Module) -> None:
        # Pull the source text of the module's __init__ so it can be parsed.
        source_lines = inspect.getsource(nn_module.__class__.__init__)

        # Ignore comments no matter the indentation
        def is_useless_comment(line):
            line = line.strip()
            # "# type:" comments carry typing information, so they are kept.
            return line.startswith("#") and not line.startswith("# type:")

        source_lines = "\n".join(
            [l for l in source_lines.split("\n") if not is_useless_comment(l)]
        )

        # This AST only contains the `__init__` method of the nn.Module
        init_ast = ast.parse(textwrap.dedent(source_lines))

        # Get items annotated in the class body
        self.class_level_annotations = list(nn_module.__annotations__.keys())

        # Flag for later: set by visit_Assign when the right-hand side is a
        # Call whose target was already annotated at class level.
        self.visiting_class_level_ann = False

        self.visit(init_ast)

    def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
        # Return True when `node` is the "empty" literal corresponding to the
        # given annotation kind: [] for List, {} for Dict, None for Optional.
        if ann_type == "List":
            # Assigning `[]` to a `List` type gives you a Node where
            # value=List(elts=[], ctx=Load())
            if not isinstance(node, ast.List):
                return False
            if node.elts:
                return False
        elif ann_type == "Dict":
            # Assigning `{}` to a `Dict` type gives you a Node where
            # value=Dict(keys=[], values=[])
            if not isinstance(node, ast.Dict):
                return False
            if node.keys:
                return False
        elif ann_type == "Optional":
            # Assigning `None` to an `Optional` type gives you a
            # Node where value=Constant(value=None, kind=None)
            if not isinstance(node, ast.Constant):
                return False
            # NOTE(review): this truthiness test treats any falsy constant
            # (0, False, "") as "empty", not just None -- confirm intended.
            if node.value:  # type: ignore[attr-defined]
                return False

        return True

    def visit_Assign(self, node):
        """Store assignment state when assigning to a Call Node.

        If we're visiting a Call Node (the right-hand side of an
        assignment statement), we won't be able to check the variable
        that we're assigning to (the left-hand side of an assignment).
        Because of this, we need to store this state in visitAssign.
        (Luckily, we only have to do this if we're assigning to a Call
        Node, i.e. ``torch.jit.annotate``. If we're using normal Python
        annotations, we'll be visiting an AnnAssign Node, which has its
        target built in.)
        """
        try:
            if (
                isinstance(node.value, ast.Call)
                and node.targets[0].attr in self.class_level_annotations
            ):
                self.visiting_class_level_ann = True
        except AttributeError:
            # Target has no `.attr` (e.g. a plain name) -- nothing to track.
            return
        self.generic_visit(node)
        # Reset the flag once the subtree (including any Call) is visited.
        self.visiting_class_level_ann = False

    def visit_AnnAssign(self, node):
        """Visit an AnnAssign node in an ``nn.Module``'s ``__init__`` method.

        It checks if it conforms to our attribute annotation rules."""
        # If we have a local variable
        try:
            if node.target.value.id != "self":
                return
        except AttributeError:
            return

        # If we have an attribute that's already been annotated at the
        # class level
        if node.target.attr in self.class_level_annotations:
            return

        # TODO @ansley: add `Union` once landed

        # NB: Even though `Tuple` is a "container", we don't want to
        # check for it here. `Tuple` functions as an type with an
        # "infinite" number of subtypes, in the sense that you can have
        # `Tuple[())]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`,
        # `Tuple[T2, T1]` and so on, and none of these subtypes can be
        # used in place of the other. Therefore, assigning an empty
        # tuple in `__init__` CORRECTLY means that that variable
        # cannot be reassigned later to a non-empty tuple. Same
        # deal with `NamedTuple`

        containers = {"List", "Dict", "Optional"}

        # If we're not evaluating one of the specified problem types
        try:
            if node.annotation.value.id not in containers:
                return
        except AttributeError:
            # To evaluate a base type (`str`, `int`, etc.), we would
            # have needed to get the name through `node.annotation.id`
            # instead of `node.annotation.value.id`. Seems that we're
            # not evaluating one of our "containers"
            return

        # Check if the assigned variable is empty
        ann_type = node.annotation.value.id
        if not self._is_empty_container(node.value, ann_type):
            return

        warnings.warn(
            "The TorchScript type system doesn't support "
            "instance-level annotations on empty non-base "
            "types in `__init__`. Instead, either 1) use a "
            "type annotation in the class body, or 2) wrap "
            "the type in `torch.jit.Attribute`."
        )

    def visit_Call(self, node):
        """Determine if a Call node is 'torch.jit.annotate' in __init__.

        Visit a Call node in an ``nn.Module``'s ``__init__``
        method and determine if it's ``torch.jit.annotate``. If so,
        see if it conforms to our attribute annotation rules.
        """
        # If we have an attribute that's already been annotated at the
        # class level
        if self.visiting_class_level_ann:
            return

        # If this isn't a call to `torch.jit.annotate`
        try:
            if (
                node.func.value.value.id != "torch"
                or node.func.value.attr != "jit"
                or node.func.attr != "annotate"
            ):
                self.generic_visit(node)
            elif (
                node.func.value.value.id != "jit" or node.func.value.attr != "annotate"
            ):
                self.generic_visit(node)
        except AttributeError:
            # Looks like we didn't even have the right node structure
            # to check for `torch.jit.annotate` in the first place
            self.generic_visit(node)

        # NOTE(review): control falls through here even after generic_visit
        # above, so the argument checks below also run for calls that were
        # not identified as `torch.jit.annotate` -- confirm this is intended.

        # Invariant: we have a `torch.jit.annotate` or a
        # `torch.annotate` call

        # A Call Node for `torch.jit.annotate` should have an `args`
        # list of length 2 where args[0] represents the annotation and
        # args[1] represents the actual value
        if len(node.args) != 2:
            return

        if not isinstance(node.args[0], ast.Subscript):
            return

        # See notes in `visit_AnnAssign` r.e. containers

        containers = {"List", "Dict", "Optional"}

        try:
            ann_type = node.args[0].value.id  # type: ignore[attr-defined]
        except AttributeError:
            return

        if ann_type not in containers:
            return

        # Check if the assigned variable is empty
        if not self._is_empty_container(node.args[1], ann_type):
            return

        warnings.warn(
            "The TorchScript type system doesn't support "
            "instance-level annotations on empty non-base "
            "types in `__init__`. Instead, either 1) use a "
            "type annotation in the class body, or 2) wrap "
            "the type in `torch.jit.Attribute`."
        )
venv/lib/python3.10/site-packages/torch/jit/_dataclass_impls.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Functions for synthesizing magic methods for JIT-compiled dataclasses
2
+ import ast
3
+ import dataclasses
4
+ import inspect
5
+ import os
6
+ from functools import partial
7
+ from typing import Callable, Dict, List
8
+
9
+ from torch._jit_internal import FAKE_FILENAME_PREFIX, is_optional
10
+ from torch._sources import ParsedDef, SourceContext
11
+
12
+
13
def _get_fake_filename(cls, method_name):
    """Build the synthetic filename used to identify a generated dataclass method."""
    return os.path.join(FAKE_FILENAME_PREFIX, cls.__name__, method_name)


def compose_fn(cls, name: str, body_lines: List[str], signature: str) -> ParsedDef:
    """Assemble, parse, and wrap a synthesized method for the dataclass `cls`.

    Args:
        cls: the dataclass the method is being generated for.
        name: the method name (e.g. "__init__").
        body_lines: unindented statements forming the method body.
        signature: the parenthesized parameter list / return annotation text.

    Returns:
        A ParsedDef bundling the parsed AST with its source context.

    Raises:
        RuntimeError: if the synthesized source fails to parse.
    """
    indented_body = "\n".join(f"    {line}" for line in body_lines)
    decl = f"def {name}{signature}:\n{indented_body}"

    # Parse the function declaration
    try:
        py_ast = ast.parse(decl)
    except SyntaxError as e:
        # This should only happen if there's some unforeseeable change
        # in the dataclasses module that makes our synthesized code fail
        raise RuntimeError(
            f"TorchScript failed to synthesize dataclass method '{name}' for class '{cls.__name__}'. "
            "Please file a bug report at <https://github.com/pytorch/pytorch/issues>"
        ) from e

    fake_filename = _get_fake_filename(cls, name)
    source_ctx = SourceContext(
        source=decl, filename=fake_filename, file_lineno=0, leading_whitespace_len=0
    )
    return ParsedDef(
        py_ast,
        ctx=source_ctx,
        source=decl,
        filename=fake_filename,
        file_lineno=0,
    )
42
+
43
+
44
def synthesize__init__(cls) -> ParsedDef:
    """Synthesize a TorchScript-compilable `__init__` for the dataclass `cls`.

    Reads the signature CPython generated for the dataclass, unwraps any
    InitVar annotations (the interpreter cannot handle them), assigns each
    init field to `self`, and forwards InitVars to `__post_init__` if defined.

    Raises:
        NotImplementedError: if any field declares a `default_factory`,
            since supporting that would require compiling a lambda.
    """
    # Supporting default factories in the way that people expect would sort of require us to
    # allow compiling lambda functions, which is not currently supported.
    if any(
        field.default_factory is not dataclasses.MISSING
        for field in dataclasses.fields(cls)
    ):
        raise NotImplementedError(
            "Default factory initializers are not supported in TorchScript dataclasses"
        )

    # Simply read off the generated __init__ signature from CPython's implementation. It'll be
    # almost correct except for InitVar annotations, which we need to handle specially.
    signature = inspect.signature(cls.__init__)

    # Handle InitVars if needed (only works on Python 3.8+, when a `type` attribute was added to InitVar);
    # see CPython commit here https://github.com/python/cpython/commit/01ee12ba35a333e8a6a25c4153c4a21838e9585c
    init_vars: List[str] = []
    params = []
    for name, param in signature.parameters.items():
        ann = param.annotation

        if isinstance(ann, dataclasses.InitVar):
            # The TorchScript interpreter can't handle InitVar annotations, so we unwrap the underlying type here
            init_vars.append(name)
            params.append(param.replace(annotation=ann.type))  # type: ignore[attr-defined]
        else:
            params.append(param)

    signature = signature.replace(parameters=params)

    body = [
        # Assign all attributes to self
        f"self.{field.name} = {field.name}"
        for field in dataclasses.fields(cls)
        if field.init and field.name not in init_vars
    ]
    # Call user's impl of __post_init__ if it exists
    if hasattr(cls, "__post_init__"):
        body.append("self.__post_init__(" + ", ".join(init_vars) + ")")

    return compose_fn(cls, "__init__", body or ["pass"], signature=str(signature))
86
+
87
+
88
# This is a placeholder at the moment since the TorchScript interpreter doesn't call __repr__
def synthesize__repr__(cls) -> ParsedDef:
    """Synthesize a `__repr__` listing every field of `cls` declared with repr=True."""
    shown_fields = ", ".join(
        f"{field.name}=self.{field.name}"
        for field in dataclasses.fields(cls)
        if field.repr
    )
    repr_stmt = f"return '{cls.__name__}(" + shown_fields + ")'"
    return compose_fn(
        cls,
        "__repr__",
        [repr_stmt],
        signature="(self) -> str",
    )
106
+
107
+
108
def synthesize__hash__(cls) -> ParsedDef:
    """Synthesize a stub `__hash__` for `cls`.

    This is just a placeholder to prevent compilation from failing; it won't
    even get called at all right now because the TorchScript interpreter
    doesn't call custom __hash__ implementations.
    """
    stub = "raise NotImplementedError('__hash__ is not supported for dataclasses in TorchScript')"
    return compose_fn(cls, "__hash__", [stub], signature="(self) -> int")
119
+
120
+
121
# Implementation for __eq__ and __ne__
def synthesize_equality(cls, name: str, converse: str) -> ParsedDef:
    """Synthesize `__eq__`/`__ne__`: bail out as soon as a field pair satisfies `converse`."""
    per_field_check = [f"if val1 {converse} val2: return False"]
    return synthesize_comparison(
        cls,
        name,
        allow_eq=True,
        raise_on_none=False,
        inner=per_field_check,
    )
130
+
131
+
132
def synthesize_inequality(cls, name: str, op: str, allow_eq: bool) -> ParsedDef:
    """Synthesize an ordering method (`__lt__`/`__le__`/`__gt__`/`__ge__`) for `cls`."""
    per_field_check = [
        f"if val1 {op} val2: return True",
        f"elif val2 {op} val1: return False",
    ]
    return synthesize_comparison(
        cls,
        name,
        allow_eq,
        raise_on_none=True,
        inner=per_field_check,
    )
143
+
144
+
145
def synthesize_comparison(
    cls, name: str, allow_eq: bool, raise_on_none: bool, inner: List[str]
) -> ParsedDef:
    """Synthesize a field-by-field comparison method for the dataclass `cls`.

    Args:
        name: dunder name of the method being generated.
        allow_eq: the result returned when every compared field pair is equal.
        raise_on_none: raise TypeError (instead of returning False) when
            exactly one side of an Optional field is None.
        inner: per-field comparison statements that operate on `val1`/`val2`.
    """
    body: List[str] = []
    for field in dataclasses.fields(cls):
        if not field.compare:
            continue

        body.append(f"val1 = self.{field.name}")
        body.append(f"val2 = other.{field.name}")

        if is_optional(field.type):
            # Type refinement for optional fields; we need this to avoid type errors from the interpreter
            body.append("if val1 is not None and val2 is not None:")
            body.extend("    " + line for line in inner)
            body.append("elif (val1 is None) != (val2 is None):")
            if raise_on_none:
                body.append(f"    raise TypeError('Cannot compare {cls.__name__} with None')")
            else:
                body.append("    return False")
        else:
            body.extend(inner)

    body.append(f"return {allow_eq}")
    return compose_fn(
        cls, name, body, signature=f"(self, other: {cls.__name__}) -> bool"
    )
177
+
178
+
179
# Dispatch table: dunder name -> synthesizer invoked when scripting a dataclass.
DATACLASS_MAGIC_METHODS: Dict[str, Callable] = {
    "__init__": synthesize__init__,
    "__repr__": synthesize__repr__,
    "__hash__": synthesize__hash__,
    "__eq__": partial(synthesize_equality, name="__eq__", converse="!="),
    "__ne__": partial(synthesize_equality, name="__ne__", converse="=="),
    "__lt__": partial(synthesize_inequality, name="__lt__", op="<", allow_eq=False),
    "__le__": partial(synthesize_inequality, name="__le__", op="<", allow_eq=True),
    "__gt__": partial(synthesize_inequality, name="__gt__", op=">", allow_eq=False),
    "__ge__": partial(synthesize_inequality, name="__ge__", op=">", allow_eq=True),
}
venv/lib/python3.10/site-packages/torch/jit/_decomposition_utils.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch._ops import OpOverload, OpOverloadPacket
3
+
4
+
5
def _register_decomposition(op: OpOverload, graph: torch._C.Graph):
    """Install `graph` as the JIT decomposition for the specific overload `op`."""
    # A packet (e.g. aten.add) covers many overloads; we need exactly one schema.
    assert not isinstance(op, OpOverloadPacket), f"Must pass specific op overload, not overload packet, found {op}"
    assert isinstance(op, OpOverload)
    torch._C._jit_register_decomposition_for_schema(op._schema, graph)
venv/lib/python3.10/site-packages/torch/jit/_decompositions.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import Tensor
3
+
4
+ aten = torch.ops.aten
5
+ import inspect
6
+ import warnings
7
+ from typing import Dict, List, Optional, Set
8
+
9
+ from torch.types import Number
10
+
11
# Maps "<aten schema string>" -> the scripted decomposition registered for it.
decomposition_table: Dict[str, torch.jit.ScriptFunction] = {}
# Names already used by registered decompositions; enforces the unique names
# required for jit function serialization.
function_name_set: Set[str] = set()
13
+
14
+
15
def check_decomposition_has_type_annotations(f):
    """Assert that every parameter of `f` and its return value are annotated.

    TorchScript needs complete type information to compile a decomposition,
    so an unannotated signature is a programming error.

    Args:
        f: the candidate decomposition function.

    Raises:
        AssertionError: if any parameter or the return value lacks an annotation.
    """
    inspect_empty = inspect._empty  # type: ignore[attr-defined]
    sig = inspect.signature(f)
    # Bug fix: plain Python functions expose `__name__`, not `.name`; using
    # `f.name` made a failing assert raise AttributeError while building the
    # message instead of the intended AssertionError.
    for param in sig.parameters.values():
        assert (
            param.annotation != inspect_empty
        ), f"No signature on param {param.name} for function {f.__name__}"

    assert (
        sig.return_annotation != inspect_empty
    ), f"No return annotation for function {f.__name__}"
26
+
27
+
28
def signatures_match(decomposition_sig, torch_op_sig):
    """Return True when a decomposition's signature matches the torch op's.

    Compares parameter count, then per-parameter name and annotation, then
    default values (only when both sides declare one), and finally the return
    annotation. Full equality can't be checked yet because not all fields are
    correctly deduced in the torch op signature (e.g. default values), and
    parameter `kind` can't be compared because kwarg-only values with defaults
    are not yet supported in TS.
    """
    decomp_params = decomposition_sig.parameters
    op_params = torch_op_sig.parameters

    if len(decomp_params) != len(op_params):
        return False

    inspect_empty = inspect._empty  # type: ignore[attr-defined]
    for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
        if decomp_param.name == "self":
            warnings.warn("PyTorch uses 'input' instead of 'self' on public api")
        if decomp_param.name != op_param.name:
            return False
        if decomp_param.annotation != op_param.annotation:
            return False

        # default value not always correctly inferred as being present on torch schema,
        # but if specified on both they should be equal
        both_defaulted = (
            decomp_param.default != inspect_empty
            and op_param.default != inspect_empty
        )
        if both_defaulted and decomp_param.default != op_param.default:
            return False

    return decomposition_sig.return_annotation == torch_op_sig.return_annotation
57
+
58
+
59
def register_decomposition(aten_op, registry=None):
    """Decorator factory: script the wrapped function and store it as `aten_op`'s decomposition.

    Args:
        aten_op: a specific `torch._ops.OpOverload` whose schema string keys
            the registry entry.
        registry: destination mapping; defaults to the module-level
            `decomposition_table`.

    Returns:
        The decorator; it returns the original (unscripted) function.
    """
    def decomposition_decorator(f):
        nonlocal registry
        if registry is None:
            registry = decomposition_table

        assert isinstance(aten_op, torch._ops.OpOverload)

        # Need unique name for jit function serialization
        assert (
            f.__name__ not in function_name_set
        ), f"Duplicated function name {f.__name__}"
        function_name_set.add(f.__name__)

        scripted_func = torch.jit.script(f)
        torch._C._jit_pass_inline(scripted_func.graph)

        # Two peephole + constant-propagation sweeps to simplify the graph.
        for _ in range(2):
            torch._C._jit_pass_peephole(scripted_func.graph)
            torch._C._jit_pass_constant_propagation(scripted_func.graph)

        registry[str(aten_op._schema)] = scripted_func
        return f

    return decomposition_decorator
84
+
85
+
86
+ # TODO: replace torch.sigmoid -> aten.sigmoid
87
+
88
+
89
@register_decomposition(aten.var.correction)
def var_decomposition(
    input: Tensor,
    dim: Optional[List[int]] = None,
    correction: Optional[Number] = None,
    keepdim: bool = False,
) -> Tensor:
    """Decomposition of aten.var.correction: variance via mean/subtract/square/sum."""
    if dim is None:
        dim_i: List[int] = []
        dim = dim_i

    # An empty dim list means "reduce over every element".
    if isinstance(dim, (tuple, list)) and len(dim) == 0:
        n = input.numel()
    else:
        # Count the elements along the reduced dimensions.
        n = 1
        for dim_i in dim:  # type: ignore[assignment]
            n *= input.shape[dim_i]  # type: ignore[call-overload]

    mean = aten.mean(input, dim, True)
    sub = input - mean
    sq = sub * sub
    sum = aten.sum(sq, dim, keepdim)

    if correction is None:
        # No correction given: default to 1 (Bessel's correction).
        denom = float(n - 1)
    else:
        if isinstance(correction, int):
            denom = float(n - correction)
        elif isinstance(correction, float):
            denom = float(n) - correction
        else:
            raise RuntimeError("correction must be int or float")

    # NOTE(review): max(0, denom) clamps a negative denominator to zero, so the
    # division then produces inf/nan rather than raising -- confirm this matches
    # aten.var's behavior when correction >= n.
    return sum / max(0, denom)
123
+
124
+
125
@register_decomposition(aten.var.default)
def var(input: Tensor, unbiased: bool = True) -> Tensor:
    """Decomposition of aten.var.default, expressed via aten.var.correction."""
    correction = 1 if unbiased else 0
    return var_decomposition(input, correction=correction)
venv/lib/python3.10/site-packages/torch/jit/_freeze.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Freezing.
2
+
3
+ This is not intended to be imported directly; please use the exposed
4
+ functionalities in `torch.jit`.
5
+ """
6
+
7
+ from typing import List, Optional
8
+
9
+ import torch
10
+ from torch.jit._script import RecursiveScriptModule, ScriptModule
11
+
12
+
13
def freeze(
    mod, preserved_attrs: Optional[List[str]] = None, optimize_numerics: bool = True
):
    r"""Freeze a :class:`ScriptModule`: clone it and inline submodules, parameters, and attributes as constants.

    Only ``forward``, the attributes/methods named in ``preserved_attrs``, and
    any attribute mutated inside a preserved method survive freezing; everything
    else is inlined into the TorchScript IR graph as constants. The input module
    must already be in eval mode. Generic, machine-independent optimizations are
    applied afterwards; run ``optimize_for_inference`` for server/build-specific
    ones.

    Args:
        mod (:class:`ScriptModule`): a module to be frozen.
        preserved_attrs (Optional[List[str]]): attributes to preserve in
            addition to the forward method. Attributes modified in preserved
            methods will also be preserved. Submodule attributes may be named
            with dots, e.g. ``"submodule.version"``.
        optimize_numerics (bool): if ``True``, a set of optimization passes
            will be run that does not strictly preserve numerics. Full details
            of optimization can be found at `torch.jit.run_frozen_optimizations`.

    Returns:
        Frozen :class:`ScriptModule`.

    Example (Freezing a simple module with a Parameter):

    .. testcode::
        import torch
        class MyModule(torch.nn.Module):
            def __init__(self, N, M):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.rand(N, M))
                self.linear = torch.nn.Linear(N, M)

            def forward(self, input):
                output = self.weight.mm(input)
                output = self.linear(output)
                return output

        scripted_module = torch.jit.script(MyModule(2, 3).eval())
        frozen_module = torch.jit.freeze(scripted_module)
        # parameters have been removed and inlined into the Graph as constants
        assert len(list(frozen_module.named_parameters())) == 0
        # See the compiled graph as Python code
        print(frozen_module.code)

    Example (Freezing a module with preserved attributes)

    .. testcode::
        import torch
        class MyModule2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.modified_tensor = torch.tensor(10.)
                self.version = 1

            def forward(self, input):
                self.modified_tensor += 1
                return input + self.modified_tensor

        scripted_module = torch.jit.script(MyModule2().eval())
        frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["version"])
        # `version` was manually preserved, so it still exists and is writable
        assert frozen_module.version == 1
        frozen_module.version = 2
        # `modified_tensor` is detected as mutated in forward, so freezing
        # preserves it to retain model semantics
        assert frozen_module(torch.tensor(1)) == torch.tensor(12)
        assert frozen_module(torch.tensor(1)) == torch.tensor(13)

    Note:
        If you're not sure why an attribute is not being inlined as a constant,
        run ``dump_alias_db`` on ``frozen_module.forward.graph`` to see whether
        freezing detected the attribute being modified.

    Note:
        Because freezing makes weights constants and removes module hierarchy,
        ``to`` and other nn.Module methods that manipulate device or dtype no
        longer work. As a workaround, remap devices with ``map_location`` in
        ``torch.jit.load``; device-specific logic may, however, have been baked
        into the model.
    """
    # Guard clauses: freezing only accepts scripted modules in eval mode.
    if not isinstance(mod, ScriptModule):
        raise RuntimeError(
            "Freezing expects a ScriptModule as input. "
            "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
        )
    if mod.training:
        raise RuntimeError(
            "Freezing is currently only implemented for modules in eval mode. "
            "Please call .eval() on your module before freezing."
        )

    attrs_to_keep = [] if preserved_attrs is None else preserved_attrs

    # Clone + inline in C++, then finalize the Python-side wrapper.
    frozen = RecursiveScriptModule(torch._C._freeze_module(mod._c, attrs_to_keep))
    RecursiveScriptModule._finalize_scriptmodule(frozen)

    # Preserved entries that are methods also get the frozen-graph passes.
    methods_to_optimize = [name for name in attrs_to_keep if mod._c._has_method(name)]
    run_frozen_optimizations(frozen, optimize_numerics, methods_to_optimize)

    return frozen
123
+
124
+
125
def run_frozen_optimizations(
    mod, optimize_numerics: bool = True, preserved_methods: Optional[List[str]] = None
):
    r"""
    Run optimization passes that target patterns left behind by freezing.

    The current set of optimizations includes:
        - Dropout Removal
        - Pretranspose Linear Layers
        - Concat Linear Layers with same input Tensor
        - Conv -> Batchnorm folding
        - Conv -> Add/Sub folding
        - Conv -> Mul/Div folding

    Args:
        mod (:class:`ScriptModule`): a frozen module to be optimized in place.

        optimize_numerics (bool): If ``True``, a set of optimization passes will
            be run that does not strictly preserve numerics. These passes keep
            the default rtol and atol of `torch.testing.assert_close` for a
            single transformation, but stacking many transformations in one
            module may drift outside the default `assert_close` tolerance.
            Conv -> Batchnorm folding, Conv-Add/Sub, and Conv -> Mul/Div
            folding all may alter numerics.

        preserved_methods (Optional[List[str]]): additional methods whose
            graphs should be optimized alongside ``forward``.

    Returns:
        None

    Note:
        In rare occasions, this can result in slower execution.

    Example (Freezing a module with Conv->Batchnorm)
    .. code-block:: python
        import torch
        in_channels, out_channels = 3, 32
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
        bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
        mod = torch.nn.Sequential(conv, bn)
        # set optimize to False here, by default freezing runs run_frozen_optimizations
        frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize=False)
        # inspect frozen mod
        assert "batch_norm" in str(frozen_mod.graph)
        torch.jit.run_frozen_optimizations(frozen_mod)
        assert "batch_norm" not in str(frozen_mod.graph)

    """
    # ``forward`` may have been dropped from the frozen module, so check first.
    if mod._c._has_method("forward"):
        torch._C._jit_pass_optimize_frozen_graph(mod.graph, optimize_numerics)

    # Optimize every explicitly preserved method as well.
    for name in preserved_methods or []:
        torch._C._jit_pass_optimize_frozen_graph(
            mod.__getattr__(name).graph, optimize_numerics
        )
179
+
180
+
181
def optimize_for_inference(
    mod: ScriptModule, other_methods: Optional[List[str]] = None
) -> ScriptModule:
    """
    Perform a set of optimization passes to optimize a model for the purposes of inference.

    If the model is not already frozen, this will invoke `torch.jit.freeze`
    automatically.

    On top of the generic optimizations that should speed up the model
    regardless of environment, this also bakes in build-specific settings such
    as the presence of CUDNN or MKLDNN, and may in the future make
    transformations that speed things up on one machine but slow things down
    on another. Accordingly, serialization is not implemented following
    invoking `optimize_for_inference` and is not guaranteed.

    This is still in prototype, and may have the potential to slow down your
    model. Primary use cases that have been targeted so far have been vision
    models on cpu and gpu to a lesser extent.

    Example (optimizing a module with Conv->Batchnorm)::

        import torch
        in_channels, out_channels = 3, 32
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
        bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
        mod = torch.nn.Sequential(conv, bn)
        frozen_mod = torch.jit.optimize_for_inference(torch.jit.script(mod.eval()))
        assert "batch_norm" not in str(frozen_mod.graph)
        # if built with MKLDNN, convolution will be run with MKLDNN weights
        assert "MKLDNN" in frozen_mod.graph
    """
    if not isinstance(mod, ScriptModule):
        raise RuntimeError(
            "optimize_for_inference expects a ScriptModule as input. "
            "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
        )

    methods = [] if other_methods is None else other_methods

    # A ``training`` attribute still being present means the module has not
    # been frozen yet (freezing drops unpreserved attributes), so freeze now.
    if hasattr(mod, "training"):
        mod = freeze(mod.eval(), preserved_attrs=methods)

    torch._C._jit_pass_optimize_for_inference(mod._c, methods)

    return mod
venv/lib/python3.10/site-packages/torch/jit/_fuser.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ from typing import List, Tuple
3
+
4
+ import torch
5
+
6
+
7
@contextlib.contextmanager
def optimized_execution(should_optimize):
    """Context manager toggling whether the JIT executor runs optimizations.

    The previous setting is restored on exit, even if the body raises.
    """
    previous = torch._C._get_graph_executor_optimize()
    torch._C._set_graph_executor_optimize(should_optimize)
    try:
        yield
    finally:
        torch._C._set_graph_executor_optimize(previous)
+ torch._C._set_graph_executor_optimize(stored_flag)
16
+
17
+
18
@contextlib.contextmanager
def fuser(name):
    """Context manager that facilitates switching between backend fusers.

    Valid names:
    * ``fuser0`` - enables only legacy fuser
    * ``fuser1`` - enables only NNC
    * ``fuser2`` - enables only nvFuser
    * ``fuser3`` - enables oneDNN Graph
    * ``none``   - disables all PyTorch fusers

    All fuser flags are restored to their previous values on exit.

    Raises:
        ValueError: if ``name`` is not one of the options listed above.
    """
    # Snapshot the global fuser flags so they can be restored on exit.
    old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
    old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
    old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
    old_nvfuser_state = torch._C._jit_nvfuser_enabled()
    old_llga_state = torch._C._jit_llga_enabled()
    if name == "fuser0":  # legacy fuser
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(False)
    elif name == "fuser1":  # NNC
        # NNC needs the profiling executor; when called with an argument,
        # _get_graph_executor_optimize installs the new setting and returns
        # the previous one (restored in the finally block below).
        old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        old_profiling_mode = torch._C._get_graph_executor_optimize(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(False)
    elif name == "fuser2":  # nvFuser
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(True)
        torch._C._jit_set_llga_enabled(False)
    elif name == "fuser3":  # oneDNN Graph
        old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        old_profiling_mode = torch._C._get_graph_executor_optimize(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(True)
    elif name == "none":  # Turn Pytorch fuser off
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(False)
        torch._C._jit_set_llga_enabled(False)
    else:
        # ValueError rather than a bare Exception: callers catching Exception
        # keep working, and the class now reflects a bad-argument failure.
        raise ValueError(f"unrecognized fuser option (name: {name})")
    try:
        yield
    finally:
        if name in ["fuser1", "fuser3"]:  # NNC or oneDNN Graph
            torch._C._jit_set_profiling_executor(old_profiling_executor)  # type: ignore[possibly-undefined]
            torch._C._get_graph_executor_optimize(old_profiling_mode)  # type: ignore[possibly-undefined]
        # recover the previous values
        torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
        torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
        torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
        torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
        torch._C._jit_set_llga_enabled(old_llga_state)
+
82
+
83
# Alias for the C++ helper returning the most recently executed optimized
# graph; used as the fallback by `_script_method_graph_for` below.
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
84
+
85
+
86
+ def _get_differentiable_graph_node(node, diff_node):
87
+ if node.kind() == "prim::DifferentiableGraph":
88
+ diff_node.append(node)
89
+ else:
90
+ for block in node.blocks():
91
+ for n in block.nodes():
92
+ _get_differentiable_graph_node(n, diff_node)
93
+
94
+
95
def _graph_for(self, *args, **kwargs):
    # Convenience wrapper: a plain scripted function/module acts as its own
    # "parent" when recovering the optimized graph for the given inputs.
    return _script_method_graph_for(self, self, *args, **kwargs)
97
+
98
+
99
def _script_method_graph_for(self, parent, *args, **kwargs):
    """Return an optimized graph for ``self``, specialized to ``args``/``kwargs``.

    Reads ``parent``'s graph-executor debug state and, when there is exactly
    one execution plan, returns a copy of its graph with each
    ``prim::DifferentiableGraph`` node's subgraph swapped for the optimized
    forward graph recorded for that node. If any step of that introspection
    fails, falls back to executing ``self`` once and returning the last
    executed optimized graph.
    """
    try:
        dbs = parent.get_debug_state()
        eps = list(dbs.execution_plans.values())
        assert len(eps) == 1
        graph = eps[0].graph.copy()

        # graph_executor_states for differentiable node
        fw_states = eps[0].code.differentiable_op_executor_states()
        diff_nodes: List[torch._C.Node] = []
        for n in graph.nodes():
            _get_differentiable_graph_node(n, diff_nodes)

        # Executor states are expected to line up one-to-one with the
        # differentiable nodes found in the copied graph.
        assert len(fw_states) == len(diff_nodes)
        # swap each differentiable graph with optimized graph in their execution plan
        for n, state in zip(diff_nodes, fw_states):
            fw_execution_plans = list(state.execution_plans.values())
            # we can only update the subgraph when there's a unique execution
            # plan. Avoid assert here so we would skip the ones that can't be
            # updated while try the best effort to update other nodes.
            if len(fw_execution_plans) == 1:
                n.g_("Subgraph", fw_execution_plans[0].graph)

        return graph
    except Exception:
        # fallback approach, we just ran the graph and return the recorded
        # optimized graph
        self(*args, **kwargs)
        return last_executed_optimized_graph()
128
+
129
+
130
def set_fusion_strategy(strategy: List[Tuple[str, int]]):
    """Set the type and number of specializations that can occur during fusion.

    ``strategy`` is a list of ``(type, depth)`` pairs where ``type`` is one of
    ``"STATIC"`` or ``"DYNAMIC"`` and ``depth`` is an integer.

    Behavior - static vs dynamic:
        In STATIC fusion, fused ops are compiled for fixed input shapes, which
        are determined from some initial profiling runs. In DYNAMIC fusion,
        fused ops are compiled for variable input shapes, so that multiple
        shapes are possible. In both cases, recompilation also happens on new
        striding behavior, device, or dtype.

    Behavior - fallback functions & depth:
        When an input does not match the format required by the specialized
        compiled op, it runs a fallback function. Fallback functions are
        recursively compiled and specialized based on the observed tensor
        shapes. Since compilation can be slow, the "depth" parameter limits
        the number of specializations that can be compiled before giving up
        on recompiling and falling back to a completely un-fused,
        un-specialized implementation.

    The pairs are consumed in order: for example ``[("STATIC", 2),
    ("DYNAMIC", 2)]`` means the first two specializations use static fusion,
    the following two use dynamic fusion, and any input that satisfies none
    of the four options runs an unfused implementation.

    NB: in the future, as more fusion backends are added, there may be more
    granular apis for specific fusers.
    """
    result = torch._C._jit_set_fusion_strategy(strategy)
    return result
venv/lib/python3.10/site-packages/torch/jit/_ir_utils.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Union
2
+
3
+ import torch
4
+
5
+
6
+ class _InsertPoint:
7
+ def __init__(
8
+ self,
9
+ insert_point_graph: torch._C.Graph,
10
+ insert_point: Union[torch._C.Node, torch._C.Block],
11
+ ):
12
+ self.insert_point = insert_point
13
+ self.g = insert_point_graph
14
+ self.guard = None
15
+
16
+ def __enter__(self):
17
+ self.prev_insert_point = self.g.insertPoint()
18
+ self.g.setInsertPoint(self.insert_point)
19
+
20
+ def __exit__(self, *args):
21
+ self.g.setInsertPoint(self.prev_insert_point)
22
+
23
+
24
def insert_point_guard(self, insert_point: Union[torch._C.Node, torch._C.Block]):
    """Return a context manager that redirects ``self``'s insertion point.

    Written method-style: ``self`` is the :class:`torch._C.Graph` (see
    ``_InsertPoint.__init__``'s signature). While the returned ``_InsertPoint``
    is active the graph's insertion point is ``insert_point``; the previous
    insertion point is restored on exit.
    """
    return _InsertPoint(self, insert_point)
venv/lib/python3.10/site-packages/torch/jit/_logging.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
# Re-export the prim stat-logging op under a Python-friendly name.
add_stat_value = torch.ops.prim.AddStatValue

# Logging interface bindings from torch._C: installer plus the available
# logger implementations and the aggregation-mode enum.
set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

# Re-export the prim time-point op (for interval measurements).
time_point = torch.ops.prim.TimePoint
venv/lib/python3.10/site-packages/torch/jit/_monkeytype_config.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import pathlib
3
+ import sys
4
+ import typing
5
+ from collections import defaultdict
6
+ from types import CodeType
7
+ from typing import Dict, Iterable, List, Optional
8
+
9
+ import torch
10
+
11
# Flag tracking whether the optional MonkeyType dependency is importable;
# flipped to False below when any of its imports fail.
_IS_MONKEYTYPE_INSTALLED = True
try:
    import monkeytype  # type: ignore[import]
    from monkeytype import trace as monkeytype_trace
    from monkeytype.config import _startswith, LIB_PATHS  # type: ignore[import]
    from monkeytype.db.base import (  # type: ignore[import]
        CallTraceStore,
        CallTraceStoreLogger,
        CallTraceThunk,
    )
    from monkeytype.tracing import CallTrace, CodeFilter  # type: ignore[import]
except ImportError:
    _IS_MONKEYTYPE_INSTALLED = False
24
+
25
+
26
def is_torch_native_class(cls):
    """Return True when ``cls`` is defined inside a ``torch.*`` module."""
    if not hasattr(cls, "__module__"):
        return False

    # The root package of __module__ must resolve to the torch module itself.
    top_level, _, _ = cls.__module__.partition(".")
    return sys.modules.get(top_level) is torch
37
+
38
+
39
def get_type(type):
    """Convert the given type to a TorchScript-acceptable format."""
    if isinstance(type, str):
        return type
    if inspect.getmodule(type) == typing:
        # Types imported from ``typing`` (Tuple, List, Dict, ...) must lose
        # their ``typing.`` prefix, since TorchScript does not accept e.g.
        # ``typing.List``.
        return str(type).replace(type.__module__ + ".", "")
    if is_torch_native_class(type):
        # Subtypes of torch modules need the fully qualified name that
        # TorchScript expects: module name + type name.
        return type.__module__ + "." + type.__name__
    # For all other types use the bare type name.
    return type.__name__
57
+
58
+
59
def get_optional_of_element_type(types):
    """Return ``Optional[T]`` for a two-element consolidated type list.

    Given a pair of types where one is ``NoneType``, extract the other
    element's TorchScript-friendly name and wrap it as ``Optional[...]``.
    The result is rendered as a string because Optional's internal form,
    ``Union[type, NoneType]``, is not supported by TorchScript yet.
    TODO: remove this helper once Union support lands.
    """
    non_none = types[1] if type(None) == types[0] else types[0]
    return "Optional[" + get_type(non_none) + "]"
72
+
73
+
74
def get_qualified_name(func):
    """Return ``func``'s qualified name (its ``__qualname__``)."""
    qualified = func.__qualname__
    return qualified
76
+
77
+
78
if _IS_MONKEYTYPE_INSTALLED:

    class JitTypeTraceStoreLogger(CallTraceStoreLogger):
        """A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""

        def __init__(self, store: CallTraceStore):
            super().__init__(store)

        def log(self, trace: CallTrace) -> None:
            # Append in memory instead of flushing to a database.
            self.traces.append(trace)

    class JitTypeTraceStore(CallTraceStore):
        """In-memory CallTraceStore keyed by the traced function's qualified name."""

        def __init__(self):
            super().__init__()
            # A dictionary keeping all collected CallTrace
            # key is fully qualified name of called function
            # value is list of all CallTrace
            self.trace_records: Dict[str, list] = defaultdict(list)

        def add(self, traces: Iterable[CallTrace]):
            # Bucket each trace under its function's qualified name.
            for t in traces:
                qualified_name = get_qualified_name(t.func)
                self.trace_records[qualified_name].append(t)

        def filter(
            self,
            qualified_name: str,
            qualname_prefix: Optional[str] = None,
            limit: int = 2000,
        ) -> List[CallTraceThunk]:
            # ``qualname_prefix`` and ``limit`` are accepted for API
            # compatibility but not applied here.
            return self.trace_records[qualified_name]

        def analyze(self, qualified_name: str) -> Dict:
            # Analyze the types for the given module
            # and create a dictionary of all the types
            # for arguments.
            records = self.trace_records[qualified_name]
            all_args = defaultdict(set)
            for record in records:
                for arg, arg_type in record.arg_types.items():
                    all_args[arg].add(arg_type)
            return all_args

        def consolidate_types(self, qualified_name: str) -> Dict:
            all_args = self.analyze(qualified_name)
            # If there are more types for an argument,
            # then consolidate the type to `Any` and replace the entry
            # by type `Any`.
            for arg, types in all_args.items():
                types = list(types)
                type_length = len(types)
                if type_length == 2 and type(None) in types:
                    # TODO: To remove this check once Union support in TorchScript lands.
                    all_args[arg] = get_optional_of_element_type(types)
                elif type_length > 1:
                    all_args[arg] = "Any"
                elif type_length == 1:
                    all_args[arg] = get_type(types[0])
            return all_args

        def get_args_types(self, qualified_name: str) -> Dict:
            # Public entry point: consolidated argument-name -> type-string map.
            return self.consolidate_types(qualified_name)

    class JitTypeTraceConfig(monkeytype.config.Config):
        """MonkeyType config wiring the JIT store/logger/filter together."""

        def __init__(self, s: JitTypeTraceStore):
            super().__init__()
            self.s = s

        def trace_logger(self) -> JitTypeTraceStoreLogger:
            """Return a JitCallTraceStoreLogger that logs to the configured trace store."""
            return JitTypeTraceStoreLogger(self.trace_store())

        def trace_store(self) -> CallTraceStore:
            return self.s

        def code_filter(self) -> Optional[CodeFilter]:
            # Use the custom filter below so ``forward`` calls are always traced.
            return jit_code_filter

else:
    # When MonkeyType is not installed, we provide dummy class definitions
    # for the below classes.
    class JitTypeTraceStoreLogger:  # type: ignore[no-redef]
        def __init__(self):
            pass

    class JitTypeTraceStore:  # type: ignore[no-redef]
        def __init__(self):
            self.trace_records = None

    class JitTypeTraceConfig:  # type: ignore[no-redef]
        def __init__(self):
            pass

    monkeytype_trace = None  # type: ignore[assignment]  # noqa: F811
172
+
173
+
174
def jit_code_filter(code: CodeType) -> bool:
    """Code filter for TorchScript to trace forward calls.

    The custom CodeFilter is required while scripting FX-traced forward calls:
    their ``code.co_filename`` starts with ``<``, which the default MonkeyType
    filter uses to exclude stdlib and site-packages tracing. Since all forward
    calls must be traced, this filter enables tracing for any code object
    whose ``co_name`` is ``"forward"``, and otherwise behaves like the default
    filter (excluding stdlib and site-packages).
    """
    # Reject code without a real source file, unless it is a ``forward`` call.
    has_real_file = bool(code.co_filename) and code.co_filename[0] != "<"
    if code.co_name != "forward" and not has_real_file:
        return False

    source_path = pathlib.Path(code.co_filename).resolve()
    return not any(_startswith(source_path, lib_path) for lib_path in LIB_PATHS)