applied-ai-018 committed
Commit 7fca365 · verified · 1 parent: c31e504

Add files using upload-large-folder tool
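For context, the upload-large-folder tool named in the commit message is part of the huggingface_hub client. The snippet below is only a hedged illustration of how such an upload is typically driven; the repository id, repo type, and local path are placeholders and are not taken from this commit.

    # Hypothetical sketch of a large-folder upload with huggingface_hub.
    # All identifiers below are placeholders, not values from this commit.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_large_folder(
        repo_id="applied-ai-018/example-repo",  # placeholder repo id
        repo_type="dataset",                    # assumed repo type
        folder_path="./llmeval-env",            # local folder to push
    )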

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/INSTALLER +1 -0
  2. llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/LICENSE +21 -0
  3. llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/METADATA +276 -0
  4. llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/RECORD +47 -0
  5. llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/WHEEL +5 -0
  6. llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/top_level.txt +1 -0
  7. llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/INSTALLER +1 -0
  8. llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/LICENSE +20 -0
  9. llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/METADATA +46 -0
  10. llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/RECORD +43 -0
  11. llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/WHEEL +6 -0
  12. llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/top_level.txt +2 -0
  13. llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER +1 -0
  14. llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE +201 -0
  15. llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA +128 -0
  16. llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD +10 -0
  17. llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL +5 -0
  18. llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt +1 -0
  19. llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER +1 -0
  20. llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE +27 -0
  21. llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA +233 -0
  22. llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD +180 -0
  23. llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL +5 -0
  24. llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt +1 -0
  25. llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/INSTALLER +1 -0
  26. llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/LICENSE.txt +37 -0
  27. llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/METADATA +133 -0
  28. llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/RECORD +0 -0
  29. llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/WHEEL +5 -0
  30. llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/entry_points.txt +2 -0
  31. llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/top_level.txt +1 -0
  32. llmeval-env/lib/python3.10/site-packages/pandas/arrays/__init__.py +53 -0
  33. llmeval-env/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/safetensors/mlx.py +138 -0
  40. llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/INSTALLER +1 -0
  41. llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/LICENSE +21 -0
  42. llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/METADATA +171 -0
  43. llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/RECORD +17 -0
  44. llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/WHEEL +5 -0
  45. llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/top_level.txt +1 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/_VF.pyi +0 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/__config__.py +22 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/__future__.py +75 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/__init__.py +2038 -0
llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2016 Tsuyoshi Hombashi
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/METADATA ADDED
@@ -0,0 +1,276 @@
1
+ Metadata-Version: 2.1
2
+ Name: DataProperty
3
+ Version: 1.0.1
4
+ Summary: Python library for extract property from data.
5
+ Home-page: https://github.com/thombashi/DataProperty
6
+ Author: Tsuyoshi Hombashi
7
+ Author-email: [email protected]
8
+ Maintainer: Tsuyoshi Hombashi
9
+ Maintainer-email: [email protected]
10
+ License: MIT License
11
+ Project-URL: Source, https://github.com/thombashi/DataProperty
12
+ Project-URL: Tracker, https://github.com/thombashi/DataProperty/issues
13
+ Keywords: data,library,property
14
+ Classifier: Development Status :: 5 - Production/Stable
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: Intended Audience :: Information Technology
17
+ Classifier: License :: OSI Approved :: MIT License
18
+ Classifier: Operating System :: OS Independent
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.7
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Programming Language :: Python :: 3.10
24
+ Classifier: Programming Language :: Python :: 3.11
25
+ Classifier: Programming Language :: Python :: Implementation :: CPython
26
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
27
+ Classifier: Topic :: Software Development :: Libraries
28
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
29
+ Requires-Python: >=3.7
30
+ Description-Content-Type: text/x-rst
31
+ License-File: LICENSE
32
+ Requires-Dist: mbstrdecoder (<2,>=1.0.0)
33
+ Requires-Dist: typepy[datetime] (<2,>=1.2.0)
34
+ Provides-Extra: logging
35
+ Requires-Dist: loguru (<1,>=0.4.1) ; extra == 'logging'
36
+ Provides-Extra: test
37
+ Requires-Dist: pytest (>=6.0.1) ; extra == 'test'
38
+ Requires-Dist: pytest-md-report (>=0.3) ; extra == 'test'
39
+ Requires-Dist: tcolorpy (>=0.1.2) ; extra == 'test'
40
+
41
+ .. contents:: **DataProperty**
42
+ :backlinks: top
43
+ :local:
44
+
45
+
46
+ Summary
47
+ =======
48
+ A Python library for extract property from data.
49
+
50
+
51
+ .. image:: https://badge.fury.io/py/DataProperty.svg
52
+ :target: https://badge.fury.io/py/DataProperty
53
+ :alt: PyPI package version
54
+
55
+ .. image:: https://anaconda.org/conda-forge/DataProperty/badges/version.svg
56
+ :target: https://anaconda.org/conda-forge/DataProperty
57
+ :alt: conda-forge package version
58
+
59
+ .. image:: https://img.shields.io/pypi/pyversions/DataProperty.svg
60
+ :target: https://pypi.org/project/DataProperty
61
+ :alt: Supported Python versions
62
+
63
+ .. image:: https://img.shields.io/pypi/implementation/DataProperty.svg
64
+ :target: https://pypi.org/project/DataProperty
65
+ :alt: Supported Python implementations
66
+
67
+ .. image:: https://github.com/thombashi/DataProperty/actions/workflows/ci.yml/badge.svg
68
+ :target: https://github.com/thombashi/DataProperty/actions/workflows/ci.yml
69
+ :alt: CI status of Linux/macOS/Windows
70
+
71
+ .. image:: https://coveralls.io/repos/github/thombashi/DataProperty/badge.svg?branch=master
72
+ :target: https://coveralls.io/github/thombashi/DataProperty?branch=master
73
+ :alt: Test coverage
74
+
75
+ .. image:: https://github.com/thombashi/DataProperty/actions/workflows/github-code-scanning/codeql/badge.svg
76
+ :target: https://github.com/thombashi/DataProperty/actions/workflows/github-code-scanning/codeql
77
+ :alt: CodeQL
78
+
79
+
80
+ Installation
81
+ ============
82
+
83
+ Installation: pip
84
+ ------------------------------
85
+ ::
86
+
87
+ pip install DataProperty
88
+
89
+ Installation: conda
90
+ ------------------------------
91
+ ::
92
+
93
+ conda install -c conda-forge dataproperty
94
+
95
+ Installation: apt
96
+ ------------------------------
97
+ ::
98
+
99
+ sudo add-apt-repository ppa:thombashi/ppa
100
+ sudo apt update
101
+ sudo apt install python3-dataproperty
102
+
103
+
104
+ Usage
105
+ =====
106
+
107
+ Extract property of data
108
+ ------------------------
109
+
110
+ e.g. Extract a ``float`` value property
111
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
112
+ .. code:: python
113
+
114
+ >>> from dataproperty import DataProperty
115
+ >>> DataProperty(-1.1)
116
+ data=-1.1, type=REAL_NUMBER, align=right, ascii_width=4, int_digits=1, decimal_places=1, extra_len=1
117
+
118
+ e.g. Extract a ``int`` value property
119
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
120
+ .. code:: python
121
+
122
+ >>> from dataproperty import DataProperty
123
+ >>> DataProperty(123456789)
124
+ data=123456789, type=INTEGER, align=right, ascii_width=9, int_digits=9, decimal_places=0, extra_len=0
125
+
126
+ e.g. Extract a ``str`` (ascii) value property
127
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
128
+ .. code:: python
129
+
130
+ >>> from dataproperty import DataProperty
131
+ >>> DataProperty("sample string")
132
+ data=sample string, type=STRING, align=left, length=13, ascii_width=13, extra_len=0
133
+
134
+ e.g. Extract a ``str`` (multi-byte) value property
135
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
136
+ .. code:: python
137
+
138
+ >>> from dataproperty import DataProperty
139
+ >>> str(DataProperty("吾輩は猫である"))
140
+ data=吾輩は猫である, type=STRING, align=left, length=7, ascii_width=14, extra_len=0
141
+
142
+ e.g. Extract a time (``datetime``) value property
143
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
144
+ .. code:: python
145
+
146
+ >>> import datetime
147
+ >>> from dataproperty import DataProperty
148
+ >>> DataProperty(datetime.datetime(2017, 1, 1, 0, 0, 0))
149
+ data=2017-01-01 00:00:00, type=DATETIME, align=left, ascii_width=19, extra_len=0
150
+
151
+ e.g. Extract a ``bool`` value property
152
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
153
+ .. code:: python
154
+
155
+ >>> from dataproperty import DataProperty
156
+ >>> DataProperty(True)
157
+ data=True, type=BOOL, align=left, ascii_width=4, extra_len=0
158
+
159
+
160
+ Extract data property for each element from a matrix
161
+ ----------------------------------------------------
162
+ ``DataPropertyExtractor.to_dp_matrix`` method returns a matrix of ``DataProperty`` instances from a data matrix.
163
+ An example data set and the result are as follows:
164
+
165
+ :Sample Code:
166
+ .. code:: python
167
+
168
+ import datetime
169
+ from dataproperty import DataPropertyExtractor
170
+
171
+ dp_extractor = DataPropertyExtractor()
172
+ dt = datetime.datetime(2017, 1, 1, 0, 0, 0)
173
+ inf = float("inf")
174
+ nan = float("nan")
175
+
176
+ dp_matrix = dp_extractor.to_dp_matrix([
177
+ [1, 1.1, "aa", 1, 1, True, inf, nan, dt],
178
+ [2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", dt],
179
+ [3, 3.33, "cccc", -3, "ccc", "true", inf, "NAN", "2017-01-01T01:23:45+0900"],
180
+ ])
181
+
182
+ for row, dp_list in enumerate(dp_matrix):
183
+ for col, dp in enumerate(dp_list):
184
+ print("row={:d}, col={:d}, {}".format(row, col, str(dp)))
185
+
186
+ :Output:
187
+ ::
188
+
189
+ row=0, col=0, data=1, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0
190
+ row=0, col=1, data=1.1, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0
191
+ row=0, col=2, data=aa, type=STRING, align=left, ascii_width=2, length=2, extra_len=0
192
+ row=0, col=3, data=1, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0
193
+ row=0, col=4, data=1, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0
194
+ row=0, col=5, data=True, type=BOOL, align=left, ascii_width=4, extra_len=0
195
+ row=0, col=6, data=Infinity, type=INFINITY, align=left, ascii_width=8, extra_len=0
196
+ row=0, col=7, data=NaN, type=NAN, align=left, ascii_width=3, extra_len=0
197
+ row=0, col=8, data=2017-01-01 00:00:00, type=DATETIME, align=left, ascii_width=19, extra_len=0
198
+ row=1, col=0, data=2, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0
199
+ row=1, col=1, data=2.2, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0
200
+ row=1, col=2, data=bbb, type=STRING, align=left, ascii_width=3, length=3, extra_len=0
201
+ row=1, col=3, data=2.2, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0
202
+ row=1, col=4, data=2.2, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0
203
+ row=1, col=5, data=False, type=BOOL, align=left, ascii_width=5, extra_len=0
204
+ row=1, col=6, data=Infinity, type=INFINITY, align=left, ascii_width=8, extra_len=0
205
+ row=1, col=7, data=NaN, type=NAN, align=left, ascii_width=3, extra_len=0
206
+ row=1, col=8, data=2017-01-01 00:00:00, type=DATETIME, align=left, ascii_width=19, extra_len=0
207
+ row=2, col=0, data=3, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0
208
+ row=2, col=1, data=3.33, type=REAL_NUMBER, align=right, ascii_width=4, int_digits=1, decimal_places=2, extra_len=0
209
+ row=2, col=2, data=cccc, type=STRING, align=left, ascii_width=4, length=4, extra_len=0
210
+ row=2, col=3, data=-3, type=INTEGER, align=right, ascii_width=2, int_digits=1, decimal_places=0, extra_len=1
211
+ row=2, col=4, data=ccc, type=STRING, align=left, ascii_width=3, length=3, extra_len=0
212
+ row=2, col=5, data=True, type=BOOL, align=left, ascii_width=4, extra_len=0
213
+ row=2, col=6, data=Infinity, type=INFINITY, align=left, ascii_width=8, extra_len=0
214
+ row=2, col=7, data=NaN, type=NAN, align=left, ascii_width=3, extra_len=0
215
+ row=2, col=8, data=2017-01-01T01:23:45+0900, type=STRING, align=left, ascii_width=24, length=24, extra_len=0
216
+
217
+
218
+ Full example source code can be found at *examples/py/to_dp_matrix.py*
219
+
220
+
221
+ Extract properties for each column from a matrix
222
+ ------------------------------------------------------
223
+ ``DataPropertyExtractor.to_column_dp_list`` method returns a list of ``DataProperty`` instances from a data matrix. The list represents the properties for each column.
224
+ An example data set and the result are as follows:
225
+
226
+ Example data set and result are as follows:
227
+
228
+ :Sample Code:
229
+ .. code:: python
230
+
231
+ import datetime
232
+ from dataproperty import DataPropertyExtractor
233
+
234
+ dp_extractor = DataPropertyExtractor()
235
+ dt = datetime.datetime(2017, 1, 1, 0, 0, 0)
236
+ inf = float("inf")
237
+ nan = float("nan")
238
+
239
+ data_matrix = [
240
+ [1, 1.1, "aa", 1, 1, True, inf, nan, dt],
241
+ [2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", dt],
242
+ [3, 3.33, "cccc", -3, "ccc", "true", inf, "NAN", "2017-01-01T01:23:45+0900"],
243
+ ]
244
+
245
+ dp_extractor.headers = ["int", "float", "str", "num", "mix", "bool", "inf", "nan", "time"]
246
+ col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(dp_matrix))
247
+
248
+ for col_idx, col_dp in enumerate(col_dp_list):
249
+ print(str(col_dp))
250
+
251
+ :Output:
252
+ ::
253
+
254
+ column=0, type=INTEGER, align=right, ascii_width=3, bit_len=2, int_digits=1, decimal_places=0
255
+ column=1, type=REAL_NUMBER, align=right, ascii_width=5, int_digits=1, decimal_places=(min=1, max=2)
256
+ column=2, type=STRING, align=left, ascii_width=4
257
+ column=3, type=REAL_NUMBER, align=right, ascii_width=4, int_digits=1, decimal_places=(min=0, max=1), extra_len=(min=0, max=1)
258
+ column=4, type=STRING, align=left, ascii_width=3, int_digits=1, decimal_places=(min=0, max=1)
259
+ column=5, type=BOOL, align=left, ascii_width=5
260
+ column=6, type=INFINITY, align=left, ascii_width=8
261
+ column=7, type=NAN, align=left, ascii_width=3
262
+ column=8, type=STRING, align=left, ascii_width=24
263
+
264
+
265
+ Full example source code can be found at *examples/py/to_column_dp_list.py*
266
+
267
+
268
+ Dependencies
269
+ ============
270
+ - Python 3.7+
271
+ - `Python package dependencies (automatically installed) <https://github.com/thombashi/DataProperty/network/dependencies>`__
272
+
273
+ Optional dependencies
274
+ ---------------------
275
+ - `loguru <https://github.com/Delgan/loguru>`__
276
+ - Used for logging if the package installed
llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/RECORD ADDED
@@ -0,0 +1,47 @@
1
+ DataProperty-1.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ DataProperty-1.0.1.dist-info/LICENSE,sha256=qT11vLB3TimQEGOAytrW3LLeGTxV1DX_xWujRaCLHcI,1084
3
+ DataProperty-1.0.1.dist-info/METADATA,sha256=BxNvMErHIPajm-sKqeSWNuN7mZwJU7L-m87uzOUQpb4,11519
4
+ DataProperty-1.0.1.dist-info/RECORD,,
5
+ DataProperty-1.0.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
6
+ DataProperty-1.0.1.dist-info/top_level.txt,sha256=RiW0aJCSmIPslrGSqg9wyPRas0Rl7Kcdi_fBBEd0-LY,13
7
+ dataproperty/__init__.py,sha256=y_LoBUs28gC7b7AXv49X1XCPHckXo3oKECpW-Oj6LbM,1308
8
+ dataproperty/__pycache__/__init__.cpython-310.pyc,,
9
+ dataproperty/__pycache__/__version__.cpython-310.pyc,,
10
+ dataproperty/__pycache__/_align.cpython-310.pyc,,
11
+ dataproperty/__pycache__/_align_getter.cpython-310.pyc,,
12
+ dataproperty/__pycache__/_base.cpython-310.pyc,,
13
+ dataproperty/__pycache__/_column.cpython-310.pyc,,
14
+ dataproperty/__pycache__/_common.cpython-310.pyc,,
15
+ dataproperty/__pycache__/_container.cpython-310.pyc,,
16
+ dataproperty/__pycache__/_converter.cpython-310.pyc,,
17
+ dataproperty/__pycache__/_dataproperty.cpython-310.pyc,,
18
+ dataproperty/__pycache__/_extractor.cpython-310.pyc,,
19
+ dataproperty/__pycache__/_formatter.cpython-310.pyc,,
20
+ dataproperty/__pycache__/_function.cpython-310.pyc,,
21
+ dataproperty/__pycache__/_interface.cpython-310.pyc,,
22
+ dataproperty/__pycache__/_line_break.cpython-310.pyc,,
23
+ dataproperty/__pycache__/_preprocessor.cpython-310.pyc,,
24
+ dataproperty/__pycache__/typing.cpython-310.pyc,,
25
+ dataproperty/__version__.py,sha256=67tYZapqaNY9QXFm4kAOxyg6b6T1ttw2NjFPHfyCkkc,201
26
+ dataproperty/_align.py,sha256=VQCp3HUN-rw5lDcG0CHwoQNwabSOwMF8Fpn52nHpQs8,535
27
+ dataproperty/_align_getter.py,sha256=GV8rvnGaF8-8C6E7SNa3SsXw-gp80jR93knG_XDwcZQ,833
28
+ dataproperty/_base.py,sha256=WfDF5FqUFRm9_Aw8T0H5AxyKyvaz4Fv3Z0x7lDzzLTM,2514
29
+ dataproperty/_column.py,sha256=Y7Xn16Jtc8vBMcqarrulNVzV4A3-TkYOQxkGXmup4lw,11653
30
+ dataproperty/_common.py,sha256=scfSVZRoBT74UIOYS99lZye06OUbT9347QpbxRhIi8M,1915
31
+ dataproperty/_container.py,sha256=NT-zFw68PqCCV8wcK7sTuIKlnW3eStVA0gkiO0DcBkY,5130
32
+ dataproperty/_converter.py,sha256=rEYWC1rcBIgi2WRM9PrLAycoOs9uSsYUsXaAlW5dWzM,3269
33
+ dataproperty/_dataproperty.py,sha256=Mq8J1pcJIqI2PbOfqH0CUF0aUzGhJnfdlTuzpz8-5wU,11321
34
+ dataproperty/_extractor.py,sha256=Rg_z5aKUGulUxi0Y3iGhLCEQ2nQpMYRbU8-Dd7XfyG4,25899
35
+ dataproperty/_formatter.py,sha256=nqQkEhtYKfG6WskuuN8_0mw3tpGNov8kJ6VBK36VYUA,3000
36
+ dataproperty/_function.py,sha256=h48XjTqYuXwFI1xeerFIIAlaWINxtLXEDw91ZuF_AuQ,3115
37
+ dataproperty/_interface.py,sha256=nronY0GKDo5AkgXjM7wvpYY8cx5SmpxpBiDLLbW6NSY,626
38
+ dataproperty/_line_break.py,sha256=FGjtuWKftOchoeJZJ9DxHJ9DUY0PPO_tPTiAM1e-Wck,114
39
+ dataproperty/_preprocessor.py,sha256=7v-Py61jZK9SkNrpaHrmJLdwMbjumpsfzk6JU2PiThw,5467
40
+ dataproperty/logger/__init__.py,sha256=2kFcgMA8P4-c51nShgJQsY31tbbLvvsfSGDLXTOj9ig,88
41
+ dataproperty/logger/__pycache__/__init__.cpython-310.pyc,,
42
+ dataproperty/logger/__pycache__/_logger.cpython-310.pyc,,
43
+ dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc,,
44
+ dataproperty/logger/_logger.py,sha256=edZ7M2Hf9zjSMr4iRi_IYAcf3l1EiLIVqhCEtf0AFHg,442
45
+ dataproperty/logger/_null_logger.py,sha256=xWCR2KAa2aKAcpKi8DosfCOgaRMb_YXr9MKrK7xMD-A,1071
46
+ dataproperty/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
47
+ dataproperty/typing.py,sha256=YhjN4wF_7uqG9tPUbFLFemWIzx3WgyJJFhTh62TyhJU,1403
llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.40.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ dataproperty
llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/LICENSE ADDED
@@ -0,0 +1,20 @@
+ Copyright (c) 2017-2021 Ingy döt Net
+ Copyright (c) 2006-2016 Kirill Simonov
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/METADATA ADDED
@@ -0,0 +1,46 @@
+ Metadata-Version: 2.1
+ Name: PyYAML
+ Version: 6.0.1
+ Summary: YAML parser and emitter for Python
+ Home-page: https://pyyaml.org/
+ Download-URL: https://pypi.org/project/PyYAML/
+ Author: Kirill Simonov
+ Author-email: [email protected]
+ License: MIT
+ Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues
+ Project-URL: CI, https://github.com/yaml/pyyaml/actions
+ Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation
+ Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core
+ Project-URL: Source Code, https://github.com/yaml/pyyaml
+ Platform: Any
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Cython
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: Implementation :: CPython
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: Text Processing :: Markup
+ Requires-Python: >=3.6
+ License-File: LICENSE
+
+ YAML is a data serialization format designed for human readability
+ and interaction with scripting languages. PyYAML is a YAML parser
+ and emitter for Python.
+
+ PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+ support, capable extension API, and sensible error messages. PyYAML
+ supports standard YAML tags and provides Python-specific tags that
+ allow to represent an arbitrary Python object.
+
+ PyYAML is applicable for a broad range of tasks from complex
+ configuration files to object serialization and persistence.
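The summary above describes PyYAML's parser and emitter roles; as a brief editorial sketch (not part of the packaged METADATA file, and limited to the widely documented safe_load/safe_dump API):

    import yaml

    # Parse a YAML document into plain Python objects.
    config = yaml.safe_load("server:\n  host: localhost\n  port: 8080\n")
    assert config["server"]["port"] == 8080

    # Emit the structure back out as YAML text.
    print(yaml.safe_dump(config, sort_keys=False))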
llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/RECORD ADDED
@@ -0,0 +1,43 @@
1
+ PyYAML-6.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ PyYAML-6.0.1.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101
3
+ PyYAML-6.0.1.dist-info/METADATA,sha256=UNNF8-SzzwOKXVo-kV5lXUGH2_wDWMBmGxqISpp5HQk,2058
4
+ PyYAML-6.0.1.dist-info/RECORD,,
5
+ PyYAML-6.0.1.dist-info/WHEEL,sha256=iZaXX0Td62Nww8bojl0E84uJHjT41csHPKZmbUBbJPs,152
6
+ PyYAML-6.0.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
7
+ _yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402
8
+ _yaml/__pycache__/__init__.cpython-310.pyc,,
9
+ yaml/__init__.py,sha256=bhl05qSeO-1ZxlSRjGrvl2m9nrXb1n9-GQatTN0Mrqc,12311
10
+ yaml/__pycache__/__init__.cpython-310.pyc,,
11
+ yaml/__pycache__/composer.cpython-310.pyc,,
12
+ yaml/__pycache__/constructor.cpython-310.pyc,,
13
+ yaml/__pycache__/cyaml.cpython-310.pyc,,
14
+ yaml/__pycache__/dumper.cpython-310.pyc,,
15
+ yaml/__pycache__/emitter.cpython-310.pyc,,
16
+ yaml/__pycache__/error.cpython-310.pyc,,
17
+ yaml/__pycache__/events.cpython-310.pyc,,
18
+ yaml/__pycache__/loader.cpython-310.pyc,,
19
+ yaml/__pycache__/nodes.cpython-310.pyc,,
20
+ yaml/__pycache__/parser.cpython-310.pyc,,
21
+ yaml/__pycache__/reader.cpython-310.pyc,,
22
+ yaml/__pycache__/representer.cpython-310.pyc,,
23
+ yaml/__pycache__/resolver.cpython-310.pyc,,
24
+ yaml/__pycache__/scanner.cpython-310.pyc,,
25
+ yaml/__pycache__/serializer.cpython-310.pyc,,
26
+ yaml/__pycache__/tokens.cpython-310.pyc,,
27
+ yaml/_yaml.cpython-310-x86_64-linux-gnu.so,sha256=_9iVrASatQgQSFXlKeCe2uK2TyKwk9nd61Cs_-fqAHM,2226000
28
+ yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883
29
+ yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639
30
+ yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851
31
+ yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837
32
+ yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006
33
+ yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
34
+ yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
35
+ yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061
36
+ yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
37
+ yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
38
+ yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794
39
+ yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190
40
+ yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004
41
+ yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279
42
+ yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
43
+ yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.40.0)
+ Root-Is-Purelib: false
+ Tag: cp310-cp310-manylinux_2_17_x86_64
+ Tag: cp310-cp310-manylinux2014_x86_64
+
llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ _yaml
+ yaml
llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "{}"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright 2013-2019 Nikolay Kim and Andrew Svetlov
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA ADDED
@@ -0,0 +1,128 @@
1
+ Metadata-Version: 2.1
2
+ Name: aiosignal
3
+ Version: 1.3.1
4
+ Summary: aiosignal: a list of registered asynchronous callbacks
5
+ Home-page: https://github.com/aio-libs/aiosignal
6
+ Maintainer: aiohttp team <[email protected]>
7
+ Maintainer-email: [email protected]
8
+ License: Apache 2.0
9
+ Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
10
+ Project-URL: CI: GitHub Actions, https://github.com/aio-libs/aiosignal/actions
11
+ Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/aiosignal
12
+ Project-URL: Docs: RTD, https://docs.aiosignal.org
13
+ Project-URL: GitHub: issues, https://github.com/aio-libs/aiosignal/issues
14
+ Project-URL: GitHub: repo, https://github.com/aio-libs/aiosignal
15
+ Classifier: License :: OSI Approved :: Apache Software License
16
+ Classifier: Intended Audience :: Developers
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3 :: Only
20
+ Classifier: Programming Language :: Python :: 3.7
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Programming Language :: Python :: 3.10
24
+ Classifier: Programming Language :: Python :: 3.11
25
+ Classifier: Development Status :: 5 - Production/Stable
26
+ Classifier: Operating System :: POSIX
27
+ Classifier: Operating System :: MacOS :: MacOS X
28
+ Classifier: Operating System :: Microsoft :: Windows
29
+ Classifier: Framework :: AsyncIO
30
+ Requires-Python: >=3.7
31
+ Description-Content-Type: text/x-rst
32
+ License-File: LICENSE
33
+ Requires-Dist: frozenlist (>=1.1.0)
34
+
35
+ =========
36
+ aiosignal
37
+ =========
38
+
39
+ .. image:: https://github.com/aio-libs/aiosignal/workflows/CI/badge.svg
40
+ :target: https://github.com/aio-libs/aiosignal/actions?query=workflow%3ACI
41
+ :alt: GitHub status for master branch
42
+
43
+ .. image:: https://codecov.io/gh/aio-libs/aiosignal/branch/master/graph/badge.svg
44
+ :target: https://codecov.io/gh/aio-libs/aiosignal
45
+ :alt: codecov.io status for master branch
46
+
47
+ .. image:: https://badge.fury.io/py/aiosignal.svg
48
+ :target: https://pypi.org/project/aiosignal
49
+ :alt: Latest PyPI package version
50
+
51
+ .. image:: https://readthedocs.org/projects/aiosignal/badge/?version=latest
52
+ :target: https://aiosignal.readthedocs.io/
53
+ :alt: Latest Read The Docs
54
+
55
+ .. image:: https://img.shields.io/discourse/topics?server=https%3A%2F%2Faio-libs.discourse.group%2F
56
+ :target: https://aio-libs.discourse.group/
57
+ :alt: Discourse group for io-libs
58
+
59
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
60
+ :target: https://gitter.im/aio-libs/Lobby
61
+ :alt: Chat on Gitter
62
+
63
+ Introduction
64
+ ============
65
+
66
+ A project to manage callbacks in `asyncio` projects.
67
+
68
+ ``Signal`` is a list of registered asynchronous callbacks.
69
+
70
+ The signal's life-cycle has two stages: after creation its content
71
+ could be filled by using standard list operations: ``sig.append()``
72
+ etc.
73
+
74
+ After you call ``sig.freeze()`` the signal is *frozen*: adding, removing
75
+ and dropping callbacks is forbidden.
76
+
77
+ The only available operation is calling the previously registered
78
+ callbacks by using ``await sig.send(data)``.
79
+
80
+ For concrete usage examples see the `Signals
81
+ <https://docs.aiohttp.org/en/stable/web_advanced.html#aiohttp-web-signals>
82
+ section of the `Web Server Advanced
83
+ <https://docs.aiohttp.org/en/stable/web_advanced.html>` chapter of the `aiohttp
84
+ documentation`_.
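To make the append/freeze/send lifecycle described above concrete, here is a small editorial sketch (not part of the packaged METADATA); it assumes only the documented Signal(owner) constructor, freeze(), and send() coroutine:

    import asyncio
    from aiosignal import Signal

    async def on_event(payload):
        print("received", payload)

    async def main():
        sig = Signal(owner="demo")  # owner is an arbitrary object, shown in repr()
        sig.append(on_event)        # register callbacks with normal list operations
        sig.freeze()                # after this, the callback list cannot change
        await sig.send({"x": 1})    # await every registered callback in order

    asyncio.run(main())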
85
+
86
+
87
+ Installation
88
+ ------------
89
+
90
+ ::
91
+
92
+ $ pip install aiosignal
93
+
94
+ The library requires Python 3.6 or newer.
95
+
96
+
97
+ Documentation
98
+ =============
99
+
100
+ https://aiosignal.readthedocs.io/
101
+
102
+ Communication channels
103
+ ======================
104
+
105
+ *gitter chat* https://gitter.im/aio-libs/Lobby
106
+
107
+ Requirements
108
+ ============
109
+
110
+ - Python >= 3.6
111
+ - frozenlist >= 1.0.0
112
+
113
+ License
114
+ =======
115
+
116
+ ``aiosignal`` is offered under the Apache 2 license.
117
+
118
+ Source code
119
+ ===========
120
+
121
+ The project is hosted on GitHub_
122
+
123
+ Please file an issue in the `bug tracker
124
+ <https://github.com/aio-libs/aiosignal/issues>`_ if you have found a bug
125
+ or have some suggestions to improve the library.
126
+
127
+ .. _GitHub: https://github.com/aio-libs/aiosignal
128
+ .. _aiohttp documentation: https://docs.aiohttp.org/
llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+ aiosignal-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ aiosignal-1.3.1.dist-info/LICENSE,sha256=b9UkPpLdf5jsacesN3co50kFcJ_1J6W_mNbQJjwE9bY,11332
+ aiosignal-1.3.1.dist-info/METADATA,sha256=c0HRnlYzfXKztZPTFDlPfygizTherhG5WdwXlvco0Ug,4008
+ aiosignal-1.3.1.dist-info/RECORD,,
+ aiosignal-1.3.1.dist-info/WHEEL,sha256=ZL1lC_LiPDNRgDnOl2taCMc83aPEUZgHHv2h-LDgdiM,92
+ aiosignal-1.3.1.dist-info/top_level.txt,sha256=z45aNOKGDdrI1roqZY3BGXQ22kJFPHBmVdwtLYLtXC0,10
+ aiosignal/__init__.py,sha256=zQNfFYRSd84bswvpFv8ZWjEr5DeYwV3LXbMSyo2222s,867
+ aiosignal/__init__.pyi,sha256=xeCddYSS8fZAkz8S4HuKSR2IDe3N7RW_LKcXDPPA1Xk,311
+ aiosignal/__pycache__/__init__.cpython-310.pyc,,
+ aiosignal/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.38.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ aiosignal
llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE ADDED
@@ -0,0 +1,27 @@
+ Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ a. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ b. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ c. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ DAMAGE.
llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA ADDED
@@ -0,0 +1,233 @@
1
+ Metadata-Version: 2.1
2
+ Name: mpmath
3
+ Version: 1.3.0
4
+ Summary: Python library for arbitrary-precision floating-point arithmetic
5
+ Home-page: http://mpmath.org/
6
+ Author: Fredrik Johansson
7
+ Author-email: [email protected]
8
+ License: BSD
9
+ Project-URL: Source, https://github.com/fredrik-johansson/mpmath
10
+ Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues
11
+ Project-URL: Documentation, http://mpmath.org/doc/current/
12
+ Classifier: License :: OSI Approved :: BSD License
13
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
14
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
15
+ Classifier: Programming Language :: Python
16
+ Classifier: Programming Language :: Python :: 2
17
+ Classifier: Programming Language :: Python :: 2.7
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.5
20
+ Classifier: Programming Language :: Python :: 3.6
21
+ Classifier: Programming Language :: Python :: 3.7
22
+ Classifier: Programming Language :: Python :: 3.8
23
+ Classifier: Programming Language :: Python :: 3.9
24
+ Classifier: Programming Language :: Python :: Implementation :: CPython
25
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
26
+ License-File: LICENSE
27
+ Provides-Extra: develop
28
+ Requires-Dist: pytest (>=4.6) ; extra == 'develop'
29
+ Requires-Dist: pycodestyle ; extra == 'develop'
30
+ Requires-Dist: pytest-cov ; extra == 'develop'
31
+ Requires-Dist: codecov ; extra == 'develop'
32
+ Requires-Dist: wheel ; extra == 'develop'
33
+ Provides-Extra: docs
34
+ Requires-Dist: sphinx ; extra == 'docs'
35
+ Provides-Extra: gmpy
36
+ Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy'
37
+ Provides-Extra: tests
38
+ Requires-Dist: pytest (>=4.6) ; extra == 'tests'
39
+
40
+ mpmath
41
+ ======
42
+
43
+ |pypi version| |Build status| |Code coverage status| |Zenodo Badge|
44
+
45
+ .. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg
46
+ :target: https://pypi.python.org/pypi/mpmath
47
+ .. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg
48
+ :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test
49
+ .. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg
50
+ :target: https://codecov.io/gh/fredrik-johansson/mpmath
51
+ .. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg
52
+ :target: https://zenodo.org/badge/latestdoi/2934512
53
+
54
+ A Python library for arbitrary-precision floating-point arithmetic.
55
+
56
+ Website: http://mpmath.org/
57
+ Main author: Fredrik Johansson <[email protected]>
58
+
59
+ Mpmath is free software released under the New BSD License (see the
60
+ LICENSE file for details)
61
+
62
+ 0. History and credits
63
+ ----------------------
64
+
65
+ The following people (among others) have contributed major patches
66
+ or new features to mpmath:
67
+
68
+ * Pearu Peterson <[email protected]>
69
+ * Mario Pernici <[email protected]>
70
+ * Ondrej Certik <[email protected]>
71
+ * Vinzent Steinberg <[email protected]>
72
+ * Nimish Telang <[email protected]>
73
+ * Mike Taschuk <[email protected]>
74
+ * Case Van Horsen <[email protected]>
75
+ * Jorn Baayen <[email protected]>
76
+ * Chris Smith <[email protected]>
77
+ * Juan Arias de Reyna <[email protected]>
78
+ * Ioannis Tziakos <[email protected]>
79
+ * Aaron Meurer <[email protected]>
80
+ * Stefan Krastanov <[email protected]>
81
+ * Ken Allen <[email protected]>
82
+ * Timo Hartmann <[email protected]>
83
+ * Sergey B Kirpichev <[email protected]>
84
+ * Kris Kuhlman <[email protected]>
85
+ * Paul Masson <[email protected]>
86
+ * Michael Kagalenko <[email protected]>
87
+ * Jonathan Warner <[email protected]>
88
+ * Max Gaukler <[email protected]>
89
+ * Guillermo Navas-Palencia <[email protected]>
90
+ * Nike Dattani <[email protected]>
91
+
92
+ Numerous other people have contributed by reporting bugs,
93
+ requesting new features, or suggesting improvements to the
94
+ documentation.
95
+
96
+ For a detailed changelog, including individual contributions,
97
+ see the CHANGES file.
98
+
99
+ Fredrik's work on mpmath during summer 2008 was sponsored by Google
100
+ as part of the Google Summer of Code program.
101
+
102
+ Fredrik's work on mpmath during summer 2009 was sponsored by the
103
+ American Institute of Mathematics under the support of the National Science
104
+ Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms).
105
+
106
+ Any opinions, findings, and conclusions or recommendations expressed in this
107
+ material are those of the author(s) and do not necessarily reflect the
108
+ views of the sponsors.
109
+
110
+ Credit also goes to:
111
+
112
+ * The authors of the GMP library and the Python wrapper
113
+ gmpy, enabling mpmath to become much faster at
114
+ high precision
115
+ * The authors of MPFR, pari/gp, MPFUN, and other arbitrary-
116
+ precision libraries, whose documentation has been helpful
117
+ for implementing many of the algorithms in mpmath
118
+ * Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik;
119
+ Wolfram Research for MathWorld and the Wolfram Functions site.
120
+ These are the main references used for special functions
121
+ implementations.
122
+ * George Brandl for developing the Sphinx documentation tool
123
+ used to build mpmath's documentation
124
+
125
+ Release history:
126
+
127
+ * Version 1.3.0 released on March 7, 2023
128
+ * Version 1.2.0 released on February 1, 2021
129
+ * Version 1.1.0 released on December 11, 2018
130
+ * Version 1.0.0 released on September 27, 2017
131
+ * Version 0.19 released on June 10, 2014
132
+ * Version 0.18 released on December 31, 2013
133
+ * Version 0.17 released on February 1, 2011
134
+ * Version 0.16 released on September 24, 2010
135
+ * Version 0.15 released on June 6, 2010
136
+ * Version 0.14 released on February 5, 2010
137
+ * Version 0.13 released on August 13, 2009
138
+ * Version 0.12 released on June 9, 2009
139
+ * Version 0.11 released on January 26, 2009
140
+ * Version 0.10 released on October 15, 2008
141
+ * Version 0.9 released on August 23, 2008
142
+ * Version 0.8 released on April 20, 2008
143
+ * Version 0.7 released on March 12, 2008
144
+ * Version 0.6 released on January 13, 2008
145
+ * Version 0.5 released on November 24, 2007
146
+ * Version 0.4 released on November 3, 2007
147
+ * Version 0.3 released on October 5, 2007
148
+ * Version 0.2 released on October 2, 2007
149
+ * Version 0.1 released on September 27, 2007
150
+
151
+ 1. Download & installation
152
+ --------------------------
153
+
154
+ Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested
155
+ with CPython 2.7, 3.5 through 3.7, and with PyPy.
156
+
157
+ The latest release of mpmath can be downloaded from the mpmath
158
+ website and from https://github.com/fredrik-johansson/mpmath/releases
159
+
160
+ It should also be available in the Python Package Index at
161
+ https://pypi.python.org/pypi/mpmath
162
+
163
+ To install the latest release of mpmath with pip, simply run
164
+
165
+ ``pip install mpmath``
166
+
167
+ Or unpack the mpmath archive and run
168
+
169
+ ``python setup.py install``
170
+
171
+ Mpmath can also be installed using
172
+
173
+ ``python -m easy_install mpmath``
174
+
175
+ The latest development code is available from
176
+ https://github.com/fredrik-johansson/mpmath
177
+
178
+ See the main documentation for more detailed instructions.
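+
+ As a quick sanity check after installing, a minimal interpreter session
+ might look like the following sketch (any of the install methods above
+ will do; the printed value is pi to 50 significant digits)::
+
+     >>> from mpmath import mp
+     >>> mp.dps = 50          # working precision in decimal digits
+     >>> print(mp.pi)
+     3.1415926535897932384626433832795028841971693993751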
179
+
180
+ 2. Running tests
181
+ ----------------
182
+
183
+ The unit tests in mpmath/tests/ can be run via the script
184
+ runtests.py, but it is recommended to run them with py.test
185
+ (https://pytest.org/), especially
186
+ to generate more useful reports in case there are failures.
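+
+ For example, one way to run the bundled tests against an installed copy
+ is to let pytest collect them (a sketch; it assumes pytest is available,
+ and any equivalent invocation works just as well)::
+
+     import pytest
+
+     # Collect and run the tests shipped inside the mpmath package.
+     pytest.main(["--pyargs", "mpmath"])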
187
+
188
+ You may also want to check out the demo scripts in the demo
189
+ directory.
190
+
191
+ The master branch is automatically tested by Travis CI.
192
+
193
+ 3. Documentation
194
+ ----------------
195
+
196
+ Documentation in reStructuredText format is available in the
197
+ doc directory included with the source package. These files
198
+ are human-readable, but can be compiled to prettier HTML using
199
+ the build.py script (requires Sphinx, http://sphinx.pocoo.org/).
200
+
201
+ See setup.txt in the documentation for more information.
202
+
203
+ The most recent documentation is also available in HTML format:
204
+
205
+ http://mpmath.org/doc/current/
206
+
207
+ 4. Known problems
208
+ -----------------
209
+
210
+ Mpmath is a work in progress. Major issues include:
211
+
212
+ * Some functions may return incorrect values when given extremely
213
+ large arguments or arguments very close to singularities.
214
+
215
+ * Directed rounding works for arithmetic operations. It is implemented
216
+ heuristically for other operations, and their results may be off by one
217
+ or two units in the last place (even if otherwise accurate).
218
+
219
+ * Some IEEE 754 features are not available. Infinities and NaN are
220
+ partially supported; denormal rounding is currently not available
221
+ at all.
222
+
223
+ * The interface for switching precision and rounding is not finalized.
224
+ The current method is not threadsafe.
225
+
226
+ 5. Help and bug reports
227
+ -----------------------
228
+
229
+ General questions and comments can be sent to the mpmath mailing list,
230
231
+
232
+ You can also report bugs and send patches to the mpmath issue tracker,
233
+ https://github.com/fredrik-johansson/mpmath/issues
llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,180 @@
1
+ mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537
3
+ mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630
4
+ mpmath-1.3.0.dist-info/RECORD,,
5
+ mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
6
+ mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7
7
+ mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765
8
+ mpmath/__pycache__/__init__.cpython-310.pyc,,
9
+ mpmath/__pycache__/ctx_base.cpython-310.pyc,,
10
+ mpmath/__pycache__/ctx_fp.cpython-310.pyc,,
11
+ mpmath/__pycache__/ctx_iv.cpython-310.pyc,,
12
+ mpmath/__pycache__/ctx_mp.cpython-310.pyc,,
13
+ mpmath/__pycache__/ctx_mp_python.cpython-310.pyc,,
14
+ mpmath/__pycache__/function_docs.cpython-310.pyc,,
15
+ mpmath/__pycache__/identification.cpython-310.pyc,,
16
+ mpmath/__pycache__/math2.cpython-310.pyc,,
17
+ mpmath/__pycache__/rational.cpython-310.pyc,,
18
+ mpmath/__pycache__/usertools.cpython-310.pyc,,
19
+ mpmath/__pycache__/visualization.cpython-310.pyc,,
20
+ mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162
21
+ mpmath/calculus/__pycache__/__init__.cpython-310.pyc,,
22
+ mpmath/calculus/__pycache__/approximation.cpython-310.pyc,,
23
+ mpmath/calculus/__pycache__/calculus.cpython-310.pyc,,
24
+ mpmath/calculus/__pycache__/differentiation.cpython-310.pyc,,
25
+ mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc,,
26
+ mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc,,
27
+ mpmath/calculus/__pycache__/odes.cpython-310.pyc,,
28
+ mpmath/calculus/__pycache__/optimization.cpython-310.pyc,,
29
+ mpmath/calculus/__pycache__/polynomials.cpython-310.pyc,,
30
+ mpmath/calculus/__pycache__/quadrature.cpython-310.pyc,,
31
+ mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817
32
+ mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112
33
+ mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226
34
+ mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306
35
+ mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056
36
+ mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908
37
+ mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856
38
+ mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877
39
+ mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432
40
+ mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985
41
+ mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572
42
+ mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211
43
+ mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452
44
+ mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815
45
+ mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512
46
+ mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330
47
+ mpmath/functions/__pycache__/__init__.cpython-310.pyc,,
48
+ mpmath/functions/__pycache__/bessel.cpython-310.pyc,,
49
+ mpmath/functions/__pycache__/elliptic.cpython-310.pyc,,
50
+ mpmath/functions/__pycache__/expintegrals.cpython-310.pyc,,
51
+ mpmath/functions/__pycache__/factorials.cpython-310.pyc,,
52
+ mpmath/functions/__pycache__/functions.cpython-310.pyc,,
53
+ mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc,,
54
+ mpmath/functions/__pycache__/orthogonal.cpython-310.pyc,,
55
+ mpmath/functions/__pycache__/qfunctions.cpython-310.pyc,,
56
+ mpmath/functions/__pycache__/rszeta.cpython-310.pyc,,
57
+ mpmath/functions/__pycache__/signals.cpython-310.pyc,,
58
+ mpmath/functions/__pycache__/theta.cpython-310.pyc,,
59
+ mpmath/functions/__pycache__/zeta.cpython-310.pyc,,
60
+ mpmath/functions/__pycache__/zetazeros.cpython-310.pyc,,
61
+ mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938
62
+ mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237
63
+ mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644
64
+ mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273
65
+ mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100
66
+ mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570
67
+ mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097
68
+ mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633
69
+ mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184
70
+ mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703
71
+ mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320
72
+ mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410
73
+ mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858
74
+ mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253
75
+ mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790
76
+ mpmath/libmp/__pycache__/__init__.cpython-310.pyc,,
77
+ mpmath/libmp/__pycache__/backend.cpython-310.pyc,,
78
+ mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc,,
79
+ mpmath/libmp/__pycache__/libelefun.cpython-310.pyc,,
80
+ mpmath/libmp/__pycache__/libhyper.cpython-310.pyc,,
81
+ mpmath/libmp/__pycache__/libintmath.cpython-310.pyc,,
82
+ mpmath/libmp/__pycache__/libmpc.cpython-310.pyc,,
83
+ mpmath/libmp/__pycache__/libmpf.cpython-310.pyc,,
84
+ mpmath/libmp/__pycache__/libmpi.cpython-310.pyc,,
85
+ mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360
86
+ mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469
87
+ mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861
88
+ mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624
89
+ mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688
90
+ mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875
91
+ mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021
92
+ mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622
93
+ mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561
94
+ mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94
95
+ mpmath/matrices/__pycache__/__init__.cpython-310.pyc,,
96
+ mpmath/matrices/__pycache__/calculus.cpython-310.pyc,,
97
+ mpmath/matrices/__pycache__/eigen.cpython-310.pyc,,
98
+ mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc,,
99
+ mpmath/matrices/__pycache__/linalg.cpython-310.pyc,,
100
+ mpmath/matrices/__pycache__/matrices.cpython-310.pyc,,
101
+ mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609
102
+ mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394
103
+ mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534
104
+ mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958
105
+ mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331
106
+ mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976
107
+ mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
108
+ mpmath/tests/__pycache__/__init__.cpython-310.pyc,,
109
+ mpmath/tests/__pycache__/extratest_gamma.cpython-310.pyc,,
110
+ mpmath/tests/__pycache__/extratest_zeta.cpython-310.pyc,,
111
+ mpmath/tests/__pycache__/runtests.cpython-310.pyc,,
112
+ mpmath/tests/__pycache__/test_basic_ops.cpython-310.pyc,,
113
+ mpmath/tests/__pycache__/test_bitwise.cpython-310.pyc,,
114
+ mpmath/tests/__pycache__/test_calculus.cpython-310.pyc,,
115
+ mpmath/tests/__pycache__/test_compatibility.cpython-310.pyc,,
116
+ mpmath/tests/__pycache__/test_convert.cpython-310.pyc,,
117
+ mpmath/tests/__pycache__/test_diff.cpython-310.pyc,,
118
+ mpmath/tests/__pycache__/test_division.cpython-310.pyc,,
119
+ mpmath/tests/__pycache__/test_eigen.cpython-310.pyc,,
120
+ mpmath/tests/__pycache__/test_eigen_symmetric.cpython-310.pyc,,
121
+ mpmath/tests/__pycache__/test_elliptic.cpython-310.pyc,,
122
+ mpmath/tests/__pycache__/test_fp.cpython-310.pyc,,
123
+ mpmath/tests/__pycache__/test_functions.cpython-310.pyc,,
124
+ mpmath/tests/__pycache__/test_functions2.cpython-310.pyc,,
125
+ mpmath/tests/__pycache__/test_gammazeta.cpython-310.pyc,,
126
+ mpmath/tests/__pycache__/test_hp.cpython-310.pyc,,
127
+ mpmath/tests/__pycache__/test_identify.cpython-310.pyc,,
128
+ mpmath/tests/__pycache__/test_interval.cpython-310.pyc,,
129
+ mpmath/tests/__pycache__/test_levin.cpython-310.pyc,,
130
+ mpmath/tests/__pycache__/test_linalg.cpython-310.pyc,,
131
+ mpmath/tests/__pycache__/test_matrices.cpython-310.pyc,,
132
+ mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc,,
133
+ mpmath/tests/__pycache__/test_ode.cpython-310.pyc,,
134
+ mpmath/tests/__pycache__/test_pickle.cpython-310.pyc,,
135
+ mpmath/tests/__pycache__/test_power.cpython-310.pyc,,
136
+ mpmath/tests/__pycache__/test_quad.cpython-310.pyc,,
137
+ mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc,,
138
+ mpmath/tests/__pycache__/test_special.cpython-310.pyc,,
139
+ mpmath/tests/__pycache__/test_str.cpython-310.pyc,,
140
+ mpmath/tests/__pycache__/test_summation.cpython-310.pyc,,
141
+ mpmath/tests/__pycache__/test_trig.cpython-310.pyc,,
142
+ mpmath/tests/__pycache__/test_visualization.cpython-310.pyc,,
143
+ mpmath/tests/__pycache__/torture.cpython-310.pyc,,
144
+ mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228
145
+ mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003
146
+ mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189
147
+ mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348
148
+ mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686
149
+ mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187
150
+ mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306
151
+ mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834
152
+ mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466
153
+ mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340
154
+ mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905
155
+ mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778
156
+ mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225
157
+ mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997
158
+ mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955
159
+ mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990
160
+ mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917
161
+ mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461
162
+ mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692
163
+ mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527
164
+ mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090
165
+ mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440
166
+ mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944
167
+ mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196
168
+ mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822
169
+ mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401
170
+ mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227
171
+ mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893
172
+ mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132
173
+ mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848
174
+ mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544
175
+ mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035
176
+ mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799
177
+ mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944
178
+ mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868
179
+ mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029
180
+ mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627
llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.38.4)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ mpmath
llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,37 @@
1
+ NetworkX is distributed with the 3-clause BSD license.
2
+
3
+ ::
4
+
5
+ Copyright (C) 2004-2024, NetworkX Developers
6
+ Aric Hagberg <[email protected]>
7
+ Dan Schult <[email protected]>
8
+ Pieter Swart <[email protected]>
9
+ All rights reserved.
10
+
11
+ Redistribution and use in source and binary forms, with or without
12
+ modification, are permitted provided that the following conditions are
13
+ met:
14
+
15
+ * Redistributions of source code must retain the above copyright
16
+ notice, this list of conditions and the following disclaimer.
17
+
18
+ * Redistributions in binary form must reproduce the above
19
+ copyright notice, this list of conditions and the following
20
+ disclaimer in the documentation and/or other materials provided
21
+ with the distribution.
22
+
23
+ * Neither the name of the NetworkX Developers nor the names of its
24
+ contributors may be used to endorse or promote products derived
25
+ from this software without specific prior written permission.
26
+
27
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/METADATA ADDED
@@ -0,0 +1,133 @@
1
+ Metadata-Version: 2.1
2
+ Name: networkx
3
+ Version: 3.3
4
+ Summary: Python package for creating and manipulating graphs and networks
5
+ Author-email: Aric Hagberg <[email protected]>
6
+ Maintainer-email: NetworkX Developers <[email protected]>
7
+ Project-URL: Homepage, https://networkx.org/
8
+ Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues
9
+ Project-URL: Documentation, https://networkx.org/documentation/stable/
10
+ Project-URL: Source Code, https://github.com/networkx/networkx
11
+ Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math
12
+ Platform: Linux
13
+ Platform: Mac OSX
14
+ Platform: Windows
15
+ Platform: Unix
16
+ Classifier: Development Status :: 5 - Production/Stable
17
+ Classifier: Intended Audience :: Developers
18
+ Classifier: Intended Audience :: Science/Research
19
+ Classifier: License :: OSI Approved :: BSD License
20
+ Classifier: Operating System :: OS Independent
21
+ Classifier: Programming Language :: Python :: 3
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3.12
25
+ Classifier: Programming Language :: Python :: 3 :: Only
26
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
27
+ Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
28
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
29
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
30
+ Classifier: Topic :: Scientific/Engineering :: Physics
31
+ Requires-Python: >=3.10
32
+ Description-Content-Type: text/x-rst
33
+ License-File: LICENSE.txt
34
+ Provides-Extra: default
35
+ Requires-Dist: numpy >=1.23 ; extra == 'default'
36
+ Requires-Dist: scipy !=1.11.0,!=1.11.1,>=1.9 ; extra == 'default'
37
+ Requires-Dist: matplotlib >=3.6 ; extra == 'default'
38
+ Requires-Dist: pandas >=1.4 ; extra == 'default'
39
+ Provides-Extra: developer
40
+ Requires-Dist: changelist ==0.5 ; extra == 'developer'
41
+ Requires-Dist: pre-commit >=3.2 ; extra == 'developer'
42
+ Requires-Dist: mypy >=1.1 ; extra == 'developer'
43
+ Requires-Dist: rtoml ; extra == 'developer'
44
+ Provides-Extra: doc
45
+ Requires-Dist: sphinx >=7 ; extra == 'doc'
46
+ Requires-Dist: pydata-sphinx-theme >=0.14 ; extra == 'doc'
47
+ Requires-Dist: sphinx-gallery >=0.14 ; extra == 'doc'
48
+ Requires-Dist: numpydoc >=1.7 ; extra == 'doc'
49
+ Requires-Dist: pillow >=9.4 ; extra == 'doc'
50
+ Requires-Dist: texext >=0.6.7 ; extra == 'doc'
51
+ Requires-Dist: myst-nb >=1.0 ; extra == 'doc'
52
+ Provides-Extra: extra
53
+ Requires-Dist: lxml >=4.6 ; extra == 'extra'
54
+ Requires-Dist: pygraphviz >=1.12 ; extra == 'extra'
55
+ Requires-Dist: pydot >=2.0 ; extra == 'extra'
56
+ Requires-Dist: sympy >=1.10 ; extra == 'extra'
57
+ Provides-Extra: test
58
+ Requires-Dist: pytest >=7.2 ; extra == 'test'
59
+ Requires-Dist: pytest-cov >=4.0 ; extra == 'test'
60
+
61
+ NetworkX
62
+ ========
63
+
64
+
65
+ .. image:: https://github.com/networkx/networkx/workflows/test/badge.svg?branch=main
66
+ :target: https://github.com/networkx/networkx/actions?query=workflow%3A%22test%22
67
+
68
+ .. image:: https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg
69
+ :target: https://app.codecov.io/gh/networkx/networkx/branch/main
70
+
71
+ .. image:: https://img.shields.io/github/labels/networkx/networkx/Good%20First%20Issue?color=green&label=Contribute%20&style=flat-square
72
+ :target: https://github.com/networkx/networkx/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22
73
+
74
+
75
+ NetworkX is a Python package for the creation, manipulation,
76
+ and study of the structure, dynamics, and functions
77
+ of complex networks.
78
+
79
+ - **Website (including documentation):** https://networkx.org
80
+ - **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss
81
+ - **Source:** https://github.com/networkx/networkx
82
+ - **Bug reports:** https://github.com/networkx/networkx/issues
83
+ - **Report a security vulnerability:** https://tidelift.com/security
84
+ - **Tutorial:** https://networkx.org/documentation/latest/tutorial.html
85
+ - **GitHub Discussions:** https://github.com/networkx/networkx/discussions
86
+
87
+ Simple example
88
+ --------------
89
+
90
+ Find the shortest path between two nodes in an undirected graph:
91
+
92
+ .. code:: pycon
93
+
94
+ >>> import networkx as nx
95
+ >>> G = nx.Graph()
96
+ >>> G.add_edge("A", "B", weight=4)
97
+ >>> G.add_edge("B", "D", weight=2)
98
+ >>> G.add_edge("A", "C", weight=3)
99
+ >>> G.add_edge("C", "D", weight=4)
100
+ >>> nx.shortest_path(G, "A", "D", weight="weight")
101
+ ['A', 'B', 'D']
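+
+ As a small follow-up sketch using the same graph ``G``, the weighted
+ length of that path can be queried directly (4 + 2 = 6 here):
+
+ .. code:: pycon
+
+     >>> nx.shortest_path_length(G, "A", "D", weight="weight")
+     6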
102
+
103
+ Install
104
+ -------
105
+
106
+ Install the latest version of NetworkX::
107
+
108
+ $ pip install networkx
109
+
110
+ Install with all optional dependencies::
111
+
112
+ $ pip install networkx[all]
113
+
114
+ For additional details, please see `INSTALL.rst`.
115
+
116
+ Bugs
117
+ ----
118
+
119
+ Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_.
120
+ Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_
121
+ and create a pull request (PR). We welcome all changes, big or small, and we
122
+ will help you make the PR if you are new to `git` (just ask on the issue and/or
123
+ see `CONTRIBUTING.rst`).
124
+
125
+ License
126
+ -------
127
+
128
+ Released under the 3-Clause BSD license (see `LICENSE.txt`)::
129
+
130
+ Copyright (C) 2004-2024 NetworkX Developers
131
+ Aric Hagberg <[email protected]>
132
+ Dan Schult <[email protected]>
133
+ Pieter Swart <[email protected]>
llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/RECORD ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.43.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
1
+ [networkx.backends]
2
+ nx-loopback = networkx.classes.tests.dispatch_interface:dispatcher
llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ networkx
llmeval-env/lib/python3.10/site-packages/pandas/arrays/__init__.py ADDED
@@ -0,0 +1,53 @@
1
+ """
2
+ All of pandas' ExtensionArrays.
3
+
4
+ See :ref:`extending.extension-types` for more.
5
+ """
6
+ from pandas.core.arrays import (
7
+ ArrowExtensionArray,
8
+ ArrowStringArray,
9
+ BooleanArray,
10
+ Categorical,
11
+ DatetimeArray,
12
+ FloatingArray,
13
+ IntegerArray,
14
+ IntervalArray,
15
+ NumpyExtensionArray,
16
+ PeriodArray,
17
+ SparseArray,
18
+ StringArray,
19
+ TimedeltaArray,
20
+ )
21
+
22
+ __all__ = [
23
+ "ArrowExtensionArray",
24
+ "ArrowStringArray",
25
+ "BooleanArray",
26
+ "Categorical",
27
+ "DatetimeArray",
28
+ "FloatingArray",
29
+ "IntegerArray",
30
+ "IntervalArray",
31
+ "NumpyExtensionArray",
32
+ "PeriodArray",
33
+ "SparseArray",
34
+ "StringArray",
35
+ "TimedeltaArray",
36
+ ]
37
+
38
+
39
+ def __getattr__(name: str) -> type[NumpyExtensionArray]:
40
+ if name == "PandasArray":
41
+ # GH#53694
42
+ import warnings
43
+
44
+ from pandas.util._exceptions import find_stack_level
45
+
46
+ warnings.warn(
47
+ "PandasArray has been renamed NumpyExtensionArray. Use that "
48
+ "instead. This alias will be removed in a future version.",
49
+ FutureWarning,
50
+ stacklevel=find_stack_level(),
51
+ )
52
+ return NumpyExtensionArray
53
+ raise AttributeError(f"module 'pandas.arrays' has no attribute '{name}'")
llmeval-env/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB).
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply.cpython-310.pyc ADDED
Binary file (23.7 kB).
 
llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (350 Bytes).
 
llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc ADDED
Binary file (4.3 kB).
 
llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc ADDED
Binary file (4.38 kB).
 
llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc ADDED
Binary file (14.9 kB).
 
llmeval-env/lib/python3.10/site-packages/safetensors/mlx.py ADDED
@@ -0,0 +1,138 @@
1
+ import os
2
+ from typing import Dict, Optional, Union
3
+
4
+ import numpy as np
5
+
6
+ import mlx.core as mx
7
+ from safetensors import numpy, safe_open
8
+
9
+
10
+ def save(tensors: Dict[str, mx.array], metadata: Optional[Dict[str, str]] = None) -> bytes:
11
+ """
12
+ Saves a dictionary of tensors into raw bytes in safetensors format.
13
+
14
+ Args:
15
+ tensors (`Dict[str, mx.array]`):
16
+ The incoming tensors. Tensors need to be contiguous and dense.
17
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
18
+ Optional text only metadata you might want to save in your header.
19
+ For instance it can be useful to specify more about the underlying
20
+ tensors. This is purely informative and does not affect tensor loading.
21
+
22
+ Returns:
23
+ `bytes`: The raw bytes representing the format
24
+
25
+ Example:
26
+
27
+ ```python
28
+ from safetensors.mlx import save
29
+ import mlx.core as mx
30
+
31
+ tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))}
32
+ byte_data = save(tensors)
33
+ ```
34
+ """
35
+ np_tensors = _mx2np(tensors)
36
+ return numpy.save(np_tensors, metadata=metadata)
37
+
38
+
39
+ def save_file(
40
+ tensors: Dict[str, mx.array],
41
+ filename: Union[str, os.PathLike],
42
+ metadata: Optional[Dict[str, str]] = None,
43
+ ) -> None:
44
+ """
45
+ Saves a dictionary of tensors into a file in safetensors format.
46
+
47
+ Args:
48
+ tensors (`Dict[str, mx.array]`):
49
+ The incoming tensors. Tensors need to be contiguous and dense.
50
+ filename (`str`, or `os.PathLike`):
51
+ The filename we're saving into.
52
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
53
+ Optional text only metadata you might want to save in your header.
54
+ For instance it can be useful to specify more about the underlying
55
+ tensors. This is purely informative and does not affect tensor loading.
56
+
57
+ Returns:
58
+ `None`
59
+
60
+ Example:
61
+
62
+ ```python
63
+ from safetensors.mlx import save_file
64
+ import mlx.core as mx
65
+
66
+ tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))}
67
+ save_file(tensors, "model.safetensors")
68
+ ```
69
+ """
70
+ np_tensors = _mx2np(tensors)
71
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
72
+
73
+
74
+ def load(data: bytes) -> Dict[str, mx.array]:
75
+ """
76
+ Loads a safetensors file into MLX format from pure bytes.
77
+
78
+ Args:
79
+ data (`bytes`):
80
+ The content of a safetensors file
81
+
82
+ Returns:
83
+ `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array`
84
+
85
+ Example:
86
+
87
+ ```python
88
+ from safetensors.mlx import load
89
+
90
+ file_path = "./my_folder/bert.safetensors"
91
+ with open(file_path, "rb") as f:
92
+ data = f.read()
93
+
94
+ loaded = load(data)
95
+ ```
96
+ """
97
+ flat = numpy.load(data)
98
+ return _np2mx(flat)
99
+
100
+
101
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, mx.array]:
102
+ """
103
+ Loads a safetensors file into MLX format.
104
+
105
+ Args:
106
+ filename (`str`, or `os.PathLike`):
107
+ The name of the file which contains the tensors
108
+
109
+ Returns:
110
+ `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array`
111
+
112
+ Example:
113
+
114
+ ```python
115
+ from safetensors.mlx import load_file
116
+
117
+ file_path = "./my_folder/bert.safetensors"
118
+ loaded = load_file(file_path)
119
+ ```
120
+ """
121
+ result = {}
122
+ with safe_open(filename, framework="mlx") as f:
123
+ for k in f.keys():
124
+ result[k] = f.get_tensor(k)
125
+ return result
126
+
127
+
128
+ def _np2mx(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, mx.array]:
129
+ for k, v in numpy_dict.items():
130
+ numpy_dict[k] = mx.array(v)
131
+ return numpy_dict
132
+
133
+
134
+ def _mx2np(mx_dict: Dict[str, mx.array]) -> Dict[str, np.ndarray]:
135
+ new_dict = {}
136
+ for k, v in mx_dict.items():
137
+ new_dict[k] = np.asarray(v)
138
+ return new_dict
llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2020 Tsuyoshi Hombashi
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/METADATA ADDED
@@ -0,0 +1,171 @@
1
+ Metadata-Version: 2.1
2
+ Name: tcolorpy
3
+ Version: 0.1.6
4
+ Summary: tcolorpy is a Python library to apply true color to terminal text.
5
+ Home-page: https://github.com/thombashi/tcolorpy
6
+ Author: Tsuyoshi Hombashi
7
+ Author-email: [email protected]
8
+ License: MIT License
9
+ Project-URL: Changelog, https://github.com/thombashi/tcolorpy/blob/master/CHANGELOG.md
10
+ Project-URL: Source, https://github.com/thombashi/tcolorpy
11
+ Project-URL: Tracker, https://github.com/thombashi/tcolorpy/issues
12
+ Keywords: ANSI escape,terminal color,truecolor
13
+ Classifier: Development Status :: 4 - Beta
14
+ Classifier: Intended Audience :: Information Technology
15
+ Classifier: License :: OSI Approved :: MIT License
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.7
19
+ Classifier: Programming Language :: Python :: 3.8
20
+ Classifier: Programming Language :: Python :: 3.9
21
+ Classifier: Programming Language :: Python :: 3.10
22
+ Classifier: Programming Language :: Python :: 3.11
23
+ Classifier: Programming Language :: Python :: 3.12
24
+ Classifier: Programming Language :: Python :: 3 :: Only
25
+ Classifier: Programming Language :: Python :: Implementation :: CPython
26
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
27
+ Classifier: Topic :: Software Development :: Libraries
28
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
29
+ Classifier: Topic :: Terminals
30
+ Classifier: Topic :: Text Processing
31
+ Classifier: Typing :: Typed
32
+ Requires-Python: >=3.7
33
+ Description-Content-Type: text/x-rst
34
+ License-File: LICENSE
35
+ Provides-Extra: test
36
+ Requires-Dist: pytest >=6.0.1 ; extra == 'test'
37
+ Requires-Dist: pytest-md-report >=0.5 ; extra == 'test'
38
+
39
+ .. contents:: **tcolorpy**
40
+ :backlinks: top
41
+ :depth: 2
42
+
43
+
44
+ Summary
45
+ ============================================
46
+ tcolorpy is a Python library to apply true color to terminal text.
47
+
48
+ |PyPI pkg ver| |conda pkg ver| |Supported Python implementations| |Supported Python versions| |CI status| |CodeQL| |coverage|
49
+
50
+ .. |PyPI pkg ver| image:: https://badge.fury.io/py/tcolorpy.svg
51
+ :target: https://badge.fury.io/py/tcolorpy
52
+ :alt: PyPI package version
53
+
54
+ .. |conda pkg ver| image:: https://anaconda.org/conda-forge/tcolorpy/badges/version.svg
55
+ :target: https://anaconda.org/conda-forge/tcolorpy
56
+ :alt: conda-forge package version
57
+
58
+ .. |Supported Python implementations| image:: https://img.shields.io/pypi/implementation/tcolorpy.svg
59
+ :target: https://pypi.org/project/tcolorpy
60
+ :alt: Supported Python implementations
61
+
62
+ .. |Supported Python versions| image:: https://img.shields.io/pypi/pyversions/tcolorpy.svg
63
+ :target: https://pypi.org/project/tcolorpy
64
+ :alt: Supported Python versions
65
+
66
+ .. |CI status| image:: https://github.com/thombashi/tcolorpy/actions/workflows/ci.yml/badge.svg
67
+ :target: https://github.com/thombashi/tcolorpy/actions/workflows/ci.yml
68
+ :alt: CI status of Linux/macOS/Windows
69
+
70
+ .. |CodeQL| image:: https://github.com/thombashi/tcolorpy/actions/workflows/github-code-scanning/codeql/badge.svg
71
+ :target: https://github.com/thombashi/tcolorpy/actions/workflows/github-code-scanning/codeql
72
+ :alt: CodeQL
73
+
74
+ .. |coverage| image:: https://coveralls.io/repos/github/thombashi/tcolorpy/badge.svg?branch=master
75
+ :target: https://coveralls.io/github/thombashi/tcolorpy?branch=master
76
+ :alt: Test coverage: coveralls
77
+
78
+
79
+ Installation
80
+ ============================================
81
+
82
+ Installation: pip
83
+ ------------------------------
84
+ ::
85
+
86
+ pip install tcolorpy
87
+
88
+ Installation: conda
89
+ ------------------------------
90
+ ::
91
+
92
+ conda install -c conda-forge tcolorpy
93
+
94
+
95
+ Usage
96
+ ============================================
97
+
98
+ Library usage
99
+ --------------------------------------------
100
+
101
+ :Sample Code:
102
+ .. code-block:: python
103
+
104
+ from tcolorpy import tcolor
105
+
106
+ print(tcolor("tcolopy example", color="#ee1177", styles=["bold", "italic", "underline"]))
107
+
108
+ :Output:
109
+ .. figure:: https://cdn.jsdelivr.net/gh/thombashi/tcolorpy@master/ss/oneline.png
110
+ :scale: 60%
111
+ :alt: https://github.com/thombashi/tcolorpy/blob/master/ss/oneline.png
112
+
113
+ You can set the following ``tcolor`` arguments:
114
+
115
+ - ``color``/``bg_color``
116
+ - color names (``"red"``, ``"green"``, etc.) or color code (``"#RRGGBB"``)
117
+ - ``styles``
118
+ - ``"bold"``, ``"italic"``, etc.
119
+
120
+
121
+ Other examples
122
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
123
+ Apply true color and styles to text:
124
+
125
+ .. figure:: https://cdn.jsdelivr.net/gh/thombashi/tcolorpy@master/ss/styles.png
126
+ :scale: 60%
127
+ :alt: https://github.com/thombashi/tcolorpy/blob/master/ss/styles.png
128
+
129
+ `example source code <https://github.com/thombashi/tcolorpy/blob/master/examples/ansi_styles.py>`__
130
+
131
+ You can also specify colors by name:
132
+
133
+ .. figure:: https://cdn.jsdelivr.net/gh/thombashi/tcolorpy@master/ss/ansi_colors.png
134
+ :scale: 60%
135
+ :alt: https://github.com/thombashi/tcolorpy/blob/master/ss/ansi_colors.png
136
+
137
+ `example source code <https://github.com/thombashi/tcolorpy/blob/master/examples/ansi_colors.py>`__
138
+
139
+
140
+ CLI usage
141
+ --------------------------------------------
142
+ ``tcolorpy`` can be used via CLI:
143
+
144
+ ::
145
+
146
+ $ python3 -m tcolorpy "tcolopy example" -c "#ee1177" -s bold,italic,underline
147
+
148
+ Command help
149
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
150
+ ::
151
+
152
+ usage: __main__.py [-h] [-c COLOR] [-b BG_COLOR] [-s STYLES] [--encode ENCODE] string
153
+
154
+ positional arguments:
155
+ string string to apply styles.
156
+
157
+ options:
158
+ -h, --help show this help message and exit
159
+ -c COLOR, --color COLOR
160
+ specify a color code (#XXXXXX) or a name. valid names are: black, red, green, yellow, blue, magenta, cyan, white, lightblack, lightred, lightgreen, lightyellow, lightblue, lightmagenta, lightcyan, lightwhite
161
+ -b BG_COLOR, --bg-color BG_COLOR
162
+ specify a background color code (#XXXXXX) or a name. valid names are: black, red, green, yellow, blue, magenta, cyan, white, lightblack, lightred, lightgreen, lightyellow, lightblue, lightmagenta, lightcyan, lightwhite
163
+ -s STYLES, --styles STYLES
164
+ specify a comma-separated style. valid values are: bold, dim, italic, underline, blink, invert, strike
165
+ --encode ENCODE output a text encoded with the specified encoding
166
+
167
+
168
+ Dependencies
169
+ ============================================
170
+ Python 3.7+
171
+ No external dependencies.
llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
1
+ tcolorpy-0.1.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ tcolorpy-0.1.6.dist-info/LICENSE,sha256=9BoEVtXyu6Jf1NflC1GpXeMEdw_x21p5UV0DOXqRTY0,1074
3
+ tcolorpy-0.1.6.dist-info/METADATA,sha256=IDGYAt_oFtLBO4jHLKx8SETH0FP33K-RaszTkTLhMes,6358
4
+ tcolorpy-0.1.6.dist-info/RECORD,,
5
+ tcolorpy-0.1.6.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
6
+ tcolorpy-0.1.6.dist-info/top_level.txt,sha256=g8LDaQz0FVP61jibPz7OTwQqiseVV9pxUYDeGp2lFAI,9
7
+ tcolorpy/__init__.py,sha256=6fI5Y7N04ZgSFfienFNtd7hjJtAmBO8j4zxcDpk4OYk,913
8
+ tcolorpy/__main__.py,sha256=gjNpi78hE-X6CpY20ZLMmQ_yaWYIh_eOu2XrLnoGkBE,1701
9
+ tcolorpy/__pycache__/__init__.cpython-310.pyc,,
10
+ tcolorpy/__pycache__/__main__.cpython-310.pyc,,
11
+ tcolorpy/__pycache__/__version__.cpython-310.pyc,,
12
+ tcolorpy/__pycache__/_const.cpython-310.pyc,,
13
+ tcolorpy/__pycache__/_truecolor.cpython-310.pyc,,
14
+ tcolorpy/__version__.py,sha256=FfUl1ix-FI5DHv8TmnpAYpPWggJASYcLGQ0s-sVO6Ko,201
15
+ tcolorpy/_const.py,sha256=XS2rzsxY7SKxg0HreYTR_kEGeSi_59gOrrntI2_kG1o,1080
16
+ tcolorpy/_truecolor.py,sha256=nzu2GCc6Tu_4no5_Qcksm88-Vm75sCdeOMDQHG_2DhM,7495
17
+ tcolorpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.43.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ tcolorpy
llmeval-env/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (37.9 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/_VF.pyi ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/__config__.py ADDED
@@ -0,0 +1,22 @@
1
+ import torch
2
+
3
+
4
+ def show():
5
+ """
6
+ Return a human-readable string with descriptions of the
7
+ configuration of PyTorch.
8
+ """
9
+ return torch._C._show_config()
10
+
11
+
12
+ # TODO: In principle, we could provide more structured version/config
13
+ # information here. For now only CXX_FLAGS is exposed, as Timer
14
+ # uses them.
15
+ def _cxx_flags():
16
+ """Returns the CXX_FLAGS used when building PyTorch."""
17
+ return torch._C._cxx_flags()
18
+
19
+
20
+ def parallel_info():
21
+ r"""Returns detailed string with parallelization settings"""
22
+ return torch._C._parallel_info()
llmeval-env/lib/python3.10/site-packages/torch/__future__.py ADDED
@@ -0,0 +1,75 @@
1
+ _overwrite_module_params_on_conversion: bool = False
2
+ _swap_module_params_on_conversion: bool = False
3
+
4
+
5
+ def set_overwrite_module_params_on_conversion(value: bool) -> None:
6
+ """
7
+ Sets whether to assign new tensors to the parameters instead of changing the
8
+ existing parameters in-place when converting an ``nn.Module``.
9
+
10
+ When enabled, the following methods will assign new parameters to the module:
11
+
12
+ #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
13
+ #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
14
+ #. :meth:`nn.Module.to`
15
+ #. :meth:`nn.Module.to_empty`
16
+
17
+ Args:
18
+ value (bool): Whether to assign new tensors or not.
19
+
20
+ """
21
+ global _overwrite_module_params_on_conversion
22
+ _overwrite_module_params_on_conversion = value
23
+
24
+
25
+ def get_overwrite_module_params_on_conversion() -> bool:
26
+ """
27
+ Returns whether to assign new tensors to the parameters instead of changing the
28
+ existing parameters in-place when converting an :class:`torch.nn.Module`. Defaults to ``False``.
29
+
30
+ See :func:`~torch.__future__.set_overwrite_module_params_on_conversion` for more information.
31
+ """
32
+ return _overwrite_module_params_on_conversion
33
+
34
+
35
+ def set_swap_module_params_on_conversion(value: bool) -> None:
36
+ """
37
+ Sets whether to use :func:`~torch.utils.swap_tensors` instead of setting ``.data`` to
38
+ change the existing parameters in-place when converting an ``nn.Module`` and instead
39
+ of ``param.copy_(state_dict[key])`` when loading a state dict into an ``nn.Module``.
40
+
41
+ .. note::
42
+ This function takes precedence over :func:`~torch.__future__.get_overwrite_module_params_on_conversion`
43
+
44
+ When enabled, the following methods will swap the existing parameters in-place:
45
+
46
+ #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
47
+ #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
48
+ #. :meth:`nn.Module.to`
49
+ #. :meth:`nn.Module.to_empty`
50
+ #. :meth:`nn.Module.load_state_dict`
51
+
52
+ The semantics for :meth:`~nn.Module.load_state_dict` when this is set are as follows:
53
+
54
+ #. For each parameter/buffer, its corresponding ``state_dict['key']`` is transformed via
55
+ :meth:`~torch.Tensor.module_load` (i.e. ``res = param.module_load(state_dict['key'])``)
56
+ #. If necessary, ``res`` will be wrapped in an :class:`~nn.Parameter`
57
+ #. The parameter/buffer in the module will be swapped via :func:`~torch.utils.swap_tensors`
58
+ with ``res``
59
+
60
+ Args:
61
+ value (bool): Whether to use :func:`~torch.utils.swap_tensors` or not.
62
+
63
+ """
64
+ global _swap_module_params_on_conversion
65
+ _swap_module_params_on_conversion = value
66
+
67
+
68
+ def get_swap_module_params_on_conversion() -> bool:
69
+ """
70
+ Returns whether to use :func:`~torch.utils.swap_tensors` instead of setting .data to
71
+ change the existing parameters in-place when converting an ``nn.Module``. Defaults to ``False``.
72
+
73
+ See :func:`~torch.__future__.set_swap_module_params_on_conversion` for more information.
74
+ """
75
+ return _swap_module_params_on_conversion
llmeval-env/lib/python3.10/site-packages/torch/__init__.py ADDED
@@ -0,0 +1,2038 @@
1
+
2
+ r"""
3
+ The torch package contains data structures for multi-dimensional
4
+ tensors and defines mathematical operations over these tensors.
5
+ Additionally, it provides many utilities for efficient serialization of
6
+ Tensors and arbitrary types, and other useful utilities.
7
+
8
+ It has a CUDA counterpart that enables you to run your tensor computations
9
+ on an NVIDIA GPU with compute capability >= 3.0.
10
+ """
11
+
12
+ import math
13
+ import os
14
+ import sys
15
+ import platform
16
+ import textwrap
17
+ import ctypes
18
+ import inspect
19
+ import threading
20
+
21
+ # multipy/deploy is setting this import before importing torch, this is the most
22
+ # reliable way we have to detect if we're running within deploy.
23
+ # https://github.com/pytorch/multipy/blob/d60f34ad38c371e441fe7ffdb77a3c3dda5a5d19/multipy/runtime/interpreter/interpreter_impl.cpp#L134-L137
24
+ def _running_with_deploy():
25
+ return sys.modules.get("torch._meta_registrations", None) is object
26
+
27
+ from ._utils import _import_dotted_name, classproperty
28
+ from ._utils import _functionalize_sync as _sync
29
+ from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
30
+ USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
31
+
32
+ # TODO(torch_deploy) figure out how to freeze version.py in fbcode build
33
+ if _running_with_deploy():
34
+ __version__ = "torch-deploy-1.8"
35
+ else:
36
+ from .torch_version import __version__ as __version__
37
+
38
+ from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, TYPE_CHECKING, Union, List
39
+ import builtins
40
+
41
+ __all__ = [
42
+ 'typename', 'is_tensor', 'is_storage',
43
+ 'set_default_tensor_type', 'set_default_device', 'get_default_device',
44
+ 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
45
+ 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
46
+ 'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
47
+ 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
48
+ 'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
49
+ 'TypedStorage', 'UntypedStorage',
50
+ 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
51
+ 'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
52
+ 'lobpcg', 'use_deterministic_algorithms',
53
+ 'are_deterministic_algorithms_enabled',
54
+ 'is_deterministic_algorithms_warn_only_enabled',
55
+ 'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
56
+ 'set_float32_matmul_precision', 'get_float32_matmul_precision',
57
+ 'set_warn_always', 'is_warn_always_enabled', 'SymInt', 'SymFloat',
58
+ 'SymBool', 'sym_not', 'unravel_index',
59
+ 'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap',
60
+ 'export', 'autocast', 'cond', 'GradScaler',
61
+ ]
62
+
63
+ ################################################################################
64
+ # Load the extension module
65
+ ################################################################################
66
+
67
+ if sys.platform == 'win32':
68
+ pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
69
+ py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
70
+ th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')
71
+
72
+ # When users create a virtualenv that inherits the base environment,
73
+ # we will need to add the corresponding library directory into
74
+ # DLL search directories. Otherwise, it will rely on `PATH` which
75
+ # is dependent on user settings.
76
+ if sys.exec_prefix != sys.base_exec_prefix:
77
+ base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
78
+ else:
79
+ base_py_dll_path = ''
80
+
81
+ dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))
82
+
83
+ if all(not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths):
84
+ nvtoolsext_dll_path = os.path.join(
85
+ os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64')
86
+ else:
87
+ nvtoolsext_dll_path = ''
88
+
89
+ from .version import cuda as cuda_version
90
+ import glob
91
+ if cuda_version and all(not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths):
92
+ cuda_version_1 = cuda_version.replace('.', '_')
93
+ cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
94
+ default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
95
+ cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
96
+ else:
97
+ cuda_path = ''
98
+
99
+ dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))
100
+
101
+ kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
102
+ with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
103
+ prev_error_mode = kernel32.SetErrorMode(0x0001)
104
+
105
+ kernel32.LoadLibraryW.restype = ctypes.c_void_p
106
+ if with_load_library_flags:
107
+ kernel32.LoadLibraryExW.restype = ctypes.c_void_p
108
+
109
+ for dll_path in dll_paths:
110
+ os.add_dll_directory(dll_path)
111
+
112
+ try:
113
+ ctypes.CDLL('vcruntime140.dll')
114
+ ctypes.CDLL('msvcp140.dll')
115
+ ctypes.CDLL('vcruntime140_1.dll')
116
+ except OSError:
117
+ print('''Microsoft Visual C++ Redistributable is not installed; this may lead to DLL load failures.
118
+ It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')
119
+
120
+ dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
121
+ path_patched = False
122
+ for dll in dlls:
123
+ is_loaded = False
124
+ if with_load_library_flags:
125
+ res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
126
+ last_error = ctypes.get_last_error()
127
+ if res is None and last_error != 126:
128
+ err = ctypes.WinError(last_error)
129
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
130
+ raise err
131
+ elif res is not None:
132
+ is_loaded = True
133
+ if not is_loaded:
134
+ if not path_patched:
135
+ os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
136
+ path_patched = True
137
+ res = kernel32.LoadLibraryW(dll)
138
+ if res is None:
139
+ err = ctypes.WinError(ctypes.get_last_error())
140
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
141
+ raise err
142
+
143
+ kernel32.SetErrorMode(prev_error_mode)
144
+
145
+
146
+ def _preload_cuda_deps(lib_folder, lib_name):
147
+ """Preloads cuda deps if they could not be found otherwise."""
148
+ # Should only be called on Linux if default path resolution has failed
149
+ assert platform.system() == 'Linux', 'Should only be called on Linux'
150
+ import glob
151
+ lib_path = None
152
+ for path in sys.path:
153
+ nvidia_path = os.path.join(path, 'nvidia')
154
+ if not os.path.exists(nvidia_path):
155
+ continue
156
+ candidate_lib_paths = glob.glob(os.path.join(nvidia_path, lib_folder, 'lib', lib_name))
157
+ if candidate_lib_paths and not lib_path:
158
+ lib_path = candidate_lib_paths[0]
159
+ if lib_path:
160
+ break
161
+ if not lib_path:
162
+ raise ValueError(f"{lib_name} not found in the system path {sys.path}")
163
+ ctypes.CDLL(lib_path)
164
+
165
+
166
+ # See Note [Global dependencies]
167
+ def _load_global_deps() -> None:
168
+ if _running_with_deploy() or platform.system() == 'Windows':
169
+ return
170
+
171
+ lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
172
+ here = os.path.abspath(__file__)
173
+ lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)
174
+
175
+ try:
176
+ ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
177
+ except OSError as err:
178
+ # Can only happen for wheel with cuda libs as PYPI deps
179
+ # As PyTorch is not purelib, but nvidia-*-cu12 is
180
+ cuda_libs: Dict[str, str] = {
181
+ 'cublas': 'libcublas.so.*[0-9]',
182
+ 'cudnn': 'libcudnn.so.*[0-9]',
183
+ 'cuda_nvrtc': 'libnvrtc.so.*[0-9]',
184
+ 'cuda_runtime': 'libcudart.so.*[0-9]',
185
+ 'cuda_cupti': 'libcupti.so.*[0-9]',
186
+ 'cufft': 'libcufft.so.*[0-9]',
187
+ 'curand': 'libcurand.so.*[0-9]',
188
+ 'cusolver': 'libcusolver.so.*[0-9]',
189
+ 'cusparse': 'libcusparse.so.*[0-9]',
190
+ 'nccl': 'libnccl.so.*[0-9]',
191
+ 'nvtx': 'libnvToolsExt.so.*[0-9]',
192
+ }
193
+ is_cuda_lib_err = [lib for lib in cuda_libs.values() if lib.split('.')[0] in err.args[0]]
194
+ if not is_cuda_lib_err:
195
+ raise err
196
+ for lib_folder, lib_name in cuda_libs.items():
197
+ _preload_cuda_deps(lib_folder, lib_name)
198
+ ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
199
+
200
+
201
+ if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
202
+ (_running_with_deploy() or platform.system() != 'Windows'):
203
+ # Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
204
+ # few circumstances:
205
+ #
206
+ # 1. You're in a build environment (e.g., fbcode) where
207
+ # libtorch_global_deps is not available, but you still need
208
+ # to get mkl to link in with RTLD_GLOBAL or it will just
209
+ # not work.
210
+ #
211
+ # 2. You're trying to run PyTorch under UBSAN and you need
212
+ # to ensure that only one copy of libtorch is loaded, so
213
+ # vptr checks work properly
214
+ #
215
+ # If you're using this setting, you must verify that all the libraries
216
+ # you load consistently use the same libstdc++, or you may have
217
+ # mysterious segfaults.
218
+ #
219
+ old_flags = sys.getdlopenflags()
220
+ sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
221
+ from torch._C import * # noqa: F403
222
+ sys.setdlopenflags(old_flags)
223
+ del old_flags
224
+
225
+ else:
226
+ # Easy way. You want this most of the time, because it will prevent
227
+ # C++ symbols from libtorch clobbering C++ symbols from other
228
+ # libraries, leading to mysterious segfaults.
229
+ #
230
+ # If building in an environment where libtorch_global_deps isn't available
231
+ # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
232
+ # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
233
+ #
234
+ # See Note [Global dependencies]
235
+ if USE_GLOBAL_DEPS:
236
+ _load_global_deps()
237
+ from torch._C import * # noqa: F403
238
+
239
+ # Appease the type checker; ordinarily this binding is inserted by the
240
+ # torch._C module initialization code in C
241
+ if TYPE_CHECKING:
242
+ from . import _C as _C
243
+
244
+ class SymInt:
245
+ """
246
+ Like an int (including magic methods), but redirects all operations to the
247
+ wrapped node. This is used in particular to symbolically record operations
248
+ in the symbolic shape workflow.
249
+ """
250
+
251
+ def __init__(self, node):
252
+ # This field MUST be named node; C++ binding code assumes that this
253
+ # class has a field named node that stores SymNode
254
+ self.node = node
255
+
256
+ def __bool__(self):
257
+ return builtins.bool(self != 0)
258
+
259
+ def __int__(self):
260
+ return self.node.int_()
261
+
262
+ def __index__(self):
263
+ return self.node.int_()
264
+
265
+ # Magic methods installed by torch.fx.experimental.sym_node
266
+
267
+ def __eq__(self, other: object) -> builtins.bool:
268
+ raise AssertionError("type stub not overridden")
269
+
270
+ def __lt__(self, other) -> builtins.bool:
271
+ raise AssertionError("type stub not overridden")
272
+
273
+ def __gt__(self, other) -> builtins.bool:
274
+ raise AssertionError("type stub not overridden")
275
+
276
+ def __le__(self, other) -> builtins.bool:
277
+ raise AssertionError("type stub not overridden")
278
+
279
+ def __ge__(self, other) -> builtins.bool:
280
+ raise AssertionError("type stub not overridden")
281
+
282
+ def __add__(self, other) -> "SymInt":
283
+ raise AssertionError("type stub not overridden")
284
+
285
+ def __mul__(self, other) -> "SymInt":
286
+ raise AssertionError("type stub not overridden")
287
+
288
+ def __sym_max__(self, other):
289
+ raise AssertionError("type stub not overridden")
290
+
291
+ def __sym_min__(self, other):
292
+ raise AssertionError("type stub not overridden")
293
+
294
+ def __sym_float__(self):
295
+ raise AssertionError("type stub not overridden")
296
+
297
+ def __neg__(self):
298
+ raise AssertionError("type stub not overridden")
299
+
300
+ def __repr__(self):
301
+ return str(self.node)
302
+
303
+ def __hash__(self) -> builtins.int:
304
+ if self.node.is_nested_int():
305
+ return hash(self.node.nested_int())
306
+ else:
307
+ # We could support constant SymInts as well, but not doing it for now
308
+ raise TypeError("unhashable type: non-nested SymInt")
309
+
310
+ class SymFloat:
311
+ """
312
+ Like a float (including magic methods), but redirects all operations to the
313
+ wrapped node. This is used in particular to symbolically record operations
314
+ in the symbolic shape workflow.
315
+ """
316
+
317
+ def __init__(self, node):
318
+ # This field MUST be named node; C++ binding code assumes that this
319
+ # class has a field named node that stores SymNode
320
+ self.node = node
321
+
322
+ def __bool__(self):
323
+ return self.node.bool_()
324
+
325
+ # Magic methods installed by torch.fx.experimental.sym_node
326
+
327
+ def __eq__(self, other: object) -> builtins.bool:
328
+ raise AssertionError("type stub not overridden")
329
+
330
+ def __lt__(self, other) -> builtins.bool:
331
+ raise AssertionError("type stub not overridden")
332
+
333
+ def __gt__(self, other) -> builtins.bool:
334
+ raise AssertionError("type stub not overridden")
335
+
336
+ def __le__(self, other) -> builtins.bool:
337
+ raise AssertionError("type stub not overridden")
338
+
339
+ def __ge__(self, other) -> builtins.bool:
340
+ raise AssertionError("type stub not overridden")
341
+
342
+ def __sym_max__(self, other):
343
+ raise AssertionError("type stub not overridden")
344
+
345
+ def __sym_min__(self, other):
346
+ raise AssertionError("type stub not overridden")
347
+
348
+ def __sym_int__(self):
349
+ raise AssertionError("type stub not overridden")
350
+
351
+ def is_integer(self):
352
+ """Return True if the float is an integer."""
353
+ raise AssertionError("type stub not overridden")
354
+
355
+ def __repr__(self):
356
+ return self.node.str()
357
+
358
+ class SymBool:
359
+ """
360
+ Like a bool (including magic methods), but redirects all operations to the
361
+ wrapped node. This is used in particular to symbolically record operations
362
+ in the symbolic shape workflow.
363
+
364
+ Unlike regular bools, regular boolean operators will force extra guards instead
365
+ of symbolically evaluating. Use the bitwise operators instead to handle this.
366
+ """
367
+
368
+ def __init__(self, node):
369
+ # This field MUST be named node; C++ binding code assumes that this
370
+ # class has a field named node that stores SymNode
371
+ self.node = node
372
+
373
+ def __bool__(self):
374
+ return self.node.bool_()
375
+
376
+ def __int__(self):
377
+ return builtins.int(self.node.bool_())
378
+
379
+ # Magic methods installed by torch.fx.experimental.sym_node
380
+ def __and__(self, other) -> "SymBool":
381
+ raise AssertionError("type stub not overridden")
382
+
383
+ def __or__(self, other) -> "SymBool":
384
+ raise AssertionError("type stub not overridden")
385
+
386
+ # We very carefully define __sym_not__, and not a number of other
387
+ # plausible alternatives:
388
+ #
389
+ # - We do not override __not__ because this is not a real magic
390
+ # method; you cannot override the meaning of the not builtin in
391
+ # Python. We use the name 'sym_not' to clarify that in user code you
392
+ # cannot use the builtin not or operator.not_ or operator.__not__ and
393
+ # hit this magic method; you must use our custom sym_not operator.
394
+ #
395
+ # - We do not override the __invert__ method because SymBool is
396
+ # meant to be usable in situations where bool is expected. However,
397
+ # bitwise negation ~a does the wrong thing with booleans (because
398
+ # bool is a subclass of int, so ~1 = -2 which is not falseish.)
399
+ # This would be a giant footgun, so we get around it by defining
400
+ # our own operator. Note that bitwise and/or do the right thing,
401
+ # so we reuse the conventional operators there for readability.
402
+ #
403
+ def __sym_not__(self) -> "SymBool":
404
+ raise AssertionError("type stub not overridden")
405
+
406
+ def __sym_ite__(self, then_val, else_val):
407
+ raise AssertionError("type stub not overridden")
408
+
409
+ def __eq__(self, other) -> builtins.bool:
410
+ raise AssertionError("type stub not overridden")
411
+
412
+ def __repr__(self):
413
+ return str(self.node)
414
+
415
+ def __hash__(self):
416
+ if self.node.is_constant():
417
+ return hash(self.node.bool_())
418
+ else:
419
+ raise TypeError("unhashable type: SymBool")
420
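The three Sym* wrapper classes above are rarely constructed by hand; they typically surface as symbolic sizes inside traced or compiled code. A minimal editorial sketch, not part of the diffed file, assuming a build where torch.compile with dynamic shapes is available:

import torch

@torch.compile(dynamic=True)
def scale_by_batch(x):
    # Eagerly x.shape[0] is a plain int; under symbolic tracing it is a
    # torch.SymInt that records the operations performed on it.
    return x * x.shape[0]

scale_by_batch(torch.ones(4))  # tensor([4., 4., 4., 4.])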
+
421
+ def sym_not(a):
422
+ r""" SymInt-aware utility for logical negation.
423
+
424
+ Args:
425
+ a (SymBool or bool): Object to negate
426
+ """
427
+ import sympy
428
+ from .overrides import has_torch_function_unary, handle_torch_function
429
+
430
+ if has_torch_function_unary(a):
431
+ return handle_torch_function(sym_not, (a,), a)
432
+ if hasattr(a, '__sym_not__'):
433
+ return a.__sym_not__()
434
+ if isinstance(a, sympy.Basic):
435
+ return ~a # type: ignore[operator]
436
+ return not a
437
+
438
+ def sym_float(a):
439
+ r""" SymInt-aware utility for float casting.
440
+
441
+ Args:
442
+ a (SymInt, SymFloat, or object): Object to cast
443
+ """
444
+ from .overrides import has_torch_function_unary, handle_torch_function
445
+
446
+ if has_torch_function_unary(a):
447
+ return handle_torch_function(sym_float, (a,), a)
448
+ if isinstance(a, SymFloat):
449
+ return a
450
+ elif hasattr(a, '__sym_float__'):
451
+ return a.__sym_float__()
452
+ return py_float(a) # type: ignore[operator]
453
+
454
+
455
+ def sym_int(a):
456
+ r""" SymInt-aware utility for int casting.
457
+
458
+ Args:
459
+ a (SymInt, SymFloat, or object): Object to cast
460
+ """
461
+ from .overrides import has_torch_function_unary, handle_torch_function
462
+
463
+ if has_torch_function_unary(a):
464
+ return handle_torch_function(sym_int, (a,), a)
465
+ if isinstance(a, SymInt):
466
+ return a
467
+ elif isinstance(a, SymFloat):
468
+ return math.floor(a) if a >= 0 else math.ceil(a) # type: ignore[arg-type, call-overload]
469
+ return py_int(a) # type: ignore[operator]
470
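For ordinary Python values the helpers above reduce to the builtin behavior spelled out in their bodies; only inputs with the __sym_*__ hooks (SymInt/SymFloat) take the symbolic path. A quick editorial illustration, not part of the diffed file:

import torch

assert torch.sym_not(False) is True    # plain bool: falls back to `not`
assert torch.sym_float(3) == 3.0       # plain int: falls back to float()
assert torch.sym_int(3.7) == 3         # plain float: truncates like int()
assert torch.sym_int(-3.7) == -3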
+
471
+ def sym_max(a, b):
472
+ """ SymInt-aware utility for max()."""
473
+ from .overrides import has_torch_function, handle_torch_function
474
+
475
+ if has_torch_function((a, b)):
476
+ return handle_torch_function(sym_max, (a, b), a, b)
477
+ if isinstance(a, (SymInt, SymFloat)):
478
+ return a.__sym_max__(b)
479
+ elif isinstance(b, (SymInt, SymFloat)):
480
+ # NB: If you actually care about preserving output type exactly
481
+ # if you do something like max(0, 0.0), it is NOT sound to treat
482
+ # min/max as commutative
483
+ return b.__sym_max__(a)
484
+ return builtins.max(a, b) # type: ignore[operator]
485
+
486
+ def sym_min(a, b):
487
+ """ SymInt-aware utility for max()."""
488
+ from .overrides import has_torch_function, handle_torch_function
489
+
490
+ if has_torch_function((a, b)):
491
+ return handle_torch_function(sym_min, (a, b), a, b)
492
+ if isinstance(a, (SymInt, SymFloat)):
493
+ return a.__sym_min__(b)
494
+ elif isinstance(b, (SymInt, SymFloat)):
495
+ return b.__sym_min__(a)
496
+ return builtins.min(a, b) # type: ignore[operator]
497
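Likewise, sym_max/sym_min behave exactly like builtins.max/min until a symbolic operand is involved, at which point the call is routed to __sym_max__/__sym_min__ on that operand. Editorial sketch:

import torch

assert torch.sym_max(2, 5) == 5
assert torch.sym_min(2.5, 5) == 2.5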
+
498
+ # Drop in replacement for math.sqrt, math.sin, math.cos etc
499
+ current_module = sys.modules[__name__]
500
+
501
+ def _get_sym_math_fn(name):
502
+ def fn(a):
503
+ from .overrides import has_torch_function_unary, handle_torch_function
504
+
505
+ if has_torch_function_unary(a):
506
+ return handle_torch_function(fn, (a,), a)
507
+ if hasattr(a, f"__sym_{name}__"):
508
+ return getattr(a, f"__sym_{name}__")()
509
+ return getattr(math, name)(a)
510
+
511
+ return fn
512
+
513
+ for name in ("sqrt", "cos", "cosh", "sin", "sinh", "tan", "tanh", "asin", "acos", "atan"):
514
+ sym_name = f"_sym_{name}"
515
+ fn = _get_sym_math_fn(name)
516
+ fn.__qualname__ = fn.__name__ = sym_name
517
+ setattr(current_module, sym_name, fn)
518
+
519
+ # Adding temporary shortcut
520
+ sym_sqrt = current_module._sym_sqrt
521
+ __all__.append("sym_sqrt")
522
+
523
+ del fn, name, sym_name, current_module # type: ignore[possibly-undefined]
524
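The loop above stamps out torch._sym_sqrt, torch._sym_sin, and friends; for plain numbers they simply defer to the math module, and sym_sqrt is the temporary public alias created above. Editorial sketch:

import math
import torch

assert torch.sym_sqrt(4.0) == 2.0
assert torch.sym_sqrt(2.0) == math.sqrt(2.0)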
+
525
+
526
+ def sym_ite(b, t, f):
527
+ from .overrides import has_torch_function, handle_torch_function
528
+
529
+ if has_torch_function((b, t, f)):
530
+ return handle_torch_function(sym_ite, (b, t, f), b, t, f)
531
+ assert isinstance(b, (SymBool, builtins.bool)) and type(t) == type(f)
532
+ if isinstance(b, SymBool):
533
+ return b.__sym_ite__(t, f)
534
+ return t if b else f
535
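sym_ite is an if-then-else that stays symbolic when the predicate is a SymBool; with a plain bool it is just a conditional expression, and the assert above requires both branches to share a type. Editorial sketch:

import torch

assert torch.sym_ite(True, 1, 2) == 1
assert torch.sym_ite(False, 1, 2) == 2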
+
536
+ # Check to see if we can load C extensions, and if not provide some guidance
537
+ # on what the problem might be.
538
+ try:
539
+ # _initExtension is chosen (arbitrarily) as a sentinel.
540
+ from torch._C import _initExtension
541
+ except ImportError:
542
+ import torch._C as _C_for_compiled_check
543
+
544
+ # The __file__ check only works for Python 3.7 and above.
545
+ if _C_for_compiled_check.__file__ is None:
546
+ raise ImportError(textwrap.dedent('''
547
+ Failed to load PyTorch C extensions:
548
+ It appears that PyTorch has loaded the `torch/_C` folder
549
+ of the PyTorch repository rather than the C extensions which
550
+ are expected in the `torch._C` namespace. This can occur when
551
+ using the `install` workflow, e.g.
552
+ $ python setup.py install && python -c "import torch"
553
+
554
+ This error can generally be solved using the `develop` workflow
555
+ $ python setup.py develop && python -c "import torch" # This should succeed
556
+ or by running Python from a different directory.
557
+ ''').strip()) from None
558
+ raise # If __file__ is not None the cause is unknown, so just re-raise.
559
+
560
+ for name in dir(_C):
561
+ if name[0] != '_' and not name.endswith('Base'):
562
+ __all__.append(name)
563
+ obj = getattr(_C, name)
564
+ if (isinstance(obj, Callable) or inspect.isclass(obj)): # type: ignore[arg-type]
565
+ if (obj.__module__ != 'torch'):
566
+ # TODO: fix their module from C++ side
567
+ if name not in ['DisableTorchFunctionSubclass', 'DisableTorchFunction', 'Generator']:
568
+ obj.__module__ = 'torch'
569
+ elif name == 'TensorBase':
570
+ # issue 109438 / pr 109940. Prevent TensorBase from being copied into torch.
571
+ delattr(sys.modules[__name__], name)
572
+
573
+ if not TYPE_CHECKING:
574
+ # issue 38137 and python issue 43367. Submodules of a C extension are
575
+ # non-standard, and attributes of those submodules cannot be pickled since
576
+ # pickle expects to be able to import them as "from _C.sub import attr"
577
+ # which fails with "_C is not a package
578
+ for attr in dir(_C):
579
+ candidate = getattr(_C, attr)
580
+ if type(candidate) is type(_C):
581
+ # submodule
582
+ if f'torch._C.{attr}' not in sys.modules:
583
+ sys.modules[f'torch._C.{attr}'] = candidate
584
+
585
+
586
+ ################################################################################
587
+ # Define basic utilities
588
+ ################################################################################
589
+
590
+
591
+ def typename(o):
592
+ if isinstance(o, torch.Tensor):
593
+ return o.type()
594
+
595
+ module = ''
596
+ class_name = ''
597
+ if hasattr(o, '__module__') and o.__module__ != 'builtins' \
598
+ and o.__module__ != '__builtin__' and o.__module__ is not None:
599
+ module = o.__module__ + '.'
600
+
601
+ if hasattr(o, '__qualname__'):
602
+ class_name = o.__qualname__
603
+ elif hasattr(o, '__name__'):
604
+ class_name = o.__name__
605
+ else:
606
+ class_name = o.__class__.__name__
607
+
608
+ return module + class_name
609
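typename returns the legacy tensor-type string for tensors and a module-qualified class name for everything else. Editorial sketch (assumes the default float32 dtype is still in effect):

import torch

assert torch.typename(torch.zeros(1)) == 'torch.FloatTensor'
assert torch.typename('abc') == 'str'   # plain objects fall back to the class name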
+
610
+
611
+ def is_tensor(obj):
612
+ r"""Returns True if `obj` is a PyTorch tensor.
613
+
614
+ Note that this function is simply doing ``isinstance(obj, Tensor)``.
615
+ Using that ``isinstance`` check is better for typechecking with mypy,
616
+ and more explicit - so it's recommended to use that instead of
617
+ ``is_tensor``.
618
+
619
+ Args:
620
+ obj (Object): Object to test
621
+ Example::
622
+
623
+ >>> x = torch.tensor([1, 2, 3])
624
+ >>> torch.is_tensor(x)
625
+ True
626
+
627
+ """
628
+ return isinstance(obj, torch.Tensor)
629
+
630
+
631
+ def is_storage(obj):
632
+ r"""Returns True if `obj` is a PyTorch storage object.
633
+
634
+ Args:
635
+ obj (Object): Object to test
636
+ """
637
+ return type(obj) in _storage_classes
638
+
639
+
640
+ _GLOBAL_DEVICE_CONTEXT = threading.local()
641
+
642
+
643
+ def get_default_device() -> "torch.device":
644
+ r"""Gets the default ``torch.Tensor`` to be allocated on ``device``"""
645
+ global _GLOBAL_DEVICE_CONTEXT
646
+ if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"):
647
+ device = _GLOBAL_DEVICE_CONTEXT.device_context.device
648
+ if device.index is not None:
649
+ return device
650
+ else:
651
+ # TODO: Call like get_device_index() method corresponding to
652
+ # each device type
653
+ return torch.tensor([]).device
654
+ else:
655
+ return torch.device("cpu")
656
+
657
+
658
+ def set_default_device(device):
659
+ """Sets the default ``torch.Tensor`` to be allocated on ``device``. This
660
+ does not affect factory function calls which are called with an explicit
661
+ ``device`` argument. Factory calls will be performed as if they
662
+ were passed ``device`` as an argument.
663
+
664
+ To only temporarily change the default device instead of setting it
665
+ globally, use ``with torch.device(device):`` instead.
666
+
667
+ The default device is initially ``cpu``. If you set the default tensor
668
+ device to another device (e.g., ``cuda``) without a device index, tensors
669
+ will be allocated on whatever the current device for that device type is,
670
+ even after :func:`torch.cuda.set_device` is called.
671
+
672
+ .. warning::
673
+
674
+ This function imposes a slight performance cost on every Python
675
+ call to the torch API (not just factory functions). If this
676
+ is causing problems for you, please comment on
677
+ https://github.com/pytorch/pytorch/issues/92701
678
+
679
+ .. note::
680
+
681
+ This doesn't affect functions that create tensors that share the same memory as the input, like:
682
+ :func:`torch.from_numpy` and :func:`torch.frombuffer`
683
+
684
+ Args:
685
+ device (device or string): the device to set as default
686
+
687
+ Example::
688
+
689
+ >>> # xdoctest: +SKIP("requires cuda, changes global state")
690
+ >>> torch.get_default_device()
691
+ device(type='cpu')
692
+ >>> torch.set_default_device('cuda') # current device is 0
693
+ >>> torch.get_default_device()
694
+ device(type='cuda', index=0)
695
+ >>> torch.set_default_device('cuda')
696
+ >>> torch.cuda.set_device('cuda:1') # current device is 1
697
+ >>> torch.get_default_device()
698
+ device(type='cuda', index=1)
699
+ >>> torch.set_default_device('cuda:1')
700
+ >>> torch.get_default_device()
701
+ device(type='cuda', index=1)
702
+
703
+ """
704
+ global _GLOBAL_DEVICE_CONTEXT
705
+ if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"):
706
+ device_context = _GLOBAL_DEVICE_CONTEXT.device_context
707
+ if device_context is not None:
708
+ device_context.__exit__(None, None, None)
709
+
710
+ if device is None:
711
+ device_context = None
712
+ else:
713
+ from torch.utils._device import DeviceContext
714
+ device_context = DeviceContext(device)
715
+ device_context.__enter__()
716
+ _GLOBAL_DEVICE_CONTEXT.device_context = device_context
717
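A small editorial sketch of the two usage patterns the docstring describes: a scoped default via the torch.device context manager, and the process-wide default set here. The "meta" device is used only to keep the sketch hardware-independent.

import torch

with torch.device("meta"):                       # scoped default, as recommended
    assert torch.empty(2).device.type == "meta"

torch.set_default_device("meta")                 # process-wide default
assert torch.get_default_device().type == "meta"
torch.set_default_device(None)                   # restore the initial behavior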
+
718
+
719
+ def set_default_tensor_type(t):
720
+ r"""
721
+ .. warning::
722
+
723
+ This function is deprecated as of PyTorch 2.1, please use :func:`torch.set_default_dtype()` and
724
+ :func:`torch.set_default_device()` as alternatives.
725
+
726
+ Sets the default ``torch.Tensor`` type to floating point tensor type
727
+ ``t``. This type will also be used as default floating point type for
728
+ type inference in :func:`torch.tensor`.
729
+
730
+ The default floating point tensor type is initially ``torch.FloatTensor``.
731
+
732
+ Args:
733
+ t (type or string): the floating point tensor type or its name
734
+
735
+ Example::
736
+
737
+ >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
738
+ >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32
739
+ torch.float32
740
+ >>> torch.set_default_tensor_type(torch.DoubleTensor)
741
+ >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
742
+ torch.float64
743
+
744
+ """
745
+ if isinstance(t, str):
746
+ t = _import_dotted_name(t)
747
+ _C._set_default_tensor_type(t)
748
+
749
+
750
+ def set_default_dtype(d):
751
+ r"""
752
+
753
+ Sets the default floating point dtype to :attr:`d`. Supports torch.float32
754
+ and torch.float64 as inputs. Other dtypes may be accepted without complaint
755
+ but are not supported and are unlikely to work as expected.
756
+
757
+ When PyTorch is initialized its default floating point dtype is torch.float32,
758
+ and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
759
+ type inference. The default floating point dtype is used to:
760
+
761
+ 1. Implicitly determine the default complex dtype. When the default floating point
762
+ type is float32 the default complex dtype is complex64, and when the default
763
+ floating point type is float64 the default complex type is complex128.
764
+ 2. Infer the dtype for tensors constructed using Python floats or complex Python
765
+ numbers. See examples below.
766
+ 3. Determine the result of type promotion between bool and integer tensors and
767
+ Python floats and complex Python numbers.
768
+
769
+ Args:
770
+ d (:class:`torch.dtype`): the floating point dtype to make the default.
771
+ Either torch.float32 or torch.float64.
772
+
773
+ Example:
774
+ >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
775
+ >>> # initial default for floating point is torch.float32
776
+ >>> # Python floats are interpreted as float32
777
+ >>> torch.tensor([1.2, 3]).dtype
778
+ torch.float32
779
+ >>> # initial default for floating point is torch.complex64
780
+ >>> # Complex Python numbers are interpreted as complex64
781
+ >>> torch.tensor([1.2, 3j]).dtype
782
+ torch.complex64
783
+
784
+ >>> torch.set_default_dtype(torch.float64)
785
+
786
+ >>> # Python floats are now interpreted as float64
787
+ >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
788
+ torch.float64
789
+ >>> # Complex Python numbers are now interpreted as complex128
790
+ >>> torch.tensor([1.2, 3j]).dtype # a new complex tensor
791
+ torch.complex128
792
+
793
+ """
794
+ _C._set_default_dtype(d)
795
+
796
+ def use_deterministic_algorithms(mode: builtins.bool, *, warn_only: builtins.bool = False) -> None:
797
+ r""" Sets whether PyTorch operations must use "deterministic"
798
+ algorithms. That is, algorithms which, given the same input, and when
799
+ run on the same software and hardware, always produce the same output.
800
+ When enabled, operations will use deterministic algorithms when available,
801
+ and if only nondeterministic algorithms are available they will throw a
802
+ :class:`RuntimeError` when called.
803
+
804
+ .. note:: This setting alone is not always enough to make an application
805
+ reproducible. Refer to :ref:`reproducibility` for more information.
806
+
807
+ .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
808
+ interface for this feature.
809
+
810
+ The following normally-nondeterministic operations will act
811
+ deterministically when ``mode=True``:
812
+
813
+ * :class:`torch.nn.Conv1d` when called on CUDA tensor
814
+ * :class:`torch.nn.Conv2d` when called on CUDA tensor
815
+ * :class:`torch.nn.Conv3d` when called on CUDA tensor
816
+ * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
817
+ * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
818
+ * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
819
+ * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
820
+ * :func:`torch.bmm` when called on sparse-dense CUDA tensors
821
+ * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
822
+ and the index is a list of tensors
823
+ * :func:`torch.Tensor.index_put` with ``accumulate=False``
824
+ * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
825
+ tensor
826
+ * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
827
+ tensor
828
+ * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor
829
+ * :func:`torch.gather` when called on a CUDA tensor that requires grad
830
+ * :func:`torch.index_add` when called on CUDA tensor
831
+ * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
832
+ * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
833
+ * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor
834
+ * :func:`torch.Tensor.scatter` when `src` type is Tensor and called on CUDA tensor
835
+ * :func:`torch.Tensor.scatter_reduce` when ``reduce='sum'`` or ``reduce='mean'`` and called on CUDA tensor
836
+
837
+ The following normally-nondeterministic operations will throw a
838
+ :class:`RuntimeError` when ``mode=True``:
839
+
840
+ * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
841
+ * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
842
+ * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
843
+ * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
844
+ * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
845
+ * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
846
+ * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
847
+ * :class:`torch.nn.MaxUnpool1d`
848
+ * :class:`torch.nn.MaxUnpool2d`
849
+ * :class:`torch.nn.MaxUnpool3d`
850
+ * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
851
+ and one of the following modes is used:
852
+
853
+ - ``linear``
854
+ - ``bilinear``
855
+ - ``bicubic``
856
+ - ``trilinear``
857
+
858
+ * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
859
+ * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
860
+ * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
861
+ * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
862
+ * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
863
+ * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
864
+ * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
865
+ * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
866
+ ``mode='max'``
867
+ * :func:`torch.Tensor.put_` when ``accumulate=False``
868
+ * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
869
+ * :func:`torch.histc` when called on a CUDA tensor
870
+ * :func:`torch.bincount` when called on a CUDA tensor and ``weights``
871
+ tensor is given
872
+ * :func:`torch.kthvalue` when called on a CUDA tensor
873
+ * :func:`torch.median` with indices output when called on a CUDA tensor
874
+ * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
875
+ * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex
876
+ * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor
877
+ * :func:`torch.Tensor.resize_` when called with a quantized tensor
878
+
879
+ In addition, several operations fill uninitialized memory when this setting
880
+ is turned on and when
881
+ :attr:`torch.utils.deterministic.fill_uninitialized_memory` is turned on.
882
+ See the documentation for that attribute for more information.
883
+
884
+ A handful of CUDA operations are nondeterministic if the CUDA version is
885
+ 10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
886
+ or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
887
+ details: `<https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility>`_
888
+ If one of these environment variable configurations is not set, a :class:`RuntimeError`
889
+ will be raised from these operations when called with CUDA tensors:
890
+
891
+ * :func:`torch.mm`
892
+ * :func:`torch.mv`
893
+ * :func:`torch.bmm`
894
+
895
+ Note that deterministic operations tend to have worse performance than
896
+ nondeterministic operations.
897
+
898
+ .. note::
899
+
900
+ This flag does not detect or prevent nondeterministic behavior caused
901
+ by calling an inplace operation on a tensor with an internal memory
902
+ overlap or by giving such a tensor as the :attr:`out` argument for an
903
+ operation. In these cases, multiple writes of different data may target
904
+ a single memory location, and the order of writes is not guaranteed.
905
+
906
+ Args:
907
+ mode (:class:`bool`): If True, makes potentially nondeterministic
908
+ operations switch to a deterministic algorithm or throw a runtime
909
+ error. If False, allows nondeterministic operations.
910
+
911
+ Keyword args:
912
+ warn_only (:class:`bool`, optional): If True, operations that do not
913
+ have a deterministic implementation will throw a warning instead of
914
+ an error. Default: ``False``
915
+
916
+ Example::
917
+
918
+ >>> # xdoctest: +SKIP
919
+ >>> torch.use_deterministic_algorithms(True)
920
+
921
+ # Forward mode nondeterministic error
922
+ >>> torch.randn(10, device='cuda').kthvalue(1)
923
+ ...
924
+ RuntimeError: kthvalue CUDA does not have a deterministic implementation...
925
+
926
+ # Backward mode nondeterministic error
927
+ >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
928
+ ...
929
+ RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
930
+ """
931
+ _C._set_deterministic_algorithms(mode, warn_only=warn_only)
932
+
933
+ def are_deterministic_algorithms_enabled() -> builtins.bool:
934
+ r"""Returns True if the global deterministic flag is turned on. Refer to
935
+ :func:`torch.use_deterministic_algorithms` documentation for more details.
936
+ """
937
+ return _C._get_deterministic_algorithms()
938
+
939
+ def is_deterministic_algorithms_warn_only_enabled() -> builtins.bool:
940
+ r"""Returns True if the global deterministic flag is set to warn only.
941
+ Refer to :func:`torch.use_deterministic_algorithms` documentation for more
942
+ details.
943
+ """
944
+ return _C._get_deterministic_algorithms_warn_only()
945
+
946
+ def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None:
947
+ r"""Sets the debug mode for deterministic operations.
948
+
949
+ .. note:: This is an alternative interface for
950
+ :func:`torch.use_deterministic_algorithms`. Refer to that function's
951
+ documentation for details about affected operations.
952
+
953
+ Args:
954
+ debug_mode(str or int): If "default" or 0, don't error or warn on
955
+ nondeterministic operations. If "warn" or 1, warn on
956
+ nondeterministic operations. If "error" or 2, error on
957
+ nondeterministic operations.
958
+ """
959
+
960
+ # NOTE: builtins.int is used here because int in this scope resolves
961
+ # to torch.int
962
+ if not isinstance(debug_mode, (builtins.int, str)):
963
+ raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')
964
+
965
+ if isinstance(debug_mode, str):
966
+ if debug_mode == 'default':
967
+ debug_mode = 0
968
+ elif debug_mode == 'warn':
969
+ debug_mode = 1
970
+ elif debug_mode == 'error':
971
+ debug_mode = 2
972
+ else:
973
+ raise RuntimeError(
974
+ 'invalid value of debug_mode, expected one of `default`, '
975
+ f'`warn`, `error`, but got {debug_mode}')
976
+
977
+ if debug_mode == 0:
978
+ _C._set_deterministic_algorithms(False)
979
+ elif debug_mode == 1:
980
+ _C._set_deterministic_algorithms(True, warn_only=True)
981
+ elif debug_mode == 2:
982
+ _C._set_deterministic_algorithms(True)
983
+ else:
984
+ raise RuntimeError(
985
+ 'invalid value of debug_mode, expected 0, 1, or 2, '
986
+ f'but got {debug_mode}')
987
+
988
+ def get_deterministic_debug_mode() -> builtins.int:
989
+ r"""Returns the current value of the debug mode for deterministic
990
+ operations. Refer to :func:`torch.set_deterministic_debug_mode`
991
+ documentation for more details.
992
+ """
993
+
994
+ if _C._get_deterministic_algorithms():
995
+ if _C._get_deterministic_algorithms_warn_only():
996
+ return 1
997
+ else:
998
+ return 2
999
+ else:
1000
+ return 0
1001
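The string and integer spellings map onto the same pair of underlying flags, so the getter reports 0/1/2 for "default"/"warn"/"error". Editorial sketch:

import torch

torch.set_deterministic_debug_mode("warn")
assert torch.get_deterministic_debug_mode() == 1
torch.set_deterministic_debug_mode("default")    # back to the permissive default
assert torch.get_deterministic_debug_mode() == 0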
+
1002
+ def get_float32_matmul_precision() -> builtins.str:
1003
+ r"""Returns the current value of float32 matrix multiplication precision. Refer to
1004
+ :func:`torch.set_float32_matmul_precision` documentation for more details.
1005
+ """
1006
+ return _C._get_float32_matmul_precision()
1007
+
1008
+ def set_float32_matmul_precision(precision: str) -> None:
1009
+ r"""Sets the internal precision of float32 matrix multiplications.
1010
+
1011
+ Running float32 matrix multiplications in lower precision may significantly increase
1012
+ performance, and in some programs the loss of precision has a negligible impact.
1013
+
1014
+ Supports three settings:
1015
+
1016
+ * "highest", float32 matrix multiplications use the float32 datatype (24 mantissa
1017
+ bits with 23 bits explicitly stored) for internal computations.
1018
+ * "high", float32 matrix multiplications either use the TensorFloat32 datatype (10
1019
+ mantissa bits explicitly stored) or treat each float32 number as the sum of two bfloat16 numbers
1020
+ (approximately 16 mantissa bits with 14 bits explicitly stored), if the appropriate fast matrix multiplication
1021
+ algorithms are available. Otherwise float32 matrix multiplications are computed
1022
+ as if the precision is "highest". See below for more information on the bfloat16
1023
+ approach.
1024
+ * "medium", float32 matrix multiplications use the bfloat16 datatype (8 mantissa
1025
+ bits with 7 bits explicitly stored) for internal computations, if a fast matrix multiplication algorithm
1026
+ using that datatype internally is available. Otherwise float32
1027
+ matrix multiplications are computed as if the precision is "high".
1028
+
1029
+ When using "high" precision, float32 multiplications may use a bfloat16-based algorithm
1030
+ that is more complicated than simply truncating to some smaller number of mantissa bits
1031
+ (e.g. 10 for TensorFloat32, 7 for bfloat16 explicitly stored). Refer to [Henry2019]_ for a complete
1032
+ description of this algorithm. To briefly explain here, the first step is to realize
1033
+ that we can perfectly encode a single float32 number as the sum of three bfloat16
1034
+ numbers (because float32 has 23 mantissa bits while bfloat16 has 7 explicitly stored, and both have the
1035
+ same number of exponent bits). This means that the product of two float32 numbers can
1036
+ be exactly given by the sum of nine products of bfloat16 numbers. We can then trade
1037
+ accuracy for speed by dropping some of these products. The "high" precision algorithm
1038
+ specifically keeps only the three most significant products, which conveniently excludes
1039
+ all of the products involving the last 8 mantissa bits of either input. This means that
1040
+ we can represent our inputs as the sum of two bfloat16 numbers rather than three.
1041
+ Because bfloat16 fused-multiply-add (FMA) instructions are typically >10x faster than
1042
+ float32 ones, it's faster to do three multiplications and 2 additions with bfloat16
1043
+ precision than it is to do a single multiplication with float32 precision.
1044
+
1045
+ .. [Henry2019] http://arxiv.org/abs/1904.06376
1046
+
1047
+ .. note::
1048
+
1049
+ This does not change the output dtype of float32 matrix multiplications,
1050
+ it controls how the internal computation of the matrix multiplication is performed.
1051
+
1052
+ .. note::
1053
+
1054
+ This does not change the precision of convolution operations. Other flags,
1055
+ like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
1056
+ operations.
1057
+
1058
+ .. note::
1059
+
1060
+ This flag currently only affects one native device type: CUDA.
1061
+ If "high" or "medium" are set then the TensorFloat32 datatype will be used
1062
+ when computing float32 matrix multiplications, equivalent to setting
1063
+ `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
1064
+ is set then the float32 datatype is used for internal computations, equivalent
1065
+ to setting `torch.backends.cuda.matmul.allow_tf32 = False`.
1066
+
1067
+ Args:
1068
+ precision(str): can be set to "highest" (default), "high", or "medium" (see above).
1069
+
1070
+ """
1071
+ _C._set_float32_matmul_precision(precision)
1072
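The precision setting is a plain string that can be read back with the getter above; "highest" is the documented default. Editorial sketch:

import torch

assert torch.get_float32_matmul_precision() == "highest"
torch.set_float32_matmul_precision("high")       # allow TF32/bfloat16-based matmuls
assert torch.get_float32_matmul_precision() == "high"
torch.set_float32_matmul_precision("highest")    # restore the default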
+
1073
+ def set_warn_always(b: builtins.bool) -> None:
1074
+ r"""When this flag is False (default) then some PyTorch warnings may only
1075
+ appear once per process. This helps avoid excessive warning information.
1076
+ Setting it to True causes these warnings to always appear, which may be
1077
+ helpful when debugging.
1078
+
1079
+ Args:
1080
+ b (:class:`bool`): If True, force warnings to always be emitted
1081
+ If False, set to the default behaviour
1082
+ """
1083
+ _C._set_warnAlways(b)
1084
+
1085
+ def is_warn_always_enabled() -> builtins.bool:
1086
+ r"""Returns True if the global warn_always flag is turned on. Refer to
1087
+ :func:`torch.set_warn_always` documentation for more details.
1088
+ """
1089
+ return _C._get_warnAlways()
1090
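A quick editorial sketch of toggling the warn-always flag while debugging:

import torch

torch.set_warn_always(True)
assert torch.is_warn_always_enabled()
torch.set_warn_always(False)     # restore the default (deduplicated) behavior
assert not torch.is_warn_always_enabled()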
+
1091
+ ################################################################################
1092
+ # Define error checking functions
1093
+ ################################################################################
1094
+
1095
+ # These error checking functions must be kept consistent with their C++
1096
+ # equivalents. Their C++ equivalents are mentioned where applicable.
1097
+
1098
+ def _check_with(error_type, cond: Union[builtins.bool, SymBool], message: Callable[[], str]): # noqa: F811
1099
+ if not isinstance(cond, (builtins.bool, torch.SymBool)):
1100
+ raise TypeError(f'cond must be a bool, but got {type(cond)}')
1101
+
1102
+ from torch.fx.experimental.symbolic_shapes import expect_true
1103
+ if expect_true(cond):
1104
+ return
1105
+
1106
+ # error_type must be a subclass of Exception and not subclass of Warning
1107
+ assert issubclass(error_type, Exception) and not issubclass(error_type, Warning)
1108
+
1109
+ if message is None:
1110
+ message_evaluated = (
1111
+ 'Expected cond to be True, but got False. (Could this error '
1112
+ 'message be improved? If so, please report an enhancement request '
1113
+ 'to PyTorch.)')
1114
+
1115
+ else:
1116
+ if not callable(message):
1117
+ raise TypeError('message must be a callable')
1118
+
1119
+ message_evaluated = str(message())
1120
+
1121
+ raise error_type(message_evaluated)
1122
+
1123
+ def _check(cond, message=None): # noqa: F811
1124
+ r"""Throws error containing an optional message if the specified condition
1125
+ is False.
1126
+
1127
+ Error type: ``RuntimeError``
1128
+
1129
+ C++ equivalent: ``TORCH_CHECK``
1130
+
1131
+ Args:
1132
+ cond (:class:`bool`): If False, throw error
1133
+
1134
+ message (Callable, optional): Callable that returns either a string or
1135
+ an object that has a ``__str__()`` method to be used as the error
1136
+ message. Default: ``None``
1137
+ """
1138
+ _check_with(RuntimeError, cond, message)
1139
+
1140
+ def _check_is_size(i, message=None):
1141
+ """Checks that a given integer is a valid size (i.e., is non-negative).
1142
+ You should use this over _check(i >= 0) because we can use the semantic
1143
+ information (that i is a size) to make some further inferences in case
1144
+ i is an unbacked SymInt.
1145
+
1146
+ NB: Do NOT use this in contexts where a -1 size would be valid (indicating
1147
+ to infer the size from context, or if you should wrap-around or truncate).
1148
+ Only use this if the only valid value is an honest to goodness size.
1149
+ """
1150
+ # This is responsible for the expect_true
1151
+ _check(i >= 0, message)
1152
+ from torch.fx.experimental.symbolic_shapes import _advise_is_size
1153
+ _advise_is_size(i)
1154
+
1155
+ def _check_index(cond, message=None): # noqa: F811
1156
+ r"""Throws error containing an optional message if the specified condition
1157
+ is False.
1158
+
1159
+ Error type: ``IndexError``
1160
+
1161
+ C++ equivalent: ``TORCH_CHECK_INDEX``
1162
+
1163
+ Args:
1164
+ cond (:class:`bool`): If False, throw error
1165
+
1166
+ message (Callable, optional): Callable that returns either a string or
1167
+ an object that has a ``__str__()`` method to be used as the error
1168
+ message. Default: ``None``
1169
+ """
1170
+ _check_with(IndexError, cond, message)
1171
+
1172
+ def _check_value(cond, message=None): # noqa: F811
1173
+ r"""Throws error containing an optional message if the specified condition
1174
+ is False.
1175
+
1176
+ Error type: ``ValueError``
1177
+
1178
+ C++ equivalent: ``TORCH_CHECK_VALUE``
1179
+
1180
+ Args:
1181
+ cond (:class:`bool`): If False, throw error
1182
+
1183
+ message (Callable, optional): Callable that returns either a string or
1184
+ an object that has a ``__str__()`` method to be used as the error
1185
+ message. Default: ``None``
1186
+ """
1187
+ _check_with(ValueError, cond, message)
1188
+
1189
+ def _check_type(cond, message=None): # noqa: F811
1190
+ r"""Throws error containing an optional message if the specified condition
1191
+ is False.
1192
+
1193
+ Error type: ``TypeError``
1194
+
1195
+ C++ equivalent: ``TORCH_CHECK_TYPE``
1196
+
1197
+ Args:
1198
+ cond (:class:`bool`): If False, throw error
1199
+
1200
+ message (Callable, optional): Callable that returns either a string or
1201
+ an object that has a ``__str__()`` method to be used as the error
1202
+ message. Default: ``None``
1203
+ """
1204
+ _check_with(TypeError, cond, message)
1205
+
1206
+ def _check_not_implemented(cond, message=None): # noqa: F811
1207
+ r"""Throws error containing an optional message if the specified condition
1208
+ is False.
1209
+
1210
+ Error type: ``NotImplementedError``
1211
+
1212
+ C++ equivalent: ``TORCH_CHECK_NOT_IMPLEMENTED``
1213
+
1214
+ Args:
1215
+ cond (:class:`bool`): If False, throw error
1216
+
1217
+ message (Callable, optional): Callable that returns either a string or
1218
+ an object that has a ``__str__()`` method to be used as the error
1219
+ message. Default: ``None``
1220
+ """
1221
+ _check_with(NotImplementedError, cond, message)
1222
+
1223
+ def _check_tensor_all_with(error_type, cond, message=None): # noqa: F811
1224
+ if not torch.is_tensor(cond):
1225
+ raise TypeError(f'cond must be a tensor, but got {type(cond)}')
1226
+
1227
+ if not cond.dtype == torch.bool:
1228
+ raise TypeError(
1229
+ f'cond tensor must have dtype torch.bool, but got {cond.dtype}')
1230
+
1231
+ _check_with(error_type, cond._is_all_true().item(), message)
1232
+
1233
+ # C++ equivalent: `TORCH_CHECK_TENSOR_ALL`
1234
+ def _check_tensor_all(cond, message=None): # noqa: F811
1235
+ r"""Throws error containing an optional message if the specified condition
1236
+ is False.
1237
+
1238
+ Error type: ``RuntimeError``
1239
+
1240
+ C++ equivalent: ``TORCH_CHECK_TENSOR_ALL``
1241
+
1242
+ Args:
1243
+ cond (:class:`torch.Tensor`): Tensor of dtype ``torch.bool``. If any
1244
+ element is ``False``, throw error
1245
+
1246
+ message (Callable, optional): Callable that returns either a string or
1247
+ an object that has a ``__str__()`` method to be used as the error
1248
+ message. Default: ``None``
1249
+ """
1250
+ _check_tensor_all_with(RuntimeError, cond, message)
1251
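The _check* helpers are the Python-side counterparts of TORCH_CHECK; note that the message is a callable, so it is only evaluated when the check fails. Editorial sketch using the internal torch._check:

import torch

torch._check(1 + 1 == 2)                               # passes silently
try:
    torch._check(False, lambda: "expected a true condition")
except RuntimeError as e:
    assert "expected a true condition" in str(e)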
+
1252
+ ################################################################################
1253
+ # Define numeric constants
1254
+ ################################################################################
1255
+
1256
+ # For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
1257
+ # NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
1258
+ from math import e , nan , inf , pi
1259
+ __all__.extend(['e', 'pi', 'nan', 'inf'])
1260
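These constants are straight re-exports from the math module, mirroring the Array API and NumPy. Editorial sketch:

import math
import torch

assert torch.pi == math.pi and torch.e == math.e
assert torch.inf == math.inf
assert torch.nan != torch.nan    # NaN never compares equal to itself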
+
1261
+ ################################################################################
1262
+ # Define Storage and Tensor classes
1263
+ ################################################################################
1264
+
1265
+ from ._tensor import Tensor
1266
+ from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal
1267
+
1268
+ # NOTE: New <type>Storage classes should never be added. When adding a new
1269
+ # dtype, use torch.storage.TypedStorage directly.
1270
+
1271
+ class ByteStorage(_LegacyStorage):
1272
+ @classproperty
1273
+ def dtype(self):
1274
+ _warn_typed_storage_removal(stacklevel=3)
1275
+ return self._dtype
1276
+
1277
+ @classproperty
1278
+ def _dtype(self):
1279
+ return torch.uint8
1280
+
1281
+ class DoubleStorage(_LegacyStorage):
1282
+ @classproperty
1283
+ def dtype(self):
1284
+ _warn_typed_storage_removal(stacklevel=3)
1285
+ return self._dtype
1286
+
1287
+ @classproperty
1288
+ def _dtype(self):
1289
+ return torch.double
1290
+
1291
+ class FloatStorage(_LegacyStorage):
1292
+ @classproperty
1293
+ def dtype(self):
1294
+ _warn_typed_storage_removal(stacklevel=3)
1295
+ return self._dtype
1296
+
1297
+ @classproperty
1298
+ def _dtype(self):
1299
+ return torch.float
1300
+
1301
+ class HalfStorage(_LegacyStorage):
1302
+ @classproperty
1303
+ def dtype(self):
1304
+ _warn_typed_storage_removal(stacklevel=3)
1305
+ return self._dtype
1306
+
1307
+ @classproperty
1308
+ def _dtype(self):
1309
+ return torch.half
1310
+
1311
+ class LongStorage(_LegacyStorage):
1312
+ @classproperty
1313
+ def dtype(self):
1314
+ _warn_typed_storage_removal(stacklevel=3)
1315
+ return self._dtype
1316
+
1317
+ @classproperty
1318
+ def _dtype(self):
1319
+ return torch.long
1320
+
1321
+ class IntStorage(_LegacyStorage):
1322
+ @classproperty
1323
+ def dtype(self):
1324
+ _warn_typed_storage_removal(stacklevel=3)
1325
+ return self._dtype
1326
+
1327
+ @classproperty
1328
+ def _dtype(self):
1329
+ return torch.int
1330
+
1331
+ class ShortStorage(_LegacyStorage):
1332
+ @classproperty
1333
+ def dtype(self):
1334
+ _warn_typed_storage_removal(stacklevel=3)
1335
+ return self._dtype
1336
+
1337
+ @classproperty
1338
+ def _dtype(self):
1339
+ return torch.short
1340
+
1341
+ class CharStorage(_LegacyStorage):
1342
+ @classproperty
1343
+ def dtype(self):
1344
+ _warn_typed_storage_removal(stacklevel=3)
1345
+ return self._dtype
1346
+
1347
+ @classproperty
1348
+ def _dtype(self):
1349
+ return torch.int8
1350
+
1351
+ class BoolStorage(_LegacyStorage):
1352
+ @classproperty
1353
+ def dtype(self):
1354
+ _warn_typed_storage_removal(stacklevel=3)
1355
+ return self._dtype
1356
+
1357
+ @classproperty
1358
+ def _dtype(self):
1359
+ return torch.bool
1360
+
1361
+ class BFloat16Storage(_LegacyStorage):
1362
+ @classproperty
1363
+ def dtype(self):
1364
+ _warn_typed_storage_removal(stacklevel=3)
1365
+ return self._dtype
1366
+
1367
+ @classproperty
1368
+ def _dtype(self):
1369
+ return torch.bfloat16
1370
+
1371
+ class ComplexDoubleStorage(_LegacyStorage):
1372
+ @classproperty
1373
+ def dtype(self):
1374
+ _warn_typed_storage_removal(stacklevel=3)
1375
+ return self._dtype
1376
+
1377
+ @classproperty
1378
+ def _dtype(self):
1379
+ return torch.cdouble
1380
+
1381
+ class ComplexFloatStorage(_LegacyStorage):
1382
+ @classproperty
1383
+ def dtype(self):
1384
+ _warn_typed_storage_removal(stacklevel=3)
1385
+ return self._dtype
1386
+
1387
+ @classproperty
1388
+ def _dtype(self):
1389
+ return torch.cfloat
1390
+
1391
+ class QUInt8Storage(_LegacyStorage):
1392
+ @classproperty
1393
+ def dtype(self):
1394
+ _warn_typed_storage_removal(stacklevel=3)
1395
+ return self._dtype
1396
+
1397
+ @classproperty
1398
+ def _dtype(self):
1399
+ return torch.quint8
1400
+
1401
+ class QInt8Storage(_LegacyStorage):
1402
+ @classproperty
1403
+ def dtype(self):
1404
+ _warn_typed_storage_removal(stacklevel=3)
1405
+ return self._dtype
1406
+
1407
+ @classproperty
1408
+ def _dtype(self):
1409
+ return torch.qint8
1410
+
1411
+ class QInt32Storage(_LegacyStorage):
1412
+ @classproperty
1413
+ def dtype(self):
1414
+ _warn_typed_storage_removal(stacklevel=3)
1415
+ return self._dtype
1416
+
1417
+ @classproperty
1418
+ def _dtype(self):
1419
+ return torch.qint32
1420
+
1421
+ class QUInt4x2Storage(_LegacyStorage):
1422
+ @classproperty
1423
+ def dtype(self):
1424
+ _warn_typed_storage_removal(stacklevel=3)
1425
+ return self._dtype
1426
+
1427
+ @classproperty
1428
+ def _dtype(self):
1429
+ return torch.quint4x2
1430
+
1431
+ class QUInt2x4Storage(_LegacyStorage):
1432
+ @classproperty
1433
+ def dtype(self):
1434
+ _warn_typed_storage_removal(stacklevel=3)
1435
+ return self._dtype
1436
+
1437
+ @classproperty
1438
+ def _dtype(self):
1439
+ return torch.quint2x4
1440
+
1441
+ _storage_classes = {
1442
+ UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
1443
+ ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
1444
+ QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
1445
+ ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
1446
+ TypedStorage
1447
+ }
1448
+
1449
+ # The _tensor_classes set is initialized by the call to initialize_python_bindings.
1450
+ _tensor_classes: Set[Type] = set()
1451
+
1452
+ # If you edit these imports, please update torch/__init__.py.in as well
1453
+ from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
1454
+ from .serialization import save, load
1455
+ from ._tensor_str import set_printoptions
1456
+
1457
+ ################################################################################
1458
+ # Initialize extension
1459
+ ################################################################################
1460
+
1461
+ def manager_path():
1462
+ if _running_with_deploy() or platform.system() == 'Windows':
1463
+ return b""
1464
+ path = get_file_path('torch', 'bin', 'torch_shm_manager')
1465
+ prepare_multiprocessing_environment(get_file_path('torch'))
1466
+ if not os.path.exists(path):
1467
+ raise RuntimeError("Unable to find torch_shm_manager at " + path)
1468
+ return path.encode('utf-8')
1469
+
1470
+ from torch.amp import autocast, GradScaler
1471
+
1472
+ # Initializing the extension shadows the built-in python float / int classes;
1473
+ # store them for later use by SymInt / SymFloat.
1474
+ py_float = float
1475
+ py_int = int
1476
+
1477
+ # Shared memory manager needs to know the exact location of manager executable
1478
+ _C._initExtension(manager_path())
1479
+ del manager_path
1480
+
1481
+ # Appease the type checker: it can't deal with direct setting of globals().
1482
+ # Note that we will see "too many" functions when reexporting this way; there
1483
+ # is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
1484
+ # so that this import is good enough
1485
+ if TYPE_CHECKING:
1486
+ # Some type signatures pulled in from _VariableFunctions here clash with
1487
+ # signatures already imported. For now these clashes are ignored; see
1488
+ # PR #43339 for details.
1489
+ from torch._C._VariableFunctions import * # type: ignore[assignment, misc] # noqa: F403
1490
+ # Fixup segment_reduce visibility
1491
+ _segment_reduce = segment_reduce
1492
+ del segment_reduce # noqa: F821
1493
+
1494
+ # Ops not to be exposed in `torch` namespace,
1495
+ # mostly helper ops.
1496
+ PRIVATE_OPS = (
1497
+ 'unique_dim',
1498
+ )
1499
+
1500
+ for name in dir(_C._VariableFunctions):
1501
+ if name.startswith('__') or name in PRIVATE_OPS:
1502
+ continue
1503
+ obj = getattr(_C._VariableFunctions, name)
1504
+ obj.__module__ = 'torch'
1505
+ # Hide some APIs that should not be public
1506
+ if name == "segment_reduce":
1507
+ # TODO: Once the undocumented FC window is passed, remove the line below
1508
+ globals()[name] = obj
1509
+ name = "_" + name
1510
+ globals()[name] = obj
1511
+ if not name.startswith("_"):
1512
+ __all__.append(name)
1513
+
1514
+
1515
+ ################################################################################
1516
+ # Add torch.dtype instances to the public API
1517
+ ################################################################################
1518
+
1519
+ import torch
1520
+
1521
+ for attribute in dir(torch):
1522
+ if isinstance(getattr(torch, attribute), torch.dtype):
1523
+ __all__.append(attribute)
1524
+
1525
+ ################################################################################
1526
+ # Import TorchDynamo's lazy APIs to avoid circular dependencies
1527
+ ################################################################################
1528
+
1529
+ # needs to be before from .functional import * to avoid circular dependencies
1530
+ from ._compile import _disable_dynamo
1531
+
1532
+ ################################################################################
1533
+ # Import interface functions defined in Python
1534
+ ################################################################################
1535
+
1536
+ # needs to be after the above ATen bindings so we can overwrite from Python side
1537
+ from .functional import * # noqa: F403
1538
+
1539
+
1540
+ ################################################################################
1541
+ # Remove unnecessary members
1542
+ ################################################################################
1543
+
1544
+ del _StorageBase
1545
+ del _LegacyStorage
1546
+
1547
+ ################################################################################
1548
+ # Define _assert
1549
+ ################################################################################
1550
+
1551
+ # needs to be before the submodule imports to avoid circular dependencies
1552
+ def _assert(condition, message):
1553
+ r"""A wrapper around Python's assert which is symbolically traceable.
1554
+ """
1555
+ from .overrides import has_torch_function, handle_torch_function
1556
+
1557
+ if type(condition) is not torch.Tensor and has_torch_function((condition,)):
1558
+ return handle_torch_function(_assert, (condition,), condition, message)
1559
+ assert condition, message
1560
+
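Because `torch._assert` routes through `__torch_function__` when the condition is a tensor-like type that overrides it, tracers can record the assertion instead of evaluating it eagerly; with a plain boolean it degenerates to a normal `assert`. A small usage sketch (assuming a standard PyTorch install):

```python
import torch

def clamp_nonempty(x: torch.Tensor) -> torch.Tensor:
    # With an ordinary bool condition this is just `assert`; under tracing,
    # a tensor/proxy condition lets the assertion be captured in the graph.
    torch._assert(x.numel() > 0, "expected a non-empty tensor")
    return x.clamp(min=0)

print(clamp_nonempty(torch.tensor([-1.0, 2.0])))   # tensor([0., 2.])
```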
1561
+ ################################################################################
1562
+ # Import most common subpackages
1563
+ ################################################################################
1564
+
1565
+ # Use the redundant form so that type checkers know that these are a part of
1566
+ # the public API. The "regular" import lines are there solely for the runtime
1567
+ # side effect of adding to the imported module's members for other users.
1568
+ from torch import cuda as cuda
1569
+ from torch import cpu as cpu
1570
+ from torch import mps as mps
1571
+ from torch import xpu as xpu
1572
+ from torch import autograd as autograd
1573
+ from torch.autograd import (
1574
+ no_grad as no_grad,
1575
+ enable_grad as enable_grad,
1576
+ set_grad_enabled as set_grad_enabled,
1577
+ inference_mode as inference_mode,
1578
+ )
1579
+ from torch import fft as fft
1580
+ from torch import futures as futures
1581
+ from torch import _awaits as _awaits
1582
+ from torch import nested as nested
1583
+ from torch import nn as nn
1584
+ from torch.signal import windows as windows
1585
+ from torch import optim as optim
1586
+ import torch.optim._multi_tensor
1587
+ from torch import multiprocessing as multiprocessing
1588
+ from torch import sparse as sparse
1589
+ from torch import special as special
1590
+ import torch.utils.backcompat
1591
+ from torch import jit as jit
1592
+ from torch import linalg as linalg
1593
+ from torch import hub as hub
1594
+ from torch import random as random
1595
+ from torch import distributions as distributions
1596
+ from torch import testing as testing
1597
+ from torch import backends as backends
1598
+ import torch.utils.data
1599
+ from torch import __config__ as __config__
1600
+ from torch import __future__ as __future__
1601
+ from torch import profiler as profiler
1602
+
1603
+ # Quantized, sparse, AO, etc. should be last to get imported, as nothing
1604
+ # is expected to depend on them.
1605
+ from torch import ao as ao
1606
+ # nn.quant* depends on ao -- so should be after those.
1607
+ import torch.nn.quantizable
1608
+ import torch.nn.quantized
1609
+ import torch.nn.qat
1610
+ import torch.nn.intrinsic
1611
+
1612
+ _C._init_names(list(torch._storage_classes))
1613
+
1614
+ # attach docstrings to torch and tensor functions
1615
+ from . import _torch_docs, _tensor_docs, _storage_docs
1616
+ del _torch_docs, _tensor_docs, _storage_docs
1617
+
1618
+
1619
+ def compiled_with_cxx11_abi() -> builtins.bool:
1620
+ r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
1621
+ return _C._GLIBCXX_USE_CXX11_ABI
1622
+
1623
+
1624
+ # Import the ops "namespace"
1625
+ from torch._ops import ops
1626
+ from torch._classes import classes
1627
+ import torch._library
1628
+
1629
+ # quantization depends on torch.fx
1630
+ # Import quantization
1631
+ from torch import quantization as quantization
1632
+
1633
+ # Import the quasi random sampler
1634
+ from torch import quasirandom as quasirandom
1635
+
1636
+ # If you are seeing this, it means that this call site was not checked if
1637
+ # the memory format could be preserved, and it was switched to the old default
1638
+ # behaviour of contiguous
1639
+ legacy_contiguous_format = contiguous_format
1640
+
1641
+ # Register fork handler to initialize OpenMP in child processes (see gh-28389)
1642
+ from torch.multiprocessing._atfork import register_after_fork
1643
+ register_after_fork(torch.get_num_threads)
1644
+ del register_after_fork
1645
+
1646
+ # Import tools that require fully imported torch (for applying
1647
+ # torch.jit.script as a decorator, for instance):
1648
+ from ._lobpcg import lobpcg as lobpcg
1649
+
1650
+ # These were previously defined in native_functions.yaml and appeared on the
1651
+ # `torch` namespace, but we moved them to c10 dispatch to facilitate custom
1652
+ # class usage. We add these lines here to preserve backward compatibility.
1653
+ quantized_lstm = torch.ops.aten.quantized_lstm
1654
+ quantized_gru = torch.ops.aten.quantized_gru
1655
+
1656
+ from torch.utils.dlpack import from_dlpack, to_dlpack
1657
+
1658
+ # Import experimental masked operations support. See
1659
+ # [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
1660
+ # information.
1661
+ from . import masked
1662
+
1663
+ # Import removed ops with error message about removal
1664
+ from ._linalg_utils import ( # type: ignore[misc]
1665
+ matrix_rank,
1666
+ eig,
1667
+ solve,
1668
+ lstsq,
1669
+ )
1670
+ from ._linalg_utils import _symeig as symeig # type: ignore[misc]
1671
+
1672
+ class _TorchCompileInductorWrapper:
1673
+ compiler_name = "inductor"
1674
+
1675
+ def __init__(self, mode, options, dynamic):
1676
+ self.config: Dict[str, Any] = dict()
1677
+ self.dynamic = dynamic
1678
+ self.apply_mode(mode)
1679
+ self.apply_options(options)
1680
+
1681
+ if self.config.get("triton.cudagraphs", False):
1682
+ os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
1683
+ # FIXME: CUDA Graph does not work well with CUPTI teardown.
1684
+ # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
1685
+ # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
1686
+ # Workaround: turn off CUPTI teardown when using CUDA Graphs.
1687
+ os.environ["TEARDOWN_CUPTI"] = "0"
1688
+
1689
+ def __eq__(self, other):
1690
+ return (isinstance(other, _TorchCompileInductorWrapper) and
1691
+ self.config == other.config and
1692
+ self.dynamic == other.dynamic)
1693
+
1694
+ def apply_mode(self, mode: Optional[str]):
1695
+ if mode is None or mode == "default":
1696
+ pass
1697
+ elif mode in ("reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"):
1698
+ from torch._inductor import list_mode_options
1699
+ self.apply_options(list_mode_options(mode, self.dynamic))
1700
+ else:
1701
+ raise RuntimeError(
1702
+ f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune, max-autotune-no-cudagraphs"
1703
+ )
1704
+
1705
+ def apply_options(self, options: Optional[Dict[str, Any]]):
1706
+ if not options:
1707
+ return
1708
+
1709
+ from torch._inductor import config
1710
+ current_config: Dict[str, Any] = config.shallow_copy_dict()
1711
+
1712
+ for key, val in options.items():
1713
+ attr_name = key.replace("-", "_")
1714
+ if attr_name not in current_config:
1715
+ raise RuntimeError(
1716
+ f"Unexpected optimization option {key}, known options are {list(current_config.keys())}"
1717
+ )
1718
+ if type(val) is not type(current_config[attr_name]):
1719
+ val_type_str = type(val).__name__
1720
+ expected_type_str = type(current_config[attr_name]).__name__
1721
+ raise RuntimeError(
1722
+ f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}"
1723
+ )
1724
+ self.config[attr_name] = val
1725
+
1726
+ def __call__(self, model_, inputs_):
1727
+ from torch._inductor.compile_fx import compile_fx
1728
+
1729
+ return compile_fx(model_, inputs_, config_patches=self.config)
1730
+
1731
+ def get_compiler_config(self):
1732
+ from torch._inductor.compile_fx import get_patched_config_dict
1733
+ return get_patched_config_dict(config_patches=self.config)
1734
+
1735
+ def reset(self):
1736
+ from torch._inductor import config
1737
+ if "triton.cudagraphs" in self.config or config.triton.cudagraphs:
1738
+ if self.config.get("triton.cudagraphs", True):
1739
+ from torch._inductor.cudagraph_trees import reset_cudagraph_trees
1740
+ reset_cudagraph_trees()
1741
+
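This wrapper is what `torch.compile(backend="inductor")` builds under the hood: `apply_mode` expands a mode string into concrete inductor options, and `apply_options` validates every key against the inductor config (normalising `-` to `_` and type-checking the value) before storing it. A hedged sketch of what that means for callers, assuming a standard PyTorch install:

```python
import torch

# Mode strings are shorthand for option dictionaries; list_mode_options shows
# exactly which inductor options a given mode turns on.
print(torch._inductor.list_mode_options("reduce-overhead"))

# Unknown options are rejected eagerly by apply_options, before any
# compilation happens, rather than being silently ignored.
try:
    torch.compile(lambda x: x + 1, options={"not_a_real_option": True})
except RuntimeError as err:
    print(err)
```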
1742
+ class _TorchCompileWrapper:
1743
+ def __init__(self, backend, mode, options, dynamic):
1744
+ from torch._dynamo.backends.registry import lookup_backend
1745
+
1746
+ if isinstance(backend, str):
1747
+ self.compiler_name = backend
1748
+ elif hasattr(backend, "__name__"):
1749
+ self.compiler_name = backend.__name__
1750
+ else:
1751
+ self.compiler_name = str(backend)
1752
+ self.dynamic = dynamic
1753
+ self.compiler_fn = lookup_backend(backend)
1754
+ self.kwargs = {}
1755
+ # only pass the args if they are non-empty
1756
+ if mode and mode != "default":
1757
+ self.kwargs["mode"] = mode
1758
+ if options:
1759
+ self.kwargs["options"] = options
1760
+
1761
+ def __eq__(self, other):
1762
+ return (isinstance(other, _TorchCompileWrapper) and
1763
+ self.compiler_fn == other.compiler_fn and
1764
+ self.kwargs == other.kwargs and
1765
+ self.dynamic == other.dynamic)
1766
+
1767
+ def __call__(self, model_, inputs_):
1768
+ return self.compiler_fn(model_, inputs_, **self.kwargs)
1769
+
1770
+ def reset(self):
1771
+ if hasattr(self.compiler_fn, "reset"):
1772
+ self.compiler_fn.reset()
1773
+
1774
+
1775
+ def compile(model: Optional[Callable] = None, *,
1776
+ fullgraph: builtins.bool = False,
1777
+ dynamic: Optional[builtins.bool] = None,
1778
+ backend: Union[str, Callable] = "inductor",
1779
+ mode: Union[str, None] = None,
1780
+ options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
1781
+ disable: builtins.bool = False) -> Callable:
1782
+ """
1783
+ Optimizes given model/function using TorchDynamo and specified backend.
1784
+
1785
+ Concretely, for every frame executed within the compiled region, we will attempt
1786
+ to compile it and cache the compiled result on the code object for future
1787
+ use. A single frame may be compiled multiple times if previous compiled
1788
+ results are not applicable for subsequent calls (this is called a "guard
1789
+ failure"); you can use TORCH_LOGS=guards to debug these situations.
1790
+ Multiple compiled results can be associated with a frame up to
1791
+ ``torch._dynamo.config.cache_size_limit``, which defaults to 64, at which
1792
+ point we will fall back to eager. Note that compile caches are per
1793
+ *code object*, not frame; if you dynamically create multiple copies of a
1794
+ function, they will all share the same code cache.
1795
+
1796
+ Args:
1797
+ model (Callable): Module/function to optimize
1798
+ fullgraph (bool): If False (default), torch.compile attempts to discover compilable regions
1799
+ in the function that it will optimize. If True, then we require that the entire function be
1800
+ capturable into a single graph. If this is not possible (that is, if there are graph breaks),
1801
+ then this will raise an error.
1802
+ dynamic (bool or None): Use dynamic shape tracing. When this is True, we will up-front attempt
1803
+ to generate a kernel that is as dynamic as possible to avoid recompilations when
1804
+ sizes change. This may not always work as some operations/optimizations will
1805
+ force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
1806
+ When this is False, we will NEVER generate dynamic kernels, we will always specialize.
1807
+ By default (None), we automatically detect if dynamism has occurred and compile a more
1808
+ dynamic kernel upon recompile.
1809
+ backend (str or Callable): backend to be used
1810
+
1811
+ - "inductor" is the default backend, which is a good balance between performance and overhead
1812
+
1813
+ - Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()`
1814
+
1815
+ - Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`
1816
+
1817
+ - To register an out-of-tree custom backend: https://pytorch.org/docs/main/compile/custom-backends.html
1818
+ mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"
1819
+
1820
+ - "default" is the default mode, which is a good balance between performance and overhead
1821
+
1822
+ - "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
1823
+ useful for small batches. Reduction of overhead can come at the cost of more memory
1824
+ usage, as we will cache the workspace memory required for the invocation so that we
1825
+ do not have to reallocate it on subsequent runs. Reduction of overhead is not guaranteed
1826
+ to work; today, we only reduce overhead for CUDA-only graphs which do not mutate inputs.
1827
+ There are other circumstances where CUDA graphs are not applicable; use TORCH_LOGS=perf_hints
1828
+ to debug.
1829
+
1830
+ - "max-autotune" is a mode that leverages Triton based matrix multiplications and convolutions
1831
+ It enables CUDA graphs by default.
1832
+
1833
+ - "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs
1834
+
1835
+ - To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`
1836
+
1837
+ options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are
1838
+
1839
+ - `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set
1840
+
1841
+ - `max_autotune` which will profile to pick the best matmul configuration
1842
+
1843
+ - `fallback_random` which is useful when debugging accuracy issues
1844
+
1845
+ - `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores
1846
+
1847
+ - `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs
1848
+
1849
+ - `trace.enabled` which is the most useful debugging flag to turn on
1850
+
1851
+ - `trace.graph_diagram` which will show you a picture of your graph after fusion
1852
+
1853
+ - For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
1854
+ disable (bool): Turn torch.compile() into a no-op for testing
1855
+
1856
+ Example::
1857
+
1858
+ @torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
1859
+ def foo(x):
1860
+ return torch.sin(x) + torch.cos(x)
1861
+
1862
+ """
1863
+ _C._log_api_usage_once("torch.compile")
1864
+ # Temporary until we get proper support for python 3.12
1865
+ if sys.version_info >= (3, 12):
1866
+ raise RuntimeError("Dynamo is not supported on Python 3.12+")
1867
+
1868
+ # Decorator mode
1869
+ if model is None:
1870
+ def fn(model: Callable):
1871
+ if model is None:
1872
+ raise RuntimeError("Model can't be None")
1873
+ return compile(model,
1874
+ fullgraph=fullgraph,
1875
+ dynamic=dynamic,
1876
+ backend=backend,
1877
+ mode=mode,
1878
+ options=options,
1879
+ disable=disable)
1880
+ return fn
1881
+
1882
+ if mode is not None and options is not None:
1883
+ raise RuntimeError("Either mode or options can be specified, but both can't be specified at the same time.")
1884
+ if mode is None and options is None:
1885
+ mode = "default"
1886
+ if backend == "inductor":
1887
+ backend = _TorchCompileInductorWrapper(mode, options, dynamic)
1888
+ else:
1889
+ backend = _TorchCompileWrapper(backend, mode, options, dynamic)
1890
+
1891
+ return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model)
1892
+
1893
+
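To complement the decorator form in the docstring, here is a hedged sketch of the direct call form on a plain function; `mode` and `options` are mutually exclusive, and the first invocation of the compiled callable is what triggers compilation (assuming a supported Python version and a working inductor toolchain):

```python
import torch

def f(x):
    return torch.sin(x) + torch.cos(x)

# Direct (non-decorator) form of torch.compile with the default inductor backend.
opt_f = torch.compile(f, mode="default")

x = torch.randn(8)
print(torch.allclose(opt_f(x), f(x)))   # True; the first call compiles, later calls hit the cache
```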
1894
+ from torch import export as export
1895
+
1896
+ from torch._higher_order_ops import cond
1897
+
1898
+ def _register_device_module(device_type, module):
1899
+ r"""Register an external runtime module of the specific :attr:`device_type`
1900
+ supported by torch.
1901
+
1902
+ After the :attr:`module` is registered correctly, the user can refer to
1903
+ the external runtime module as part of torch with attribute torch.xxx.
1904
+ """
1905
+ # Make sure the device_type represents a supported device type for torch.
1906
+ device_type = torch.device(device_type).type
1907
+ m = sys.modules[__name__]
1908
+ if hasattr(m, device_type):
1909
+ raise RuntimeError(f"The runtime module of '{device_type}' has already "
1910
+ f"been registered with '{getattr(m, device_type)}'")
1911
+ setattr(m, device_type, module)
1912
+ torch_module_name = '.'.join([__name__, device_type])
1913
+ sys.modules[torch_module_name] = module
1914
+
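A hedged sketch of how an out-of-tree backend could use `_register_device_module`. The module below is a purely hypothetical stand-in; `"privateuseone"` is the device type PyTorch reserves for custom backends, and a real integration would register a full runtime package rather than a namespace object:

```python
import types
import torch

# Hypothetical runtime module for a custom accelerator (illustration only).
my_backend = types.SimpleNamespace(
    is_available=lambda: True,
    device_count=lambda: 1,
)

# 'privateuseone' is the device type reserved for out-of-tree backends.
torch._register_device_module("privateuseone", my_backend)

print(torch.privateuseone.device_count())   # 1
```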
1915
+ # expose return_types
1916
+ from . import return_types
1917
+ from . import library
1918
+ if not TYPE_CHECKING:
1919
+ from . import _meta_registrations
1920
+
1921
+ # Enable CUDA Sanitizer
1922
+ if 'TORCH_CUDA_SANITIZER' in os.environ:
1923
+ import torch.cuda._sanitizer as csan
1924
+
1925
+ csan.enable_cuda_sanitizer()
1926
+
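The sanitizer is toggled purely through the environment, so the script being checked needs no code changes. A hedged sketch of launching a (hypothetical) script with it enabled; `train_step.py` is a placeholder:

```python
import os
import subprocess

# Enable torch.cuda._sanitizer in the child process via the environment only.
env = dict(os.environ, TORCH_CUDA_SANITIZER="1")
subprocess.run(["python", "train_step.py"], env=env, check=True)
```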
1927
+ # Populate magic methods on SymInt and SymFloat
1928
+ import torch.fx.experimental.sym_node
1929
+
1930
+ from torch import func as func
1931
+ from torch.func import vmap
1932
+
1933
+
1934
+ # The function _sparse_coo_tensor_unsafe is removed from PyTorch
1935
+ # Python API (v. 1.13); here we temporarily provide its replacement
1936
+ # with a deprecation warning.
1937
+ # TODO: remove the function for PyTorch v 1.15.
1938
+ def _sparse_coo_tensor_unsafe(*args, **kwargs):
1939
+ import warnings
1940
+ warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, '
1941
+ 'use torch.sparse_coo_tensor(..., check_invariants=False) instead.')
1942
+ kwargs['check_invariants'] = False
1943
+ return torch.sparse_coo_tensor(*args, **kwargs)
1944
+
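The shim above just forwards to the public constructor with invariant checking disabled. A short sketch of the recommended replacement call:

```python
import torch

indices = torch.tensor([[0, 1], [1, 0]])
values = torch.tensor([3.0, 4.0])

# Preferred spelling: skip invariant validation explicitly rather than going
# through the deprecated torch._sparse_coo_tensor_unsafe shim.
s = torch.sparse_coo_tensor(indices, values, (2, 2), check_invariants=False)
print(s.to_dense())
```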
1945
+ # Register MPS specific decomps
1946
+ torch.backends.mps._init()
1947
+
1948
+ if not _running_with_deploy():
1949
+ from torch import compiler as compiler
1950
+
1951
+ class _TritonLibrary:
1952
+ lib = torch.library.Library("triton", "DEF")
1953
+ ops_table: Dict[Tuple[str, str], Callable] = {}
1954
+
1955
+ @classmethod
1956
+ def registerOp(cls, op_key, full_schema, op_impl, dispatch_key):
1957
+ if (op_key, dispatch_key) not in cls.ops_table:
1958
+ cls.lib.define(full_schema)
1959
+ cls.lib.impl("triton::" + op_key, op_impl, dispatch_key)
1960
+ cls.ops_table[(op_key, dispatch_key)] = op_impl
1961
+
1962
+ return cls.ops_table[(op_key, dispatch_key)]
1963
+
1964
+
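`_TritonLibrary.registerOp` memoizes registrations into the `triton` operator namespace so the same (op, dispatch key) pair is only defined once. A hedged sketch of a registration; the op name, schema, and implementation below are made up for illustration:

```python
import torch

def _add_one(x):
    return x + 1

# Hypothetical op: defines triton::add_one and installs a CPU implementation.
torch._TritonLibrary.registerOp(
    "add_one",                       # op_key
    "add_one(Tensor x) -> Tensor",   # full_schema
    _add_one,                        # op_impl
    "CPU",                           # dispatch_key
)

print(torch.ops.triton.add_one(torch.zeros(3)))   # tensor([1., 1., 1.])
```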
1965
+ # Deprecated attributes
1966
+ _deprecated_attrs = {
1967
+ "has_mps": torch.backends.mps.is_built,
1968
+ "has_cuda": torch.backends.cuda.is_built,
1969
+ "has_cudnn": torch.backends.cudnn.is_available,
1970
+ "has_mkldnn": torch.backends.mkldnn.is_available,
1971
+ }
1972
+
1973
+ if TYPE_CHECKING:
1974
+ # Import the following modules during type checking to enable code intelligence features,
1975
+ # such as auto-completion in tools like pylance, even when these modules are not explicitly
1976
+ # imported in user code.
1977
+ from torch import _dynamo as _dynamo
1978
+ from torch import _inductor as _inductor
1979
+ from torch import onnx as onnx
1980
+
1981
+ else:
1982
+ _lazy_modules = {
1983
+ "_dynamo",
1984
+ "_inductor",
1985
+ "_export",
1986
+ # ONNX must be imported after _dynamo, _ops, _subclasses, fx, func and jit
1987
+ "onnx",
1988
+ }
1989
+
1990
+ def __getattr__(name):
1991
+ # Deprecated attrs
1992
+ replacement = _deprecated_attrs.get(name)
1993
+ if replacement is not None:
1994
+ import warnings
1995
+ warnings.warn(f"'{name}' is deprecated, please use '{replacement.__module__}.{replacement.__name__}()'", stacklevel=2)
1996
+ return replacement()
1997
+
1998
+ # Lazy modules
1999
+ if name in _lazy_modules:
2000
+ import importlib
2001
+ return importlib.import_module(f".{name}", __name__)
2002
+
2003
+ raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
2004
+
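The module-level `__getattr__` above combines two PEP 562 idioms: mapping deprecated attributes to a replacement callable (with a warning) and importing heavy submodules only on first access. A minimal stand-alone sketch of the same pattern for a hypothetical package `mypkg`:

```python
# mypkg/__init__.py -- hypothetical package illustrating the PEP 562 pattern.
import importlib
import warnings

_deprecated_attrs = {
    "has_gpu": lambda: False,        # old attribute -> replacement callable
}
_lazy_modules = {"heavy_submodule"}  # imported only when first accessed

def __getattr__(name):
    replacement = _deprecated_attrs.get(name)
    if replacement is not None:
        warnings.warn(f"'{name}' is deprecated, call its replacement instead",
                      stacklevel=2)
        return replacement()
    if name in _lazy_modules:
        return importlib.import_module(f".{name}", __name__)
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
```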
2005
+
2006
+ def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
2007
+ """
2008
+ Add min/max constraint on the intermediate symbol at tracing time. If called in eager mode,
2009
+ it will still check if the input value is within the specified range.
2010
+ """
2011
+ torch.sym_constrain_range(symbol, min=min, max=max)
2012
+
2013
+
2014
+ def _constrain_as_size(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
2015
+ """
2016
+ This indicates that a given int is size-like, and can be used in any context where a size is expected.
2017
+ You will typically use this when reading out integers from Tensors, e.g., max.item() or lengths.tolist()
2018
+ which then need to be used as tensor constructors. Providing these assertions to PyTorch can help resolve
2019
+ GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts.
2020
+
2021
+ This function has unusual semantics which distinguish it from
2022
+ constrain_as_value. Specifically, in some circumstances in framework
2023
+ code, we will treat this int as >= 2 (when we do a size-oblivious guard).
2024
+ This makes it easier to use the unbacked int in
2025
+ size contexts, as we will often attempt to guard on a size being zero/one
2026
+ (e.g., when computing the contiguity of a tensor, or testing if
2027
+ broadcasting can occur), which will not work on unbacked SymInts.
2028
+ However, if we conservatively assume that the size is not zero/one, we will
2029
+ end up with a graph that will still work even if the size is zero/one.
2030
+
2031
+ For more details, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit
2032
2033
+ """
2034
+ torch.sym_constrain_range_for_size(symbol, min=min, max=max)
2035
+
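A hedged sketch of the situation these helpers target: an integer read out of a tensor becomes an unbacked SymInt under tracing/export, and the constraint tells the compiler what range it may assume. Per the docstring above, in eager mode the call reduces to a plain range check, so the snippet below also runs eagerly:

```python
import torch

def take_first_k(x: torch.Tensor, k_tensor: torch.Tensor) -> torch.Tensor:
    k = k_tensor.item()                                   # unbacked SymInt when traced/exported
    torch._constrain_as_value(k, min=0, max=x.shape[0])   # eager: just a range check
    return x[:k]

print(take_first_k(torch.arange(5), torch.tensor(3)))     # tensor([0, 1, 2])
```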
2036
+
2037
+ from . import _logging
2038
+ _logging._init_logs()