applied-ai-018 committed on
Commit
a9484b6
·
verified ·
1 Parent(s): b7e257c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/INSTALLER +1 -0
  2. llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/LICENSE +201 -0
  3. llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/METADATA +380 -0
  4. llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/RECORD +163 -0
  5. llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/WHEEL +5 -0
  6. llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/entry_points.txt +5 -0
  7. llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/top_level.txt +1 -0
  8. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/__init__.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/_identifier.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/async_utils.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/bccache.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/compiler.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/constants.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/debug.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/defaults.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/environment.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/exceptions.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/ext.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/filters.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/idtracking.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/lexer.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/loaders.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/meta.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/nativetypes.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/nodes.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/optimizer.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/parser.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/runtime.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/sandbox.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/tests.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/utils.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/visitor.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/jinja2/lexer.py +868 -0
  34. llmeval-env/lib/python3.10/site-packages/jinja2/sandbox.py +429 -0
  35. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/INSTALLER +1 -0
  36. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/LICENSE.md +21 -0
  37. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/METADATA +542 -0
  38. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/RECORD +14 -0
  39. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/REQUESTED +0 -0
  40. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/WHEEL +5 -0
  41. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/direct_url.json +1 -0
  42. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/entry_points.txt +3 -0
  43. llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/top_level.txt +1 -0
  44. llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/networkx/tests/test_all_random_functions.py +251 -0
  49. llmeval-env/lib/python3.10/site-packages/networkx/tests/test_convert.py +321 -0
  50. llmeval-env/lib/python3.10/site-packages/networkx/tests/test_convert_pandas.py +320 -0
llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/METADATA ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: accelerate
3
+ Version: 0.30.0
4
+ Summary: Accelerate
5
+ Home-page: https://github.com/huggingface/accelerate
6
+ Author: The HuggingFace team
7
+ Author-email: [email protected]
8
+ License: Apache
9
+ Keywords: deep learning
10
+ Classifier: Development Status :: 5 - Production/Stable
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: OSI Approved :: Apache Software License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.8
18
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
19
+ Requires-Python: >=3.8.0
20
+ Description-Content-Type: text/markdown
21
+ License-File: LICENSE
22
+ Requires-Dist: numpy (>=1.17)
23
+ Requires-Dist: packaging (>=20.0)
24
+ Requires-Dist: psutil
25
+ Requires-Dist: pyyaml
26
+ Requires-Dist: torch (>=1.10.0)
27
+ Requires-Dist: huggingface-hub
28
+ Requires-Dist: safetensors (>=0.3.1)
29
+ Provides-Extra: deepspeed
30
+ Requires-Dist: deepspeed (<=0.14.0) ; extra == 'deepspeed'
31
+ Provides-Extra: dev
32
+ Requires-Dist: black (~=23.1) ; extra == 'dev'
33
+ Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'dev'
34
+ Requires-Dist: ruff (~=0.2.1) ; extra == 'dev'
35
+ Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'dev'
36
+ Requires-Dist: pytest-xdist ; extra == 'dev'
37
+ Requires-Dist: pytest-subtests ; extra == 'dev'
38
+ Requires-Dist: parameterized ; extra == 'dev'
39
+ Requires-Dist: datasets ; extra == 'dev'
40
+ Requires-Dist: diffusers ; extra == 'dev'
41
+ Requires-Dist: evaluate ; extra == 'dev'
42
+ Requires-Dist: torchpippy (>=0.2.0) ; extra == 'dev'
43
+ Requires-Dist: transformers ; extra == 'dev'
44
+ Requires-Dist: scipy ; extra == 'dev'
45
+ Requires-Dist: scikit-learn ; extra == 'dev'
46
+ Requires-Dist: tqdm ; extra == 'dev'
47
+ Requires-Dist: bitsandbytes ; extra == 'dev'
48
+ Requires-Dist: timm ; extra == 'dev'
49
+ Requires-Dist: rich ; extra == 'dev'
50
+ Provides-Extra: docs
51
+ Provides-Extra: quality
52
+ Requires-Dist: black (~=23.1) ; extra == 'quality'
53
+ Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'quality'
54
+ Requires-Dist: ruff (~=0.2.1) ; extra == 'quality'
55
+ Provides-Extra: rich
56
+ Requires-Dist: rich ; extra == 'rich'
57
+ Provides-Extra: sagemaker
58
+ Requires-Dist: sagemaker ; extra == 'sagemaker'
59
+ Provides-Extra: test_dev
60
+ Requires-Dist: datasets ; extra == 'test_dev'
61
+ Requires-Dist: diffusers ; extra == 'test_dev'
62
+ Requires-Dist: evaluate ; extra == 'test_dev'
63
+ Requires-Dist: torchpippy (>=0.2.0) ; extra == 'test_dev'
64
+ Requires-Dist: transformers ; extra == 'test_dev'
65
+ Requires-Dist: scipy ; extra == 'test_dev'
66
+ Requires-Dist: scikit-learn ; extra == 'test_dev'
67
+ Requires-Dist: tqdm ; extra == 'test_dev'
68
+ Requires-Dist: bitsandbytes ; extra == 'test_dev'
69
+ Requires-Dist: timm ; extra == 'test_dev'
70
+ Provides-Extra: test_prod
71
+ Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'test_prod'
72
+ Requires-Dist: pytest-xdist ; extra == 'test_prod'
73
+ Requires-Dist: pytest-subtests ; extra == 'test_prod'
74
+ Requires-Dist: parameterized ; extra == 'test_prod'
75
+ Provides-Extra: test_trackers
76
+ Requires-Dist: wandb ; extra == 'test_trackers'
77
+ Requires-Dist: comet-ml ; extra == 'test_trackers'
78
+ Requires-Dist: tensorboard ; extra == 'test_trackers'
79
+ Requires-Dist: dvclive ; extra == 'test_trackers'
80
+ Provides-Extra: testing
81
+ Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'testing'
82
+ Requires-Dist: pytest-xdist ; extra == 'testing'
83
+ Requires-Dist: pytest-subtests ; extra == 'testing'
84
+ Requires-Dist: parameterized ; extra == 'testing'
85
+ Requires-Dist: datasets ; extra == 'testing'
86
+ Requires-Dist: diffusers ; extra == 'testing'
87
+ Requires-Dist: evaluate ; extra == 'testing'
88
+ Requires-Dist: torchpippy (>=0.2.0) ; extra == 'testing'
89
+ Requires-Dist: transformers ; extra == 'testing'
90
+ Requires-Dist: scipy ; extra == 'testing'
91
+ Requires-Dist: scikit-learn ; extra == 'testing'
92
+ Requires-Dist: tqdm ; extra == 'testing'
93
+ Requires-Dist: bitsandbytes ; extra == 'testing'
94
+ Requires-Dist: timm ; extra == 'testing'
95
+
96
+ <!---
97
+ Copyright 2021 The HuggingFace Team. All rights reserved.
98
+
99
+ Licensed under the Apache License, Version 2.0 (the "License");
100
+ you may not use this file except in compliance with the License.
101
+ You may obtain a copy of the License at
102
+
103
+ http://www.apache.org/licenses/LICENSE-2.0
104
+
105
+ Unless required by applicable law or agreed to in writing, software
106
+ distributed under the License is distributed on an "AS IS" BASIS,
107
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
108
+ See the License for the specific language governing permissions and
109
+ limitations under the License.
110
+ -->
111
+
112
+ <p align="center">
113
+ <br>
114
+ <img src="https://raw.githubusercontent.com/huggingface/accelerate/main/docs/source/imgs/accelerate_logo.png" width="400"/>
115
+ <br>
116
+ <p>
117
+
118
+ <p align="center">
119
+ <!-- Uncomment when CircleCI is set up
120
+ <a href="https://circleci.com/gh/huggingface/accelerate">
121
+ <img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
122
+ </a>
123
+ -->
124
+ <a href="https://github.com/huggingface/accelerate/blob/main/LICENSE">
125
+ <img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue">
126
+ </a>
127
+ <a href="https://huggingface.co/docs/accelerate/index.html">
128
+ <img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online">
129
+ </a>
130
+ <a href="https://github.com/huggingface/accelerate/releases">
131
+ <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg">
132
+ </a>
133
+ <a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md">
134
+ <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
135
+ </a>
136
+ </p>
137
+
138
+ <h3 align="center">
139
+ <p>Run your *raw* PyTorch training script on any kind of device
140
+ </h3>
141
+
142
+ <h3 align="center">
143
+ <a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/accelerate/main/docs/source/imgs/course_banner.png"></a>
144
+ </h3>
145
+
146
+ ## Easy to integrate
147
+
148
+ 🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16.
149
+
150
+ 🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged.
151
+
152
+ Here is an example:
153
+
154
+ ```diff
155
+ import torch
156
+ import torch.nn.functional as F
157
+ from datasets import load_dataset
158
+ + from accelerate import Accelerator
159
+
160
+ + accelerator = Accelerator()
161
+ - device = 'cpu'
162
+ + device = accelerator.device
163
+
164
+ model = torch.nn.Transformer().to(device)
165
+ optimizer = torch.optim.Adam(model.parameters())
166
+
167
+ dataset = load_dataset('my_dataset')
168
+ data = torch.utils.data.DataLoader(dataset, shuffle=True)
169
+
170
+ + model, optimizer, data = accelerator.prepare(model, optimizer, data)
171
+
172
+ model.train()
173
+ for epoch in range(10):
174
+ for source, targets in data:
175
+ source = source.to(device)
176
+ targets = targets.to(device)
177
+
178
+ optimizer.zero_grad()
179
+
180
+ output = model(source)
181
+ loss = F.cross_entropy(output, targets)
182
+
183
+ - loss.backward()
184
+ + accelerator.backward(loss)
185
+
186
+ optimizer.step()
187
+ ```
188
+
189
+ As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp8, fp16, bf16).
190
+
191
+ In particular, the same code can then be run without modification on your local machine for debugging or your training environment.
192
+
193
+ 🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further:
194
+
195
+ ```diff
196
+ import torch
197
+ import torch.nn.functional as F
198
+ from datasets import load_dataset
199
+ + from accelerate import Accelerator
200
+
201
+ - device = 'cpu'
202
+ + accelerator = Accelerator()
203
+
204
+ - model = torch.nn.Transformer().to(device)
205
+ + model = torch.nn.Transformer()
206
+ optimizer = torch.optim.Adam(model.parameters())
207
+
208
+ dataset = load_dataset('my_dataset')
209
+ data = torch.utils.data.DataLoader(dataset, shuffle=True)
210
+
211
+ + model, optimizer, data = accelerator.prepare(model, optimizer, data)
212
+
213
+ model.train()
214
+ for epoch in range(10):
215
+ for source, targets in data:
216
+ - source = source.to(device)
217
+ - targets = targets.to(device)
218
+
219
+ optimizer.zero_grad()
220
+
221
+ output = model(source)
222
+ loss = F.cross_entropy(output, targets)
223
+
224
+ - loss.backward()
225
+ + accelerator.backward(loss)
226
+
227
+ optimizer.step()
228
+ ```
229
+
230
+ Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).
231
+
232
+ ## Launching script
233
+
234
+ 🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training!
235
+ On your machine(s) just run:
236
+
237
+ ```bash
238
+ accelerate config
239
+ ```
240
+
241
+ and answer the questions asked. This will generate a config file that will be used automatically to properly set the default options when doing
242
+
243
+ ```bash
244
+ accelerate launch my_script.py --args_to_my_script
245
+ ```
246
+
247
+ For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo):
248
+
249
+ ```bash
250
+ accelerate launch examples/nlp_example.py
251
+ ```
252
+
253
+ This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torchrun my_script.py` at your convenience.
254
+
255
+ You can also directly pass in the arguments you would to `torchrun` as arguments to `accelerate launch` if you wish to not run `accelerate config`.
256
+
257
+ For example, here is how to launch on two GPUs:
258
+
259
+ ```bash
260
+ accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py
261
+ ```
262
+
263
+ To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).
264
+
265
+ ## Launching multi-CPU run using MPI
266
+
267
+ 🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
268
+ Once you have MPI setup on your cluster, just run:
269
+ ```bash
270
+ accelerate config
271
+ ```
272
+ Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
273
+ Then, use `accelerate launch` with your script like:
274
+ ```bash
275
+ accelerate launch examples/nlp_example.py
276
+ ```
277
+ Alternatively, you can use mpirun directly, without using the CLI like:
278
+ ```bash
279
+ mpirun -np 2 python examples/nlp_example.py
280
+ ```
281
+
282
+ ## Launching training using DeepSpeed
283
+
284
+ 🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your Python script, we provide you the `DeepSpeedPlugin`.
285
+
286
+ ```python
287
+ from accelerate import Accelerator, DeepSpeedPlugin
288
+
289
+ # deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it
290
+ # Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed
291
+ deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
292
+ accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin)
293
+
294
+ # How to save your 🤗 Transformer?
295
+ accelerator.wait_for_everyone()
296
+ unwrapped_model = accelerator.unwrap_model(model)
297
+ unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model))
298
+ ```
299
+
300
+ Note: DeepSpeed support is experimental for now. In case you get into some problem, please open an issue.
301
+
302
+ ## Launching your training from a notebook
303
+
304
+ 🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch a distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then in your last cell, add:
305
+
306
+ ```python
307
+ from accelerate import notebook_launcher
308
+
309
+ notebook_launcher(training_function)
310
+ ```
311
+
312
+ An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)
313
+
314
+ ## Why should I use 🤗 Accelerate?
315
+
316
+ You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.
317
+
318
+ ## Why shouldn't I use 🤗 Accelerate?
319
+
320
+ You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that, 🤗 Accelerate is not one of them.
321
+
322
+ ## Frameworks using 🤗 Accelerate
323
+
324
+ If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:
325
+
326
+ * [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development.
327
+ * [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
328
+ * [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic.
329
+ * [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
330
+ * [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses.
331
+ * [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering industry-leading WebUI, terminal usage support, and serves as the foundation for many commercial products.
332
+ * [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.
333
+ * [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with their party systems, and retrieve information dynamically to do so.
334
+ * [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
335
+ * [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion.
336
+ * [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training pytorch model just in a keras style, a dynamic and beautiful plot is provided in notebook to monitor your loss or metric.
337
+ * [transformers](https://github.com/huggingface/transformers) as a tool for helping train state-of-the-art machine learning models in PyTorch, Tensorflow, and JAX. (Accelerate is the backend for the PyTorch side).
338
+
339
+
340
+ ## Installation
341
+
342
+ This repository is tested on Python 3.8+ and PyTorch 1.10.0+
343
+
344
+ You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
345
+
346
+ First, create a virtual environment with the version of Python you're going to use and activate it.
347
+
348
+ Then, you will need to install PyTorch: refer to the [official installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform. Then 🤗 Accelerate can be installed using pip as follows:
349
+
350
+ ```bash
351
+ pip install accelerate
352
+ ```
353
+
354
+ ## Supported integrations
355
+
356
+ - CPU only
357
+ - multi-CPU on one node (machine)
358
+ - multi-CPU on several nodes (machines)
359
+ - single GPU
360
+ - multi-GPU on one node (machine)
361
+ - multi-GPU on several nodes (machines)
362
+ - TPU
363
+ - FP16/BFloat16 mixed precision
364
+ - FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine)
365
+ - DeepSpeed support (Experimental)
366
+ - PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
367
+ - Megatron-LM support (Experimental)
368
+
369
+ ## Citing 🤗 Accelerate
370
+
371
+ If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry.
372
+
373
+ ```bibtex
374
+ @Misc{accelerate,
375
+ title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
376
+ author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan},
377
+ howpublished = {\url{https://github.com/huggingface/accelerate}},
378
+ year = {2022}
379
+ }
380
+ ```
llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/RECORD ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/accelerate,sha256=SeUzAx6R_i7Vlqe6VF3ZS81N9QFbgeIfhICh4mwLdAY,267
2
+ ../../../bin/accelerate-config,sha256=xEx_hJ25Ts8lIRpJ8uolPlJ2DJ7CJ0jp8pvJDxaC7Eo,259
3
+ ../../../bin/accelerate-estimate-memory,sha256=aJhKtEg2uP21UE5dKl89t0aBTaonT_yZmDo1HOb3XLo,261
4
+ ../../../bin/accelerate-launch,sha256=ZN1YBjUMnzCsnepNVaGugrohZN8fsJ4bqY_bToNONZs,259
5
+ accelerate-0.30.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
6
+ accelerate-0.30.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
7
+ accelerate-0.30.0.dist-info/METADATA,sha256=07aKAe1JqEYKHqmoOLUE8-bGQt9K9TRvWgLZvIfKIHs,19027
8
+ accelerate-0.30.0.dist-info/RECORD,,
9
+ accelerate-0.30.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
10
+ accelerate-0.30.0.dist-info/entry_points.txt,sha256=Z_KV59tIt4oZtUDEQ0w8JThJ6_1dd8vR8heH24DeAXI,238
11
+ accelerate-0.30.0.dist-info/top_level.txt,sha256=esVfdxTidsjQ90zsN_rPpjLFJ4ijRlx4mnLrG09hlt4,11
12
+ accelerate/__init__.py,sha256=huw8mydS-iGDZnEB5shtW0j9CXHcrxX1NwL_ik0W7gk,1456
13
+ accelerate/__pycache__/__init__.cpython-310.pyc,,
14
+ accelerate/__pycache__/accelerator.cpython-310.pyc,,
15
+ accelerate/__pycache__/big_modeling.cpython-310.pyc,,
16
+ accelerate/__pycache__/checkpointing.cpython-310.pyc,,
17
+ accelerate/__pycache__/data_loader.cpython-310.pyc,,
18
+ accelerate/__pycache__/hooks.cpython-310.pyc,,
19
+ accelerate/__pycache__/inference.cpython-310.pyc,,
20
+ accelerate/__pycache__/launchers.cpython-310.pyc,,
21
+ accelerate/__pycache__/local_sgd.cpython-310.pyc,,
22
+ accelerate/__pycache__/logging.cpython-310.pyc,,
23
+ accelerate/__pycache__/memory_utils.cpython-310.pyc,,
24
+ accelerate/__pycache__/optimizer.cpython-310.pyc,,
25
+ accelerate/__pycache__/scheduler.cpython-310.pyc,,
26
+ accelerate/__pycache__/state.cpython-310.pyc,,
27
+ accelerate/__pycache__/tracking.cpython-310.pyc,,
28
+ accelerate/accelerator.py,sha256=gaHqlGdIH1RnRATSEC6ltGyzzAyfpviDdJDnkLUKCto,151402
29
+ accelerate/big_modeling.py,sha256=pmtLTKTf8mJK1E2o51E3H5TBAuw_zLX_7pWtogtbP1w,29278
30
+ accelerate/checkpointing.py,sha256=0IMj7BMLXOozDnPCI7t32RVYB0867zsCoTFY8ZkxuKQ,11377
31
+ accelerate/commands/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
32
+ accelerate/commands/__pycache__/__init__.cpython-310.pyc,,
33
+ accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc,,
34
+ accelerate/commands/__pycache__/env.cpython-310.pyc,,
35
+ accelerate/commands/__pycache__/estimate.cpython-310.pyc,,
36
+ accelerate/commands/__pycache__/launch.cpython-310.pyc,,
37
+ accelerate/commands/__pycache__/test.cpython-310.pyc,,
38
+ accelerate/commands/__pycache__/tpu.cpython-310.pyc,,
39
+ accelerate/commands/__pycache__/utils.cpython-310.pyc,,
40
+ accelerate/commands/accelerate_cli.py,sha256=i3nge5Wj8i4zkV0CVIk9P8veleRZbTZY0AU4fJOrKF8,1749
41
+ accelerate/commands/config/__init__.py,sha256=iJK8dgj3pc5Vdr1E7UuGoFu-BlybyXLxYDoTg9gXngE,1645
42
+ accelerate/commands/config/__pycache__/__init__.cpython-310.pyc,,
43
+ accelerate/commands/config/__pycache__/cluster.cpython-310.pyc,,
44
+ accelerate/commands/config/__pycache__/config.cpython-310.pyc,,
45
+ accelerate/commands/config/__pycache__/config_args.cpython-310.pyc,,
46
+ accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc,,
47
+ accelerate/commands/config/__pycache__/default.cpython-310.pyc,,
48
+ accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc,,
49
+ accelerate/commands/config/__pycache__/update.cpython-310.pyc,,
50
+ accelerate/commands/config/cluster.py,sha256=5mr0ixcfWaiI8P7VwHJUkqKFkQZ9dv49ep_1DbyHvUo,30567
51
+ accelerate/commands/config/config.py,sha256=FuRlQvOjgATEtyqOSsGD-KEtOCvACOHjs2C-krrtldk,3035
52
+ accelerate/commands/config/config_args.py,sha256=hE42coVnn0UU-ysqp2ZH-jlqaXoPaHt5E_3qxT42GIM,10024
53
+ accelerate/commands/config/config_utils.py,sha256=DcjIV1mDInFmct2_XQ-9KYAkREINs6YuHRbZe5HFjT8,2926
54
+ accelerate/commands/config/default.py,sha256=3-SdEhl_zXM9S3f-FxkSVtiBQ5VY-QNsC4O26u60bss,5350
55
+ accelerate/commands/config/sagemaker.py,sha256=GjHE2-h4tRr1P_PFtMF3miiAtJlzkbHbMb6kFXqn8eo,10341
56
+ accelerate/commands/config/update.py,sha256=NXW1J7GkUHpg71QlIXsmMB_0z8S8IZo2FWax5POwrhc,2395
57
+ accelerate/commands/env.py,sha256=J4Gz8wQUUvkzvmy2SNtAIMaE042aGaaBuHBFRU1DSdk,3670
58
+ accelerate/commands/estimate.py,sha256=shEn2nXyHmz94zpAzV2R8__lcNYW9f9djl7bOHoo04k,12398
59
+ accelerate/commands/launch.py,sha256=7o5M1NygwkkWk33bu08eVREI6gzgVdBE7_GysFLmPkM,41813
60
+ accelerate/commands/menu/__init__.py,sha256=uqSlBM0TFHBwzdv3p3SXfpAk1lZFp4h1a7mbBdscPHs,645
61
+ accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc,,
62
+ accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc,,
63
+ accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc,,
64
+ accelerate/commands/menu/__pycache__/input.cpython-310.pyc,,
65
+ accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc,,
66
+ accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc,,
67
+ accelerate/commands/menu/cursor.py,sha256=-lmpJVAzvNc0c3EOtSuLoKB59zqylVCbYyWLPnrOmvQ,2028
68
+ accelerate/commands/menu/helpers.py,sha256=KrSB5fJjH4MUEUAQJ6bYaN16AYcnl9UalDrPD3DYeeg,1483
69
+ accelerate/commands/menu/input.py,sha256=Uj9eDp8-Mb0Fe49nuogqo9W_RCfYd6udfjiPKx7Wjmg,2537
70
+ accelerate/commands/menu/keymap.py,sha256=eXj-suyYs1m5dEHoUKN4mKAMLc8DWHnwhP6G6JSU0jQ,4086
71
+ accelerate/commands/menu/selection_menu.py,sha256=bxy-DHaKKC6SCToOlMBv5_z0MdUzylEg6Sio9OuV3GM,4921
72
+ accelerate/commands/test.py,sha256=YrPYEaAACOGZ6btn2MV6NbMSEdBUcMWADLbQWaZSHtk,2149
73
+ accelerate/commands/tpu.py,sha256=KyxDP7IuveidZrbW4rx2s8Ku3o_ptI6tzwr_R7ck0os,5548
74
+ accelerate/commands/utils.py,sha256=ilcfE32oHh28EToM00nc_SR6upfZiuxUI0AjjZu8KYY,3995
75
+ accelerate/data_loader.py,sha256=xZUGfnmUQF6Duf5vPQ6XX7IDS4YtqkpxmhE78LcH1Zg,50316
76
+ accelerate/hooks.py,sha256=x0FBwwoy6PKSwulavYTpc4gERIoB7RHGPF0Qe6qjXNA,31244
77
+ accelerate/inference.py,sha256=Ci7kkw2cocNpuvmbo1ytW2QgcI_HKWoXkIdonFOr0tg,7977
78
+ accelerate/launchers.py,sha256=iFDZ7seDdRwHAHy1BbVPmPccAONiPdV2aBOHNuT2ZD8,11375
79
+ accelerate/local_sgd.py,sha256=v0-AxldUSCYCI-rqjLiEHsVtSqyEIWTC5ppn7CW7qfY,4002
80
+ accelerate/logging.py,sha256=kvUvk33r_7T2BNzIwqRZBOhuC-50Ju4rm4HbsM6h2G8,4897
81
+ accelerate/memory_utils.py,sha256=3R5LoeHl6GgTZ-IMPrDZMdaEehWarGdPqODushb-6pg,862
82
+ accelerate/optimizer.py,sha256=vpEUhlmbh68ut7DPtTNRoUNcarI1aO58c_qJ0BYQKxc,8071
83
+ accelerate/scheduler.py,sha256=des_4M_Tt1W8gCYZZbLla0GHBEgJY3Wx2EGBQPTzeiY,4238
84
+ accelerate/state.py,sha256=LdcOLeVPgTHUNIyy9P7hopoUIOKxfM83ujCThEQWowo,50362
85
+ accelerate/test_utils/__init__.py,sha256=V_ndBKNVKT7Hs_Kn0_DQtHkBt2IOk21k7ntjx5YGC3U,1484
86
+ accelerate/test_utils/__pycache__/__init__.cpython-310.pyc,,
87
+ accelerate/test_utils/__pycache__/examples.cpython-310.pyc,,
88
+ accelerate/test_utils/__pycache__/testing.cpython-310.pyc,,
89
+ accelerate/test_utils/__pycache__/training.cpython-310.pyc,,
90
+ accelerate/test_utils/examples.py,sha256=jRm1S9TkmeoLaqprBvtVFN4LesiaDZtKMNIoLNY2euw,7281
91
+ accelerate/test_utils/scripts/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
92
+ accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc,,
93
+ accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc,,
94
+ accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc,,
95
+ accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc,,
96
+ accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc,,
97
+ accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc,,
98
+ accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc,,
99
+ accelerate/test_utils/scripts/external_deps/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
100
+ accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc,,
101
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc,,
102
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc,,
103
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc,,
104
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc,,
105
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc,,
106
+ accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc,,
107
+ accelerate/test_utils/scripts/external_deps/test_checkpointing.py,sha256=zILzHevzqxB1NPPDrJ1furaitI8MTvhBeG9QzzL0bmE,10668
108
+ accelerate/test_utils/scripts/external_deps/test_metrics.py,sha256=67-S1qeCpCL9ceaH22RsIsBJscMS7VQWaO4Krcszzbw,12133
109
+ accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py,sha256=D0YnKCxkI4ZwDOmZ5Ev6hL9jPyP7SU4WffpVFiK14bs,11072
110
+ accelerate/test_utils/scripts/external_deps/test_performance.py,sha256=8fV3wCM1H9HVRRyC5C4EGWt-9aHILX_y3-E7LfSiv7M,9803
111
+ accelerate/test_utils/scripts/external_deps/test_pippy.py,sha256=RdMoD1rlLKMyjyl0soSqR3iDbGidS6-z5GHo3bJUOw8,4647
112
+ accelerate/test_utils/scripts/external_deps/test_zero3_integration.py,sha256=bJ0Jio-6OCyS2FIgFmZi3duqG1gbkOoTEcHsrORYIL4,1503
113
+ accelerate/test_utils/scripts/test_cli.py,sha256=qfk1aYFtdvYFCYPkl05602SNGvk08QTv0xZVVcFVtzM,833
114
+ accelerate/test_utils/scripts/test_distributed_data_loop.py,sha256=NIap96XXauEV5sTZXpRj_u85BX4C6Xz1g56pl5Keitk,10714
115
+ accelerate/test_utils/scripts/test_notebook.py,sha256=Q4OOWHa_GMmzwfiq71BTpKYmhCHLC02J42OO94ut9xk,1629
116
+ accelerate/test_utils/scripts/test_ops.py,sha256=BcGn3xJT2wUJ0Yk_6VLNkneSv9z24JeAoQjsgdIIRr4,6170
117
+ accelerate/test_utils/scripts/test_script.py,sha256=pSIoH6bydbigJd1ZzukKaHjW-7p8Q_eagHMb1KDtXhA,32126
118
+ accelerate/test_utils/scripts/test_sync.py,sha256=3kltq-GuUjOVuo6_FOuWiPyc5f3pGiqiwEAbex5x_-o,18263
119
+ accelerate/test_utils/testing.py,sha256=Ayov8y0kstrapwfyL5M8u8p_UNw8ADJpZDYAC_6uwQ8,21164
120
+ accelerate/test_utils/training.py,sha256=8k_YAQ21MzUdb2aFWq1t2fihW1b-iBGh1OJSL3whY68,4019
121
+ accelerate/tracking.py,sha256=WLY-H1DTsxrz4BVzle7QZMp0Irg84yFMbA1e6JaY3pM,39789
122
+ accelerate/utils/__init__.py,sha256=YMWXC1TOReikTo2FVSEi142n2TNP-QXr7soxDQeFmB8,6174
123
+ accelerate/utils/__pycache__/__init__.cpython-310.pyc,,
124
+ accelerate/utils/__pycache__/bnb.cpython-310.pyc,,
125
+ accelerate/utils/__pycache__/constants.cpython-310.pyc,,
126
+ accelerate/utils/__pycache__/dataclasses.cpython-310.pyc,,
127
+ accelerate/utils/__pycache__/deepspeed.cpython-310.pyc,,
128
+ accelerate/utils/__pycache__/environment.cpython-310.pyc,,
129
+ accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc,,
130
+ accelerate/utils/__pycache__/imports.cpython-310.pyc,,
131
+ accelerate/utils/__pycache__/launch.cpython-310.pyc,,
132
+ accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc,,
133
+ accelerate/utils/__pycache__/memory.cpython-310.pyc,,
134
+ accelerate/utils/__pycache__/modeling.cpython-310.pyc,,
135
+ accelerate/utils/__pycache__/offload.cpython-310.pyc,,
136
+ accelerate/utils/__pycache__/operations.cpython-310.pyc,,
137
+ accelerate/utils/__pycache__/other.cpython-310.pyc,,
138
+ accelerate/utils/__pycache__/random.cpython-310.pyc,,
139
+ accelerate/utils/__pycache__/rich.cpython-310.pyc,,
140
+ accelerate/utils/__pycache__/torch_xla.cpython-310.pyc,,
141
+ accelerate/utils/__pycache__/tqdm.cpython-310.pyc,,
142
+ accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc,,
143
+ accelerate/utils/__pycache__/versions.cpython-310.pyc,,
144
+ accelerate/utils/bnb.py,sha256=3i59dy8EcBYJEnT2alJ5_M-zeIpFsrceQ4bImiJJKOk,20570
145
+ accelerate/utils/constants.py,sha256=e6Bpf7gSZLFkvfr-1B1841b6lVoKJ5uyyf5kefe0aT4,2566
146
+ accelerate/utils/dataclasses.py,sha256=jMnO35VsgL9AiyrHAqZaxUT3Idj2Y8XXtVl1-NHZ4Rc,76301
147
+ accelerate/utils/deepspeed.py,sha256=1JFnz-dY6xP9yHywnX8bzZNq-d-8Cpg5CvVNLZ74b_0,10276
148
+ accelerate/utils/environment.py,sha256=8eVGMCu7xT1y0Hxochnxz_RghDePtWo2TghDlOm5Gf0,10409
149
+ accelerate/utils/fsdp_utils.py,sha256=QURWBtK8D00zppqJko0yeznEovXvnkRLI0NpPPkog1Q,10667
150
+ accelerate/utils/imports.py,sha256=zl7APSv9rdnkYzWsVpY6GNGiN1cwu80j5MfTwLYEwDo,12592
151
+ accelerate/utils/launch.py,sha256=ZqTT0HCVvzW5BswRy6tV4g_0wd1VSSPyW7QSBm0chWQ,27620
152
+ accelerate/utils/megatron_lm.py,sha256=IfHrtMiPSwuzh5ri96rTTIcEluuMNuIj3O8Y4jW6Fzk,57124
153
+ accelerate/utils/memory.py,sha256=VxJCU-tMX8uE34GbJnxtDXYPHh4D9p2Y-d6rkGxqSa0,5200
154
+ accelerate/utils/modeling.py,sha256=d5FEyttO0JnK3YV4BAduziFVBb-P_KWf_IqUsEl6wwg,80289
155
+ accelerate/utils/offload.py,sha256=qjaVai81wbkA0YH2WkmOXvZT0BRphygfRV_4Ua4j4U4,7837
156
+ accelerate/utils/operations.py,sha256=hAoF0RYKj8YpGPtbfYpT5H79BfHEQ8JzYQdFBHL_nHw,30531
157
+ accelerate/utils/other.py,sha256=kgON65EhzQN3oQZqzgAOmmNC2vsQkeO77qEuzN7Zv7c,12283
158
+ accelerate/utils/random.py,sha256=BowDGdBPbeBgGfn8M1K0ymOK4uAejHR2nu3YPvSVDUI,4958
159
+ accelerate/utils/rich.py,sha256=8JZX_uGMQX-BufdXxJpdne7BWd1KyLHSgbiGxrDMYr8,847
160
+ accelerate/utils/torch_xla.py,sha256=Pq1tuqN0X_pWDVza6YgjfO45uoJdoRVRForLeLQzFus,1908
161
+ accelerate/utils/tqdm.py,sha256=jhniZKNOGl7TQfF36yCu2XdtFkJOtCdR9jZ1SbkE-ig,1783
162
+ accelerate/utils/transformer_engine.py,sha256=gNPkOv_D1SDLm6nVZtxWIjyA6snxWtAQeBWUZLIErJE,3582
163
+ accelerate/utils/versions.py,sha256=UgmcbjBm--6CIx1ZamSAMjAK_B_2l48LbeaNygqej8M,2149
llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.38.4)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ [console_scripts]
2
+ accelerate = accelerate.commands.accelerate_cli:main
3
+ accelerate-config = accelerate.commands.config:main
4
+ accelerate-estimate-memory = accelerate.commands.estimate:main
5
+ accelerate-launch = accelerate.commands.launch:main
llmeval-env/lib/python3.10/site-packages/accelerate-0.30.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ accelerate
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/_identifier.cpython-310.pyc ADDED
Binary file (2.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/async_utils.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/bccache.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/compiler.cpython-310.pyc ADDED
Binary file (54.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/constants.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/debug.cpython-310.pyc ADDED
Binary file (4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/defaults.cpython-310.pyc ADDED
Binary file (1.34 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/environment.cpython-310.pyc ADDED
Binary file (53.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (5.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/ext.cpython-310.pyc ADDED
Binary file (25.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/filters.cpython-310.pyc ADDED
Binary file (52 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/idtracking.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/lexer.cpython-310.pyc ADDED
Binary file (20.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/loaders.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/meta.cpython-310.pyc ADDED
Binary file (3.82 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/nativetypes.cpython-310.pyc ADDED
Binary file (5.02 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/nodes.cpython-310.pyc ADDED
Binary file (40.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (1.96 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/parser.cpython-310.pyc ADDED
Binary file (27.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/runtime.cpython-310.pyc ADDED
Binary file (32.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/sandbox.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/tests.cpython-310.pyc ADDED
Binary file (6.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/utils.cpython-310.pyc ADDED
Binary file (24.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/__pycache__/visitor.cpython-310.pyc ADDED
Binary file (3.98 kB). View file
 
llmeval-env/lib/python3.10/site-packages/jinja2/lexer.py ADDED
@@ -0,0 +1,868 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Implements a Jinja / Python combination lexer. The ``Lexer`` class
2
+ is used to do some preprocessing. It filters out invalid operators like
3
+ the bitshift operators we don't allow in templates. It separates
4
+ template code and python code in expressions.
5
+ """
6
+
7
+ import re
8
+ import typing as t
9
+ from ast import literal_eval
10
+ from collections import deque
11
+ from sys import intern
12
+
13
+ from ._identifier import pattern as name_re
14
+ from .exceptions import TemplateSyntaxError
15
+ from .utils import LRUCache
16
+
17
+ if t.TYPE_CHECKING:
18
+ import typing_extensions as te
19
+
20
+ from .environment import Environment
21
+
22
+ # cache for the lexers. Exists in order to be able to have multiple
23
+ # environments with the same lexer
24
+ _lexer_cache: t.MutableMapping[t.Tuple, "Lexer"] = LRUCache(50) # type: ignore
25
+
26
+ # static regular expressions
27
+ whitespace_re = re.compile(r"\s+")
28
+ newline_re = re.compile(r"(\r\n|\r|\n)")
29
+ string_re = re.compile(
30
+ r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
31
+ )
32
+ integer_re = re.compile(
33
+ r"""
34
+ (
35
+ 0b(_?[0-1])+ # binary
36
+ |
37
+ 0o(_?[0-7])+ # octal
38
+ |
39
+ 0x(_?[\da-f])+ # hex
40
+ |
41
+ [1-9](_?\d)* # decimal
42
+ |
43
+ 0(_?0)* # decimal zero
44
+ )
45
+ """,
46
+ re.IGNORECASE | re.VERBOSE,
47
+ )
48
+ float_re = re.compile(
49
+ r"""
50
+ (?<!\.) # doesn't start with a .
51
+ (\d+_)*\d+ # digits, possibly _ separated
52
+ (
53
+ (\.(\d+_)*\d+)? # optional fractional part
54
+ e[+\-]?(\d+_)*\d+ # exponent part
55
+ |
56
+ \.(\d+_)*\d+ # required fractional part
57
+ )
58
+ """,
59
+ re.IGNORECASE | re.VERBOSE,
60
+ )
61
+
62
+ # internal the tokens and keep references to them
63
+ TOKEN_ADD = intern("add")
64
+ TOKEN_ASSIGN = intern("assign")
65
+ TOKEN_COLON = intern("colon")
66
+ TOKEN_COMMA = intern("comma")
67
+ TOKEN_DIV = intern("div")
68
+ TOKEN_DOT = intern("dot")
69
+ TOKEN_EQ = intern("eq")
70
+ TOKEN_FLOORDIV = intern("floordiv")
71
+ TOKEN_GT = intern("gt")
72
+ TOKEN_GTEQ = intern("gteq")
73
+ TOKEN_LBRACE = intern("lbrace")
74
+ TOKEN_LBRACKET = intern("lbracket")
75
+ TOKEN_LPAREN = intern("lparen")
76
+ TOKEN_LT = intern("lt")
77
+ TOKEN_LTEQ = intern("lteq")
78
+ TOKEN_MOD = intern("mod")
79
+ TOKEN_MUL = intern("mul")
80
+ TOKEN_NE = intern("ne")
81
+ TOKEN_PIPE = intern("pipe")
82
+ TOKEN_POW = intern("pow")
83
+ TOKEN_RBRACE = intern("rbrace")
84
+ TOKEN_RBRACKET = intern("rbracket")
85
+ TOKEN_RPAREN = intern("rparen")
86
+ TOKEN_SEMICOLON = intern("semicolon")
87
+ TOKEN_SUB = intern("sub")
88
+ TOKEN_TILDE = intern("tilde")
89
+ TOKEN_WHITESPACE = intern("whitespace")
90
+ TOKEN_FLOAT = intern("float")
91
+ TOKEN_INTEGER = intern("integer")
92
+ TOKEN_NAME = intern("name")
93
+ TOKEN_STRING = intern("string")
94
+ TOKEN_OPERATOR = intern("operator")
95
+ TOKEN_BLOCK_BEGIN = intern("block_begin")
96
+ TOKEN_BLOCK_END = intern("block_end")
97
+ TOKEN_VARIABLE_BEGIN = intern("variable_begin")
98
+ TOKEN_VARIABLE_END = intern("variable_end")
99
+ TOKEN_RAW_BEGIN = intern("raw_begin")
100
+ TOKEN_RAW_END = intern("raw_end")
101
+ TOKEN_COMMENT_BEGIN = intern("comment_begin")
102
+ TOKEN_COMMENT_END = intern("comment_end")
103
+ TOKEN_COMMENT = intern("comment")
104
+ TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
105
+ TOKEN_LINESTATEMENT_END = intern("linestatement_end")
106
+ TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
107
+ TOKEN_LINECOMMENT_END = intern("linecomment_end")
108
+ TOKEN_LINECOMMENT = intern("linecomment")
109
+ TOKEN_DATA = intern("data")
110
+ TOKEN_INITIAL = intern("initial")
111
+ TOKEN_EOF = intern("eof")
112
+
113
+ # bind operators to token types
114
+ operators = {
115
+ "+": TOKEN_ADD,
116
+ "-": TOKEN_SUB,
117
+ "/": TOKEN_DIV,
118
+ "//": TOKEN_FLOORDIV,
119
+ "*": TOKEN_MUL,
120
+ "%": TOKEN_MOD,
121
+ "**": TOKEN_POW,
122
+ "~": TOKEN_TILDE,
123
+ "[": TOKEN_LBRACKET,
124
+ "]": TOKEN_RBRACKET,
125
+ "(": TOKEN_LPAREN,
126
+ ")": TOKEN_RPAREN,
127
+ "{": TOKEN_LBRACE,
128
+ "}": TOKEN_RBRACE,
129
+ "==": TOKEN_EQ,
130
+ "!=": TOKEN_NE,
131
+ ">": TOKEN_GT,
132
+ ">=": TOKEN_GTEQ,
133
+ "<": TOKEN_LT,
134
+ "<=": TOKEN_LTEQ,
135
+ "=": TOKEN_ASSIGN,
136
+ ".": TOKEN_DOT,
137
+ ":": TOKEN_COLON,
138
+ "|": TOKEN_PIPE,
139
+ ",": TOKEN_COMMA,
140
+ ";": TOKEN_SEMICOLON,
141
+ }
142
+
143
+ reverse_operators = {v: k for k, v in operators.items()}
144
+ assert len(operators) == len(reverse_operators), "operators dropped"
145
+ operator_re = re.compile(
146
+ f"({'|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))})"
147
+ )
148
+
149
+ ignored_tokens = frozenset(
150
+ [
151
+ TOKEN_COMMENT_BEGIN,
152
+ TOKEN_COMMENT,
153
+ TOKEN_COMMENT_END,
154
+ TOKEN_WHITESPACE,
155
+ TOKEN_LINECOMMENT_BEGIN,
156
+ TOKEN_LINECOMMENT_END,
157
+ TOKEN_LINECOMMENT,
158
+ ]
159
+ )
160
+ ignore_if_empty = frozenset(
161
+ [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
162
+ )
163
+
164
+
165
+ def _describe_token_type(token_type: str) -> str:
166
+ if token_type in reverse_operators:
167
+ return reverse_operators[token_type]
168
+
169
+ return {
170
+ TOKEN_COMMENT_BEGIN: "begin of comment",
171
+ TOKEN_COMMENT_END: "end of comment",
172
+ TOKEN_COMMENT: "comment",
173
+ TOKEN_LINECOMMENT: "comment",
174
+ TOKEN_BLOCK_BEGIN: "begin of statement block",
175
+ TOKEN_BLOCK_END: "end of statement block",
176
+ TOKEN_VARIABLE_BEGIN: "begin of print statement",
177
+ TOKEN_VARIABLE_END: "end of print statement",
178
+ TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
179
+ TOKEN_LINESTATEMENT_END: "end of line statement",
180
+ TOKEN_DATA: "template data / text",
181
+ TOKEN_EOF: "end of template",
182
+ }.get(token_type, token_type)
183
+
184
+
185
+ def describe_token(token: "Token") -> str:
186
+ """Returns a description of the token."""
187
+ if token.type == TOKEN_NAME:
188
+ return token.value
189
+
190
+ return _describe_token_type(token.type)
191
+
192
+
193
+ def describe_token_expr(expr: str) -> str:
194
+ """Like `describe_token` but for token expressions."""
195
+ if ":" in expr:
196
+ type, value = expr.split(":", 1)
197
+
198
+ if type == TOKEN_NAME:
199
+ return value
200
+ else:
201
+ type = expr
202
+
203
+ return _describe_token_type(type)
204
+
205
+
206
+ def count_newlines(value: str) -> int:
207
+ """Count the number of newline characters in the string. This is
208
+ useful for extensions that filter a stream.
209
+ """
210
+ return len(newline_re.findall(value))
211
+
212
+
213
+ def compile_rules(environment: "Environment") -> t.List[t.Tuple[str, str]]:
214
+ """Compiles all the rules from the environment into a list of rules."""
215
+ e = re.escape
216
+ rules = [
217
+ (
218
+ len(environment.comment_start_string),
219
+ TOKEN_COMMENT_BEGIN,
220
+ e(environment.comment_start_string),
221
+ ),
222
+ (
223
+ len(environment.block_start_string),
224
+ TOKEN_BLOCK_BEGIN,
225
+ e(environment.block_start_string),
226
+ ),
227
+ (
228
+ len(environment.variable_start_string),
229
+ TOKEN_VARIABLE_BEGIN,
230
+ e(environment.variable_start_string),
231
+ ),
232
+ ]
233
+
234
+ if environment.line_statement_prefix is not None:
235
+ rules.append(
236
+ (
237
+ len(environment.line_statement_prefix),
238
+ TOKEN_LINESTATEMENT_BEGIN,
239
+ r"^[ \t\v]*" + e(environment.line_statement_prefix),
240
+ )
241
+ )
242
+ if environment.line_comment_prefix is not None:
243
+ rules.append(
244
+ (
245
+ len(environment.line_comment_prefix),
246
+ TOKEN_LINECOMMENT_BEGIN,
247
+ r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
248
+ )
249
+ )
250
+
251
+ return [x[1:] for x in sorted(rules, reverse=True)]
252
+
253
+
254
+ class Failure:
255
+ """Class that raises a `TemplateSyntaxError` if called.
256
+ Used by the `Lexer` to specify known errors.
257
+ """
258
+
259
+ def __init__(
260
+ self, message: str, cls: t.Type[TemplateSyntaxError] = TemplateSyntaxError
261
+ ) -> None:
262
+ self.message = message
263
+ self.error_class = cls
264
+
265
+ def __call__(self, lineno: int, filename: str) -> "te.NoReturn":
266
+ raise self.error_class(self.message, lineno, filename)
267
+
268
+
269
+ class Token(t.NamedTuple):
270
+ lineno: int
271
+ type: str
272
+ value: str
273
+
274
+ def __str__(self) -> str:
275
+ return describe_token(self)
276
+
277
+ def test(self, expr: str) -> bool:
278
+ """Test a token against a token expression. This can either be a
279
+ token type or ``'token_type:token_value'``. This can only test
280
+ against string values and types.
281
+ """
282
+ # here we do a regular string equality check as test_any is usually
283
+ # passed an iterable of not interned strings.
284
+ if self.type == expr:
285
+ return True
286
+
287
+ if ":" in expr:
288
+ return expr.split(":", 1) == [self.type, self.value]
289
+
290
+ return False
291
+
292
+ def test_any(self, *iterable: str) -> bool:
293
+ """Test against multiple token expressions."""
294
+ return any(self.test(expr) for expr in iterable)
295
+
296
+
297
+ class TokenStreamIterator:
298
+ """The iterator for tokenstreams. Iterate over the stream
299
+ until the eof token is reached.
300
+ """
301
+
302
+ def __init__(self, stream: "TokenStream") -> None:
303
+ self.stream = stream
304
+
305
+ def __iter__(self) -> "TokenStreamIterator":
306
+ return self
307
+
308
+ def __next__(self) -> Token:
309
+ token = self.stream.current
310
+
311
+ if token.type is TOKEN_EOF:
312
+ self.stream.close()
313
+ raise StopIteration
314
+
315
+ next(self.stream)
316
+ return token
317
+
318
+
319
+ class TokenStream:
320
+ """A token stream is an iterable that yields :class:`Token`\\s. The
321
+ parser however does not iterate over it but calls :meth:`next` to go
322
+ one token ahead. The current active token is stored as :attr:`current`.
323
+ """
324
+
325
+ def __init__(
326
+ self,
327
+ generator: t.Iterable[Token],
328
+ name: t.Optional[str],
329
+ filename: t.Optional[str],
330
+ ):
331
+ self._iter = iter(generator)
332
+ self._pushed: "te.Deque[Token]" = deque()
333
+ self.name = name
334
+ self.filename = filename
335
+ self.closed = False
336
+ self.current = Token(1, TOKEN_INITIAL, "")
337
+ next(self)
338
+
339
+ def __iter__(self) -> TokenStreamIterator:
340
+ return TokenStreamIterator(self)
341
+
342
+ def __bool__(self) -> bool:
343
+ return bool(self._pushed) or self.current.type is not TOKEN_EOF
344
+
345
+ @property
346
+ def eos(self) -> bool:
347
+ """Are we at the end of the stream?"""
348
+ return not self
349
+
350
+ def push(self, token: Token) -> None:
351
+ """Push a token back to the stream."""
352
+ self._pushed.append(token)
353
+
354
+ def look(self) -> Token:
355
+ """Look at the next token."""
356
+ old_token = next(self)
357
+ result = self.current
358
+ self.push(result)
359
+ self.current = old_token
360
+ return result
361
+
362
+ def skip(self, n: int = 1) -> None:
363
+ """Got n tokens ahead."""
364
+ for _ in range(n):
365
+ next(self)
366
+
367
+ def next_if(self, expr: str) -> t.Optional[Token]:
368
+ """Perform the token test and return the token if it matched.
369
+ Otherwise the return value is `None`.
370
+ """
371
+ if self.current.test(expr):
372
+ return next(self)
373
+
374
+ return None
375
+
376
+ def skip_if(self, expr: str) -> bool:
377
+ """Like :meth:`next_if` but only returns `True` or `False`."""
378
+ return self.next_if(expr) is not None
379
+
380
+ def __next__(self) -> Token:
381
+ """Go one token ahead and return the old one.
382
+
383
+ Use the built-in :func:`next` instead of calling this directly.
384
+ """
385
+ rv = self.current
386
+
387
+ if self._pushed:
388
+ self.current = self._pushed.popleft()
389
+ elif self.current.type is not TOKEN_EOF:
390
+ try:
391
+ self.current = next(self._iter)
392
+ except StopIteration:
393
+ self.close()
394
+
395
+ return rv
396
+
397
+ def close(self) -> None:
398
+ """Close the stream."""
399
+ self.current = Token(self.current.lineno, TOKEN_EOF, "")
400
+ self._iter = iter(())
401
+ self.closed = True
402
+
403
+ def expect(self, expr: str) -> Token:
404
+ """Expect a given token type and return it. This accepts the same
405
+ argument as :meth:`jinja2.lexer.Token.test`.
406
+ """
407
+ if not self.current.test(expr):
408
+ expr = describe_token_expr(expr)
409
+
410
+ if self.current.type is TOKEN_EOF:
411
+ raise TemplateSyntaxError(
412
+ f"unexpected end of template, expected {expr!r}.",
413
+ self.current.lineno,
414
+ self.name,
415
+ self.filename,
416
+ )
417
+
418
+ raise TemplateSyntaxError(
419
+ f"expected token {expr!r}, got {describe_token(self.current)!r}",
420
+ self.current.lineno,
421
+ self.name,
422
+ self.filename,
423
+ )
424
+
425
+ return next(self)
426
+
427
+
428
+ def get_lexer(environment: "Environment") -> "Lexer":
429
+ """Return a lexer which is probably cached."""
430
+ key = (
431
+ environment.block_start_string,
432
+ environment.block_end_string,
433
+ environment.variable_start_string,
434
+ environment.variable_end_string,
435
+ environment.comment_start_string,
436
+ environment.comment_end_string,
437
+ environment.line_statement_prefix,
438
+ environment.line_comment_prefix,
439
+ environment.trim_blocks,
440
+ environment.lstrip_blocks,
441
+ environment.newline_sequence,
442
+ environment.keep_trailing_newline,
443
+ )
444
+ lexer = _lexer_cache.get(key)
445
+
446
+ if lexer is None:
447
+ _lexer_cache[key] = lexer = Lexer(environment)
448
+
449
+ return lexer
450
+
451
+
452
+ class OptionalLStrip(tuple): # type: ignore[type-arg]
453
+ """A special tuple for marking a point in the state that can have
454
+ lstrip applied.
455
+ """
456
+
457
+ __slots__ = ()
458
+
459
+ # Even though it looks like a no-op, creating instances fails
460
+ # without this.
461
+ def __new__(cls, *members, **kwargs): # type: ignore
462
+ return super().__new__(cls, members)
463
+
464
+
465
+ class _Rule(t.NamedTuple):
466
+ pattern: t.Pattern[str]
467
+ tokens: t.Union[str, t.Tuple[str, ...], t.Tuple[Failure]]
468
+ command: t.Optional[str]
469
+
470
+
471
+ class Lexer:
472
+ """Class that implements a lexer for a given environment. Automatically
473
+ created by the environment class, usually you don't have to do that.
474
+
475
+ Note that the lexer is not automatically bound to an environment.
476
+ Multiple environments can share the same lexer.
477
+ """
478
+
479
+ def __init__(self, environment: "Environment") -> None:
480
+ # shortcuts
481
+ e = re.escape
482
+
483
+ def c(x: str) -> t.Pattern[str]:
484
+ return re.compile(x, re.M | re.S)
485
+
486
+ # lexing rules for tags
487
+ tag_rules: t.List[_Rule] = [
488
+ _Rule(whitespace_re, TOKEN_WHITESPACE, None),
489
+ _Rule(float_re, TOKEN_FLOAT, None),
490
+ _Rule(integer_re, TOKEN_INTEGER, None),
491
+ _Rule(name_re, TOKEN_NAME, None),
492
+ _Rule(string_re, TOKEN_STRING, None),
493
+ _Rule(operator_re, TOKEN_OPERATOR, None),
494
+ ]
495
+
496
+ # assemble the root lexing rule. because "|" is ungreedy
497
+ # we have to sort by length so that the lexer continues working
498
+ # as expected when we have parsing rules like <% for block and
499
+ # <%= for variables. (if someone wants asp like syntax)
500
+ # variables are just part of the rules if variable processing
501
+ # is required.
502
+ root_tag_rules = compile_rules(environment)
503
+
504
+ block_start_re = e(environment.block_start_string)
505
+ block_end_re = e(environment.block_end_string)
506
+ comment_end_re = e(environment.comment_end_string)
507
+ variable_end_re = e(environment.variable_end_string)
508
+
509
+ # block suffix if trimming is enabled
510
+ block_suffix_re = "\\n?" if environment.trim_blocks else ""
511
+
512
+ self.lstrip_blocks = environment.lstrip_blocks
513
+
514
+ self.newline_sequence = environment.newline_sequence
515
+ self.keep_trailing_newline = environment.keep_trailing_newline
516
+
517
+ root_raw_re = (
518
+ rf"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
519
+ rf"(?:\-{block_end_re}\s*|{block_end_re}))"
520
+ )
521
+ root_parts_re = "|".join(
522
+ [root_raw_re] + [rf"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
523
+ )
524
+
525
+ # global lexing rules
526
+ self.rules: t.Dict[str, t.List[_Rule]] = {
527
+ "root": [
528
+ # directives
529
+ _Rule(
530
+ c(rf"(.*?)(?:{root_parts_re})"),
531
+ OptionalLStrip(TOKEN_DATA, "#bygroup"), # type: ignore
532
+ "#bygroup",
533
+ ),
534
+ # data
535
+ _Rule(c(".+"), TOKEN_DATA, None),
536
+ ],
537
+ # comments
538
+ TOKEN_COMMENT_BEGIN: [
539
+ _Rule(
540
+ c(
541
+ rf"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
542
+ rf"|{comment_end_re}{block_suffix_re}))"
543
+ ),
544
+ (TOKEN_COMMENT, TOKEN_COMMENT_END),
545
+ "#pop",
546
+ ),
547
+ _Rule(c(r"(.)"), (Failure("Missing end of comment tag"),), None),
548
+ ],
549
+ # blocks
550
+ TOKEN_BLOCK_BEGIN: [
551
+ _Rule(
552
+ c(
553
+ rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
554
+ rf"|{block_end_re}{block_suffix_re})"
555
+ ),
556
+ TOKEN_BLOCK_END,
557
+ "#pop",
558
+ ),
559
+ ]
560
+ + tag_rules,
561
+ # variables
562
+ TOKEN_VARIABLE_BEGIN: [
563
+ _Rule(
564
+ c(rf"\-{variable_end_re}\s*|{variable_end_re}"),
565
+ TOKEN_VARIABLE_END,
566
+ "#pop",
567
+ )
568
+ ]
569
+ + tag_rules,
570
+ # raw block
571
+ TOKEN_RAW_BEGIN: [
572
+ _Rule(
573
+ c(
574
+ rf"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
575
+ rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
576
+ rf"|{block_end_re}{block_suffix_re}))"
577
+ ),
578
+ OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END), # type: ignore
579
+ "#pop",
580
+ ),
581
+ _Rule(c(r"(.)"), (Failure("Missing end of raw directive"),), None),
582
+ ],
583
+ # line statements
584
+ TOKEN_LINESTATEMENT_BEGIN: [
585
+ _Rule(c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
586
+ ]
587
+ + tag_rules,
588
+ # line comments
589
+ TOKEN_LINECOMMENT_BEGIN: [
590
+ _Rule(
591
+ c(r"(.*?)()(?=\n|$)"),
592
+ (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
593
+ "#pop",
594
+ )
595
+ ],
596
+ }
597
+
598
+ def _normalize_newlines(self, value: str) -> str:
599
+ """Replace all newlines with the configured sequence in strings
600
+ and template data.
601
+ """
602
+ return newline_re.sub(self.newline_sequence, value)
603
+
604
+ def tokenize(
605
+ self,
606
+ source: str,
607
+ name: t.Optional[str] = None,
608
+ filename: t.Optional[str] = None,
609
+ state: t.Optional[str] = None,
610
+ ) -> TokenStream:
611
+ """Calls tokeniter + tokenize and wraps it in a token stream."""
612
+ stream = self.tokeniter(source, name, filename, state)
613
+ return TokenStream(self.wrap(stream, name, filename), name, filename)
614
+
615
+ def wrap(
616
+ self,
617
+ stream: t.Iterable[t.Tuple[int, str, str]],
618
+ name: t.Optional[str] = None,
619
+ filename: t.Optional[str] = None,
620
+ ) -> t.Iterator[Token]:
621
+ """This is called with the stream as returned by `tokenize` and wraps
622
+ every token in a :class:`Token` and converts the value.
623
+ """
624
+ for lineno, token, value_str in stream:
625
+ if token in ignored_tokens:
626
+ continue
627
+
628
+ value: t.Any = value_str
629
+
630
+ if token == TOKEN_LINESTATEMENT_BEGIN:
631
+ token = TOKEN_BLOCK_BEGIN
632
+ elif token == TOKEN_LINESTATEMENT_END:
633
+ token = TOKEN_BLOCK_END
634
+ # we are not interested in those tokens in the parser
635
+ elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
636
+ continue
637
+ elif token == TOKEN_DATA:
638
+ value = self._normalize_newlines(value_str)
639
+ elif token == "keyword":
640
+ token = value_str
641
+ elif token == TOKEN_NAME:
642
+ value = value_str
643
+
644
+ if not value.isidentifier():
645
+ raise TemplateSyntaxError(
646
+ "Invalid character in identifier", lineno, name, filename
647
+ )
648
+ elif token == TOKEN_STRING:
649
+ # try to unescape string
650
+ try:
651
+ value = (
652
+ self._normalize_newlines(value_str[1:-1])
653
+ .encode("ascii", "backslashreplace")
654
+ .decode("unicode-escape")
655
+ )
656
+ except Exception as e:
657
+ msg = str(e).split(":")[-1].strip()
658
+ raise TemplateSyntaxError(msg, lineno, name, filename) from e
659
+ elif token == TOKEN_INTEGER:
660
+ value = int(value_str.replace("_", ""), 0)
661
+ elif token == TOKEN_FLOAT:
662
+ # remove all "_" first to support more Python versions
663
+ value = literal_eval(value_str.replace("_", ""))
664
+ elif token == TOKEN_OPERATOR:
665
+ token = operators[value_str]
666
+
667
+ yield Token(lineno, token, value)
668
+
669
+ def tokeniter(
670
+ self,
671
+ source: str,
672
+ name: t.Optional[str],
673
+ filename: t.Optional[str] = None,
674
+ state: t.Optional[str] = None,
675
+ ) -> t.Iterator[t.Tuple[int, str, str]]:
676
+ """This method tokenizes the text and returns the tokens in a
677
+ generator. Use this method if you just want to tokenize a template.
678
+
679
+ .. versionchanged:: 3.0
680
+ Only ``\\n``, ``\\r\\n`` and ``\\r`` are treated as line
681
+ breaks.
682
+ """
683
+ lines = newline_re.split(source)[::2]
684
+
685
+ if not self.keep_trailing_newline and lines[-1] == "":
686
+ del lines[-1]
687
+
688
+ source = "\n".join(lines)
689
+ pos = 0
690
+ lineno = 1
691
+ stack = ["root"]
692
+
693
+ if state is not None and state != "root":
694
+ assert state in ("variable", "block"), "invalid state"
695
+ stack.append(state + "_begin")
696
+
697
+ statetokens = self.rules[stack[-1]]
698
+ source_length = len(source)
699
+ balancing_stack: t.List[str] = []
700
+ newlines_stripped = 0
701
+ line_starting = True
702
+
703
+ while True:
704
+ # tokenizer loop
705
+ for regex, tokens, new_state in statetokens:
706
+ m = regex.match(source, pos)
707
+
708
+ # if no match we try again with the next rule
709
+ if m is None:
710
+ continue
711
+
712
+ # we only match blocks and variables if braces / parentheses
713
+ # are balanced. continue parsing with the lower rule which
714
+ # is the operator rule. do this only if the end tags look
715
+ # like operators
716
+ if balancing_stack and tokens in (
717
+ TOKEN_VARIABLE_END,
718
+ TOKEN_BLOCK_END,
719
+ TOKEN_LINESTATEMENT_END,
720
+ ):
721
+ continue
722
+
723
+ # tuples support more options
724
+ if isinstance(tokens, tuple):
725
+ groups: t.Sequence[str] = m.groups()
726
+
727
+ if isinstance(tokens, OptionalLStrip):
728
+ # Rule supports lstrip. Match will look like
729
+ # text, block type, whitespace control, type, control, ...
730
+ text = groups[0]
731
+ # Skipping the text and first type, every other group is the
732
+ # whitespace control for each type. One of the groups will be
733
+ # -, +, or empty string instead of None.
734
+ strip_sign = next(g for g in groups[2::2] if g is not None)
735
+
736
+ if strip_sign == "-":
737
+ # Strip all whitespace between the text and the tag.
738
+ stripped = text.rstrip()
739
+ newlines_stripped = text[len(stripped) :].count("\n")
740
+ groups = [stripped, *groups[1:]]
741
+ elif (
742
+ # Not marked for preserving whitespace.
743
+ strip_sign != "+"
744
+ # lstrip is enabled.
745
+ and self.lstrip_blocks
746
+ # Not a variable expression.
747
+ and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
748
+ ):
749
+ # The start of text between the last newline and the tag.
750
+ l_pos = text.rfind("\n") + 1
751
+
752
+ if l_pos > 0 or line_starting:
753
+ # If there's only whitespace between the newline and the
754
+ # tag, strip it.
755
+ if whitespace_re.fullmatch(text, l_pos):
756
+ groups = [text[:l_pos], *groups[1:]]
757
+
758
+ for idx, token in enumerate(tokens):
759
+ # failure group
760
+ if token.__class__ is Failure:
761
+ raise token(lineno, filename)
762
+ # bygroup is a bit more complex, in that case we
763
+ # yield for the current token the first named
764
+ # group that matched
765
+ elif token == "#bygroup":
766
+ for key, value in m.groupdict().items():
767
+ if value is not None:
768
+ yield lineno, key, value
769
+ lineno += value.count("\n")
770
+ break
771
+ else:
772
+ raise RuntimeError(
773
+ f"{regex!r} wanted to resolve the token dynamically"
774
+ " but no group matched"
775
+ )
776
+ # normal group
777
+ else:
778
+ data = groups[idx]
779
+
780
+ if data or token not in ignore_if_empty:
781
+ yield lineno, token, data
782
+
783
+ lineno += data.count("\n") + newlines_stripped
784
+ newlines_stripped = 0
785
+
786
+ # strings as token just are yielded as it.
787
+ else:
788
+ data = m.group()
789
+
790
+ # update brace/parentheses balance
791
+ if tokens == TOKEN_OPERATOR:
792
+ if data == "{":
793
+ balancing_stack.append("}")
794
+ elif data == "(":
795
+ balancing_stack.append(")")
796
+ elif data == "[":
797
+ balancing_stack.append("]")
798
+ elif data in ("}", ")", "]"):
799
+ if not balancing_stack:
800
+ raise TemplateSyntaxError(
801
+ f"unexpected '{data}'", lineno, name, filename
802
+ )
803
+
804
+ expected_op = balancing_stack.pop()
805
+
806
+ if expected_op != data:
807
+ raise TemplateSyntaxError(
808
+ f"unexpected '{data}', expected '{expected_op}'",
809
+ lineno,
810
+ name,
811
+ filename,
812
+ )
813
+
814
+ # yield items
815
+ if data or tokens not in ignore_if_empty:
816
+ yield lineno, tokens, data
817
+
818
+ lineno += data.count("\n")
819
+
820
+ line_starting = m.group()[-1:] == "\n"
821
+ # fetch new position into new variable so that we can check
822
+ # if there is a internal parsing error which would result
823
+ # in an infinite loop
824
+ pos2 = m.end()
825
+
826
+ # handle state changes
827
+ if new_state is not None:
828
+ # remove the uppermost state
829
+ if new_state == "#pop":
830
+ stack.pop()
831
+ # resolve the new state by group checking
832
+ elif new_state == "#bygroup":
833
+ for key, value in m.groupdict().items():
834
+ if value is not None:
835
+ stack.append(key)
836
+ break
837
+ else:
838
+ raise RuntimeError(
839
+ f"{regex!r} wanted to resolve the new state dynamically"
840
+ f" but no group matched"
841
+ )
842
+ # direct state name given
843
+ else:
844
+ stack.append(new_state)
845
+
846
+ statetokens = self.rules[stack[-1]]
847
+ # we are still at the same position and no stack change.
848
+ # this means a loop without break condition, avoid that and
849
+ # raise error
850
+ elif pos2 == pos:
851
+ raise RuntimeError(
852
+ f"{regex!r} yielded empty string without stack change"
853
+ )
854
+
855
+ # publish new function and start again
856
+ pos = pos2
857
+ break
858
+ # if loop terminated without break we haven't found a single match
859
+ # either we are at the end of the file or we have a problem
860
+ else:
861
+ # end of text
862
+ if pos >= source_length:
863
+ return
864
+
865
+ # something went wrong
866
+ raise TemplateSyntaxError(
867
+ f"unexpected char {source[pos]!r} at {pos}", lineno, name, filename
868
+ )
llmeval-env/lib/python3.10/site-packages/jinja2/sandbox.py ADDED
@@ -0,0 +1,429 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A sandbox layer that ensures unsafe operations cannot be performed.
2
+ Useful when the template itself comes from an untrusted source.
3
+ """
4
+
5
+ import operator
6
+ import types
7
+ import typing as t
8
+ from collections import abc
9
+ from collections import deque
10
+ from string import Formatter
11
+
12
+ from _string import formatter_field_name_split # type: ignore
13
+ from markupsafe import EscapeFormatter
14
+ from markupsafe import Markup
15
+
16
+ from .environment import Environment
17
+ from .exceptions import SecurityError
18
+ from .runtime import Context
19
+ from .runtime import Undefined
20
+
21
+ F = t.TypeVar("F", bound=t.Callable[..., t.Any])
22
+
23
+ #: maximum number of items a range may produce
24
+ MAX_RANGE = 100000
25
+
26
+ #: Unsafe function attributes.
27
+ UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()
28
+
29
+ #: Unsafe method attributes. Function attributes are unsafe for methods too.
30
+ UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()
31
+
32
+ #: unsafe generator attributes.
33
+ UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
34
+
35
+ #: unsafe attributes on coroutines
36
+ UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
37
+
38
+ #: unsafe attributes on async generators
39
+ UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
40
+
41
+ _mutable_spec: t.Tuple[t.Tuple[t.Type[t.Any], t.FrozenSet[str]], ...] = (
42
+ (
43
+ abc.MutableSet,
44
+ frozenset(
45
+ [
46
+ "add",
47
+ "clear",
48
+ "difference_update",
49
+ "discard",
50
+ "pop",
51
+ "remove",
52
+ "symmetric_difference_update",
53
+ "update",
54
+ ]
55
+ ),
56
+ ),
57
+ (
58
+ abc.MutableMapping,
59
+ frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
60
+ ),
61
+ (
62
+ abc.MutableSequence,
63
+ frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
64
+ ),
65
+ (
66
+ deque,
67
+ frozenset(
68
+ [
69
+ "append",
70
+ "appendleft",
71
+ "clear",
72
+ "extend",
73
+ "extendleft",
74
+ "pop",
75
+ "popleft",
76
+ "remove",
77
+ "rotate",
78
+ ]
79
+ ),
80
+ ),
81
+ )
82
+
83
+
84
+ def inspect_format_method(callable: t.Callable[..., t.Any]) -> t.Optional[str]:
85
+ if not isinstance(
86
+ callable, (types.MethodType, types.BuiltinMethodType)
87
+ ) or callable.__name__ not in ("format", "format_map"):
88
+ return None
89
+
90
+ obj = callable.__self__
91
+
92
+ if isinstance(obj, str):
93
+ return obj
94
+
95
+ return None
96
+
97
+
98
+ def safe_range(*args: int) -> range:
99
+ """A range that can't generate ranges with a length of more than
100
+ MAX_RANGE items.
101
+ """
102
+ rng = range(*args)
103
+
104
+ if len(rng) > MAX_RANGE:
105
+ raise OverflowError(
106
+ "Range too big. The sandbox blocks ranges larger than"
107
+ f" MAX_RANGE ({MAX_RANGE})."
108
+ )
109
+
110
+ return rng
111
+
112
+
113
+ def unsafe(f: F) -> F:
114
+ """Marks a function or method as unsafe.
115
+
116
+ .. code-block: python
117
+
118
+ @unsafe
119
+ def delete(self):
120
+ pass
121
+ """
122
+ f.unsafe_callable = True # type: ignore
123
+ return f
124
+
125
+
126
+ def is_internal_attribute(obj: t.Any, attr: str) -> bool:
127
+ """Test if the attribute given is an internal python attribute. For
128
+ example this function returns `True` for the `func_code` attribute of
129
+ python objects. This is useful if the environment method
130
+ :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
131
+
132
+ >>> from jinja2.sandbox import is_internal_attribute
133
+ >>> is_internal_attribute(str, "mro")
134
+ True
135
+ >>> is_internal_attribute(str, "upper")
136
+ False
137
+ """
138
+ if isinstance(obj, types.FunctionType):
139
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES:
140
+ return True
141
+ elif isinstance(obj, types.MethodType):
142
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
143
+ return True
144
+ elif isinstance(obj, type):
145
+ if attr == "mro":
146
+ return True
147
+ elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
148
+ return True
149
+ elif isinstance(obj, types.GeneratorType):
150
+ if attr in UNSAFE_GENERATOR_ATTRIBUTES:
151
+ return True
152
+ elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
153
+ if attr in UNSAFE_COROUTINE_ATTRIBUTES:
154
+ return True
155
+ elif hasattr(types, "AsyncGeneratorType") and isinstance(
156
+ obj, types.AsyncGeneratorType
157
+ ):
158
+ if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
159
+ return True
160
+ return attr.startswith("__")
161
+
162
+
163
+ def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
164
+ """This function checks if an attribute on a builtin mutable object
165
+ (list, dict, set or deque) or the corresponding ABCs would modify it
166
+ if called.
167
+
168
+ >>> modifies_known_mutable({}, "clear")
169
+ True
170
+ >>> modifies_known_mutable({}, "keys")
171
+ False
172
+ >>> modifies_known_mutable([], "append")
173
+ True
174
+ >>> modifies_known_mutable([], "index")
175
+ False
176
+
177
+ If called with an unsupported object, ``False`` is returned.
178
+
179
+ >>> modifies_known_mutable("foo", "upper")
180
+ False
181
+ """
182
+ for typespec, unsafe in _mutable_spec:
183
+ if isinstance(obj, typespec):
184
+ return attr in unsafe
185
+ return False
186
+
187
+
188
+ class SandboxedEnvironment(Environment):
189
+ """The sandboxed environment. It works like the regular environment but
190
+ tells the compiler to generate sandboxed code. Additionally subclasses of
191
+ this environment may override the methods that tell the runtime what
192
+ attributes or functions are safe to access.
193
+
194
+ If the template tries to access insecure code a :exc:`SecurityError` is
195
+ raised. However also other exceptions may occur during the rendering so
196
+ the caller has to ensure that all exceptions are caught.
197
+ """
198
+
199
+ sandboxed = True
200
+
201
+ #: default callback table for the binary operators. A copy of this is
202
+ #: available on each instance of a sandboxed environment as
203
+ #: :attr:`binop_table`
204
+ default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
205
+ "+": operator.add,
206
+ "-": operator.sub,
207
+ "*": operator.mul,
208
+ "/": operator.truediv,
209
+ "//": operator.floordiv,
210
+ "**": operator.pow,
211
+ "%": operator.mod,
212
+ }
213
+
214
+ #: default callback table for the unary operators. A copy of this is
215
+ #: available on each instance of a sandboxed environment as
216
+ #: :attr:`unop_table`
217
+ default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
218
+ "+": operator.pos,
219
+ "-": operator.neg,
220
+ }
221
+
222
+ #: a set of binary operators that should be intercepted. Each operator
223
+ #: that is added to this set (empty by default) is delegated to the
224
+ #: :meth:`call_binop` method that will perform the operator. The default
225
+ #: operator callback is specified by :attr:`binop_table`.
226
+ #:
227
+ #: The following binary operators are interceptable:
228
+ #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
229
+ #:
230
+ #: The default operation form the operator table corresponds to the
231
+ #: builtin function. Intercepted calls are always slower than the native
232
+ #: operator call, so make sure only to intercept the ones you are
233
+ #: interested in.
234
+ #:
235
+ #: .. versionadded:: 2.6
236
+ intercepted_binops: t.FrozenSet[str] = frozenset()
237
+
238
+ #: a set of unary operators that should be intercepted. Each operator
239
+ #: that is added to this set (empty by default) is delegated to the
240
+ #: :meth:`call_unop` method that will perform the operator. The default
241
+ #: operator callback is specified by :attr:`unop_table`.
242
+ #:
243
+ #: The following unary operators are interceptable: ``+``, ``-``
244
+ #:
245
+ #: The default operation form the operator table corresponds to the
246
+ #: builtin function. Intercepted calls are always slower than the native
247
+ #: operator call, so make sure only to intercept the ones you are
248
+ #: interested in.
249
+ #:
250
+ #: .. versionadded:: 2.6
251
+ intercepted_unops: t.FrozenSet[str] = frozenset()
252
+
253
+ def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
254
+ super().__init__(*args, **kwargs)
255
+ self.globals["range"] = safe_range
256
+ self.binop_table = self.default_binop_table.copy()
257
+ self.unop_table = self.default_unop_table.copy()
258
+
259
+ def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
260
+ """The sandboxed environment will call this method to check if the
261
+ attribute of an object is safe to access. Per default all attributes
262
+ starting with an underscore are considered private as well as the
263
+ special attributes of internal python objects as returned by the
264
+ :func:`is_internal_attribute` function.
265
+ """
266
+ return not (attr.startswith("_") or is_internal_attribute(obj, attr))
267
+
268
+ def is_safe_callable(self, obj: t.Any) -> bool:
269
+ """Check if an object is safely callable. By default callables
270
+ are considered safe unless decorated with :func:`unsafe`.
271
+
272
+ This also recognizes the Django convention of setting
273
+ ``func.alters_data = True``.
274
+ """
275
+ return not (
276
+ getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
277
+ )
278
+
279
+ def call_binop(
280
+ self, context: Context, operator: str, left: t.Any, right: t.Any
281
+ ) -> t.Any:
282
+ """For intercepted binary operator calls (:meth:`intercepted_binops`)
283
+ this function is executed instead of the builtin operator. This can
284
+ be used to fine tune the behavior of certain operators.
285
+
286
+ .. versionadded:: 2.6
287
+ """
288
+ return self.binop_table[operator](left, right)
289
+
290
+ def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
291
+ """For intercepted unary operator calls (:meth:`intercepted_unops`)
292
+ this function is executed instead of the builtin operator. This can
293
+ be used to fine tune the behavior of certain operators.
294
+
295
+ .. versionadded:: 2.6
296
+ """
297
+ return self.unop_table[operator](arg)
298
+
299
+ def getitem(
300
+ self, obj: t.Any, argument: t.Union[str, t.Any]
301
+ ) -> t.Union[t.Any, Undefined]:
302
+ """Subscribe an object from sandboxed code."""
303
+ try:
304
+ return obj[argument]
305
+ except (TypeError, LookupError):
306
+ if isinstance(argument, str):
307
+ try:
308
+ attr = str(argument)
309
+ except Exception:
310
+ pass
311
+ else:
312
+ try:
313
+ value = getattr(obj, attr)
314
+ except AttributeError:
315
+ pass
316
+ else:
317
+ if self.is_safe_attribute(obj, argument, value):
318
+ return value
319
+ return self.unsafe_undefined(obj, argument)
320
+ return self.undefined(obj=obj, name=argument)
321
+
322
+ def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
323
+ """Subscribe an object from sandboxed code and prefer the
324
+ attribute. The attribute passed *must* be a bytestring.
325
+ """
326
+ try:
327
+ value = getattr(obj, attribute)
328
+ except AttributeError:
329
+ try:
330
+ return obj[attribute]
331
+ except (TypeError, LookupError):
332
+ pass
333
+ else:
334
+ if self.is_safe_attribute(obj, attribute, value):
335
+ return value
336
+ return self.unsafe_undefined(obj, attribute)
337
+ return self.undefined(obj=obj, name=attribute)
338
+
339
+ def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
340
+ """Return an undefined object for unsafe attributes."""
341
+ return self.undefined(
342
+ f"access to attribute {attribute!r} of"
343
+ f" {type(obj).__name__!r} object is unsafe.",
344
+ name=attribute,
345
+ obj=obj,
346
+ exc=SecurityError,
347
+ )
348
+
349
+ def format_string(
350
+ self,
351
+ s: str,
352
+ args: t.Tuple[t.Any, ...],
353
+ kwargs: t.Dict[str, t.Any],
354
+ format_func: t.Optional[t.Callable[..., t.Any]] = None,
355
+ ) -> str:
356
+ """If a format call is detected, then this is routed through this
357
+ method so that our safety sandbox can be used for it.
358
+ """
359
+ formatter: SandboxedFormatter
360
+ if isinstance(s, Markup):
361
+ formatter = SandboxedEscapeFormatter(self, escape=s.escape)
362
+ else:
363
+ formatter = SandboxedFormatter(self)
364
+
365
+ if format_func is not None and format_func.__name__ == "format_map":
366
+ if len(args) != 1 or kwargs:
367
+ raise TypeError(
368
+ "format_map() takes exactly one argument"
369
+ f" {len(args) + (kwargs is not None)} given"
370
+ )
371
+
372
+ kwargs = args[0]
373
+ args = ()
374
+
375
+ rv = formatter.vformat(s, args, kwargs)
376
+ return type(s)(rv)
377
+
378
+ def call(
379
+ __self, # noqa: B902
380
+ __context: Context,
381
+ __obj: t.Any,
382
+ *args: t.Any,
383
+ **kwargs: t.Any,
384
+ ) -> t.Any:
385
+ """Call an object from sandboxed code."""
386
+ fmt = inspect_format_method(__obj)
387
+ if fmt is not None:
388
+ return __self.format_string(fmt, args, kwargs, __obj)
389
+
390
+ # the double prefixes are to avoid double keyword argument
391
+ # errors when proxying the call.
392
+ if not __self.is_safe_callable(__obj):
393
+ raise SecurityError(f"{__obj!r} is not safely callable")
394
+ return __context.call(__obj, *args, **kwargs)
395
+
396
+
397
+ class ImmutableSandboxedEnvironment(SandboxedEnvironment):
398
+ """Works exactly like the regular `SandboxedEnvironment` but does not
399
+ permit modifications on the builtin mutable objects `list`, `set`, and
400
+ `dict` by using the :func:`modifies_known_mutable` function.
401
+ """
402
+
403
+ def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
404
+ if not super().is_safe_attribute(obj, attr, value):
405
+ return False
406
+
407
+ return not modifies_known_mutable(obj, attr)
408
+
409
+
410
+ class SandboxedFormatter(Formatter):
411
+ def __init__(self, env: Environment, **kwargs: t.Any) -> None:
412
+ self._env = env
413
+ super().__init__(**kwargs)
414
+
415
+ def get_field(
416
+ self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
417
+ ) -> t.Tuple[t.Any, str]:
418
+ first, rest = formatter_field_name_split(field_name)
419
+ obj = self.get_value(first, args, kwargs)
420
+ for is_attr, i in rest:
421
+ if is_attr:
422
+ obj = self._env.getattr(obj, i)
423
+ else:
424
+ obj = self._env.getitem(obj, i)
425
+ return obj, first
426
+
427
+
428
+ class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
429
+ pass
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/LICENSE.md ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2020 EleutherAI
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/METADATA ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: lm_eval
3
+ Version: 0.4.2
4
+ Summary: A framework for evaluating language models
5
+ Author-email: EleutherAI <[email protected]>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/EleutherAI/lm-evaluation-harness
8
+ Project-URL: Repository, https://github.com/EleutherAI/lm-evaluation-harness
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Operating System :: OS Independent
13
+ Requires-Python: >=3.8
14
+ Description-Content-Type: text/markdown
15
+ License-File: LICENSE.md
16
+ Requires-Dist: accelerate >=0.21.0
17
+ Requires-Dist: evaluate
18
+ Requires-Dist: datasets >=2.16.0
19
+ Requires-Dist: evaluate >=0.4.0
20
+ Requires-Dist: jsonlines
21
+ Requires-Dist: numexpr
22
+ Requires-Dist: peft >=0.2.0
23
+ Requires-Dist: pybind11 >=2.6.2
24
+ Requires-Dist: pytablewriter
25
+ Requires-Dist: rouge-score >=0.0.4
26
+ Requires-Dist: sacrebleu >=1.5.0
27
+ Requires-Dist: scikit-learn >=0.24.1
28
+ Requires-Dist: sqlitedict
29
+ Requires-Dist: torch >=1.8
30
+ Requires-Dist: tqdm-multiprocess
31
+ Requires-Dist: transformers >=4.1
32
+ Requires-Dist: zstandard
33
+ Requires-Dist: dill
34
+ Requires-Dist: word2number
35
+ Requires-Dist: more-itertools
36
+ Provides-Extra: all
37
+ Requires-Dist: lm-eval[anthropic] ; extra == 'all'
38
+ Requires-Dist: lm-eval[dev] ; extra == 'all'
39
+ Requires-Dist: lm-eval[gptq] ; extra == 'all'
40
+ Requires-Dist: lm-eval[hf_transfer] ; extra == 'all'
41
+ Requires-Dist: lm-eval[ifeval] ; extra == 'all'
42
+ Requires-Dist: lm-eval[mamba] ; extra == 'all'
43
+ Requires-Dist: lm-eval[math] ; extra == 'all'
44
+ Requires-Dist: lm-eval[multilingual] ; extra == 'all'
45
+ Requires-Dist: lm-eval[openai] ; extra == 'all'
46
+ Requires-Dist: lm-eval[promptsource] ; extra == 'all'
47
+ Requires-Dist: lm-eval[sentencepiece] ; extra == 'all'
48
+ Requires-Dist: lm-eval[testing] ; extra == 'all'
49
+ Requires-Dist: lm-eval[vllm] ; extra == 'all'
50
+ Requires-Dist: lm-eval[zeno] ; extra == 'all'
51
+ Requires-Dist: lm-eval[wandb] ; extra == 'all'
52
+ Provides-Extra: anthropic
53
+ Requires-Dist: anthropic ; extra == 'anthropic'
54
+ Provides-Extra: dev
55
+ Requires-Dist: pytest ; extra == 'dev'
56
+ Requires-Dist: pytest-cov ; extra == 'dev'
57
+ Requires-Dist: pytest-xdist ; extra == 'dev'
58
+ Requires-Dist: pre-commit ; extra == 'dev'
59
+ Requires-Dist: mypy ; extra == 'dev'
60
+ Provides-Extra: gptq
61
+ Requires-Dist: auto-gptq[triton] >=0.6.0 ; extra == 'gptq'
62
+ Provides-Extra: hf_transfer
63
+ Requires-Dist: hf-transfer ; extra == 'hf_transfer'
64
+ Provides-Extra: ifeval
65
+ Requires-Dist: langdetect ; extra == 'ifeval'
66
+ Requires-Dist: immutabledict ; extra == 'ifeval'
67
+ Provides-Extra: mamba
68
+ Requires-Dist: mamba-ssm ; extra == 'mamba'
69
+ Requires-Dist: causal-conv1d ==1.0.2 ; extra == 'mamba'
70
+ Provides-Extra: math
71
+ Requires-Dist: sympy >=1.12 ; extra == 'math'
72
+ Requires-Dist: antlr4-python3-runtime ==4.11 ; extra == 'math'
73
+ Provides-Extra: multilingual
74
+ Requires-Dist: nagisa >=0.2.7 ; extra == 'multilingual'
75
+ Requires-Dist: jieba >=0.42.1 ; extra == 'multilingual'
76
+ Requires-Dist: pycountry ; extra == 'multilingual'
77
+ Provides-Extra: neuronx
78
+ Requires-Dist: optimum[neuronx] ; extra == 'neuronx'
79
+ Provides-Extra: openai
80
+ Requires-Dist: openai ==1.3.9 ; extra == 'openai'
81
+ Requires-Dist: tiktoken ; extra == 'openai'
82
+ Provides-Extra: optimum
83
+ Requires-Dist: optimum[openvino] ; extra == 'optimum'
84
+ Provides-Extra: promptsource
85
+ Requires-Dist: promptsource >=0.2.3 ; extra == 'promptsource'
86
+ Provides-Extra: sentencepiece
87
+ Requires-Dist: sentencepiece >=0.1.98 ; extra == 'sentencepiece'
88
+ Requires-Dist: protobuf >=4.22.1 ; extra == 'sentencepiece'
89
+ Provides-Extra: testing
90
+ Requires-Dist: pytest ; extra == 'testing'
91
+ Requires-Dist: pytest-cov ; extra == 'testing'
92
+ Requires-Dist: pytest-xdist ; extra == 'testing'
93
+ Provides-Extra: vllm
94
+ Requires-Dist: vllm ==0.3.2 ; extra == 'vllm'
95
+ Provides-Extra: wandb
96
+ Requires-Dist: wandb >=0.16.3 ; extra == 'wandb'
97
+ Requires-Dist: pandas ; extra == 'wandb'
98
+ Requires-Dist: numpy ; extra == 'wandb'
99
+ Provides-Extra: zeno
100
+ Requires-Dist: pandas ; extra == 'zeno'
101
+ Requires-Dist: zeno-client ; extra == 'zeno'
102
+
103
+ # Language Model Evaluation Harness
104
+
105
+ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10256836.svg)](https://doi.org/10.5281/zenodo.10256836)
106
+
107
+ ## Announcement
108
+ **A new v0.4.0 release of lm-evaluation-harness is available** !
109
+
110
+ New updates and features include:
111
+
112
+ - Internal refactoring
113
+ - Config-based task creation and configuration
114
+ - Easier import and sharing of externally-defined task config YAMLs
115
+ - Support for Jinja2 prompt design, easy modification of prompts + prompt imports from Promptsource
116
+ - More advanced configuration options, including output post-processing, answer extraction, and multiple LM generations per document, configurable fewshot settings, and more
117
+ - Speedups and new modeling libraries supported, including: faster data-parallel HF model usage, vLLM support, MPS support with HuggingFace, and more
118
+ - Logging and usability changes
119
+ - New tasks including CoT BIG-Bench-Hard, Belebele, user-defined task groupings, and more
120
+
121
+ Please see our updated documentation pages in `docs/` for more details.
122
+
123
+ Development will be continuing on the `main` branch, and we encourage you to give us feedback on what features are desired and how to improve the library further, or ask questions, either in issues or PRs on GitHub, or in the [EleutherAI discord](https://discord.gg/eleutherai)!
124
+
125
+ ## Overview
126
+
127
+ This project provides a unified framework to test generative language models on a large number of different evaluation tasks.
128
+
129
+ **Features:**
130
+ - Over 60 standard academic benchmarks for LLMs, with hundreds of subtasks and variants implemented.
131
+ - Support for models loaded via [transformers](https://github.com/huggingface/transformers/) (including quantization via [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)), [GPT-NeoX](https://github.com/EleutherAI/gpt-neox), and [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/), with a flexible tokenization-agnostic interface.
132
+ - Support for fast and memory-efficient inference with [vLLM](https://github.com/vllm-project/vllm).
133
+ - Support for commercial APIs including [OpenAI](https://openai.com), and [TextSynth](https://textsynth.com/).
134
+ - Support for evaluation on adapters (e.g. LoRA) supported in [HuggingFace's PEFT library](https://github.com/huggingface/peft).
135
+ - Support for local models and benchmarks.
136
+ - Evaluation with publicly available prompts ensures reproducibility and comparability between papers.
137
+ - Easy support for custom prompts and evaluation metrics.
138
+
139
+ The Language Model Evaluation Harness is the backend for 🤗 Hugging Face's popular [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), has been used in [hundreds of papers](https://scholar.google.com/scholar?oi=bibs&hl=en&authuser=2&cites=15052937328817631261,4097184744846514103,1520777361382155671,17476825572045927382,18443729326628441434,14801318227356878622,7890865700763267262,12854182577605049984,15641002901115500560,5104500764547628290), and is used internally by dozens of organizations including NVIDIA, Cohere, BigScience, BigCode, Nous Research, and Mosaic ML.
140
+
141
+ ## Install
142
+
143
+ To install the `lm-eval` package from the github repository, run:
144
+
145
+ ```bash
146
+ git clone https://github.com/EleutherAI/lm-evaluation-harness
147
+ cd lm-evaluation-harness
148
+ pip install -e .
149
+ ```
150
+
151
+ We also provide a number of optional dependencies for extended functionality. A detailed table is available at the end of this document.
152
+
153
+ ## Basic Usage
154
+
155
+ ### Hugging Face `transformers`
156
+
157
+ To evaluate a model hosted on the [HuggingFace Hub](https://huggingface.co/models) (e.g. GPT-J-6B) on `hellaswag` you can use the following command (this assumes you are using a CUDA-compatible GPU):
158
+
159
+ ```bash
160
+ lm_eval --model hf \
161
+ --model_args pretrained=EleutherAI/gpt-j-6B \
162
+ --tasks hellaswag \
163
+ --device cuda:0 \
164
+ --batch_size 8
165
+ ```
166
+
167
+ Additional arguments can be provided to the model constructor using the `--model_args` flag. Most notably, this supports the common practice of using the `revisions` feature on the Hub to store partially trained checkpoints, or to specify the datatype for running a model:
168
+
169
+ ```bash
170
+ lm_eval --model hf \
171
+ --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
172
+ --tasks lambada_openai,hellaswag \
173
+ --device cuda:0 \
174
+ --batch_size 8
175
+ ```
176
+
177
+ Models that are loaded via both `transformers.AutoModelForCausalLM` (autoregressive, decoder-only GPT style models) and `transformers.AutoModelForSeq2SeqLM` (such as encoder-decoder models like T5) in Huggingface are supported.
178
+
179
+ Batch size selection can be automated by setting the ```--batch_size``` flag to ```auto```. This will perform automatic detection of the largest batch size that will fit on your device. On tasks where there is a large difference between the longest and shortest example, it can be helpful to periodically recompute the largest batch size, to gain a further speedup. To do this, append ```:N``` to above flag to automatically recompute the largest batch size ```N``` times. For example, to recompute the batch size 4 times, the command would be:
180
+
181
+ ```bash
182
+ lm_eval --model hf \
183
+ --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
184
+ --tasks lambada_openai,hellaswag \
185
+ --device cuda:0 \
186
+ --batch_size auto:4
187
+ ```
188
+
189
+ The full list of supported arguments are provided [here](./docs/interface.md), and on the terminal by calling `lm_eval -h`. Alternatively, you can use `lm-eval` instead of `lm_eval`.
190
+
191
+ > [!Note]
192
+ > Just like you can provide a local path to `transformers.AutoModel`, you can also provide a local path to `lm_eval` via `--model_args pretrained=/path/to/model`
193
+
194
+ #### Multi-GPU Evaluation with Hugging Face `accelerate`
195
+
196
+ We support two main ways of using Hugging Face's [accelerate 🚀](https://github.com/huggingface/accelerate) library for multi-GPU evaluation.
197
+
198
+ To perform *data-parallel evaluation* (where each GPU loads a **separate full copy** of the model), we leverage the `accelerate` launcher as follows:
199
+
200
+ ```
201
+ accelerate launch -m lm_eval --model hf \
202
+ --tasks lambada_openai,arc_easy \
203
+ --batch_size 16
204
+ ```
205
+ (or via `accelerate launch --no-python lm_eval`).
206
+
207
+ For cases where your model can fit on a single GPU, this allows you to evaluate on K GPUs K times faster than on one.
208
+
209
+ **WARNING**: This setup does not work with FSDP model sharding, so in `accelerate config` FSDP must be disabled, or the NO_SHARD FSDP option must be used.
210
+
211
+ The second way of using `accelerate` for multi-GPU evaluation is when your model is *too large to fit on a single GPU.*
212
+
213
+ In this setting, run the library *outside of the `accelerate` launcher*, but passing `parallelize=True` to `--model_args` as follows:
214
+
215
+ ```
216
+ lm_eval --model hf \
217
+ --tasks lambada_openai,arc_easy \
218
+ --model_args parallelize=True \
219
+ --batch_size 16
220
+ ```
221
+
222
+ This means that your model's weights will be split across all available GPUs.
223
+
224
+ For more advanced users or even larger models, we allow for the following arguments when `parallelize=True` as well:
225
+ - `device_map_option`: How to split model weights across available GPUs. defaults to "auto".
226
+ - `max_memory_per_gpu`: the max GPU memory to use per GPU in loading the model.
227
+ - `max_cpu_memory`: the max amount of CPU memory to use when offloading the model weights to RAM.
228
+ - `offload_folder`: a folder where model weights will be offloaded to disk if needed.
229
+
230
+ These two options (`accelerate launch` and `parallelize=True`) are mutually exclusive.
231
+
232
+ **Note: we do not currently support multi-node evaluations natively, and advise using either an externally hosted server to run inference requests against, or creating a custom integration with your distributed framework [as is done for the GPT-NeoX library](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py).**
233
+
234
+ ### NVIDIA `nemo` models
235
+
236
+ [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo) is a generative AI framework built for researchers and pytorch developers working on language models.
237
+
238
+ To evaluate a `nemo` model, start by installing NeMo following [the documentation](https://github.com/NVIDIA/NeMo?tab=readme-ov-file#installation). We highly recommended to use the NVIDIA PyTorch or NeMo container, especially if having issues installing Apex or any other dependencies (see [latest released containers](https://github.com/NVIDIA/NeMo/releases)). Please also install the lm evaluation harness library following the instructions in [the Install section](https://github.com/EleutherAI/lm-evaluation-harness/tree/main?tab=readme-ov-file#install).
239
+
240
+ NeMo models can be obtained through [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/models) or in [NVIDIA's Hugging Face page](https://huggingface.co/nvidia). In [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo/tree/main/scripts/nlp_language_modeling) there are conversion scripts to convert the `hf` checkpoints of popular models like llama, falcon, mixtral or mpt to `nemo`.
241
+
242
+ Run a `nemo` model on one GPU:
243
+ ```bash
244
+ lm_eval --model nemo_lm \
245
+ --model_args path=<path_to_nemo_model> \
246
+ --tasks hellaswag \
247
+ --batch_size 32
248
+ ```
249
+
250
+ It is recommended to unpack the `nemo` model to avoid the unpacking inside the docker container - it may overflow disk space. For that you can run:
251
+
252
+ ```
253
+ mkdir MY_MODEL
254
+ tar -xvf MY_MODEL.nemo -c MY_MODEL
255
+ ```
256
+
257
+ #### Multi-GPU evaluation with NVIDIA `nemo` models
258
+
259
+ By default, only one GPU is used. But we do support either data replication or tensor/pipeline parallelism during evaluation, on one node.
260
+
261
+ 1) To enable data replication, set the `model_args` of `devices` to the number of data replicas to run. For example, the command to run 8 data replicas over 8 GPUs is:
262
+ ```bash
263
+ torchrun --nproc-per-node=8 --no-python lm_eval \
264
+ --model nemo_lm \
265
+ --model_args path=<path_to_nemo_model>,devices=8 \
266
+ --tasks hellaswag \
267
+ --batch_size 32
268
+ ```
269
+
270
+ 2) To enable tensor and/or pipeline parallelism, set the `model_args` of `tensor_model_parallel_size` and/or `pipeline_model_parallel_size`. In addition, you also have to set up `devices` to be equal to the product of `tensor_model_parallel_size` and/or `pipeline_model_parallel_size`. For example, the command to use one node of 4 GPUs with tensor parallelism of 2 and pipeline parallelism of 2 is:
271
+ ```bash
272
+ torchrun --nproc-per-node=4 --no-python lm_eval \
273
+ --model nemo_lm \
274
+ --model_args path=<path_to_nemo_model>,devices=4,tensor_model_parallel_size=2,pipeline_model_parallel_size=2 \
275
+ --tasks hellaswag \
276
+ --batch_size 32
277
+ ```
278
+ Note that it is recommended to substitute the `python` command by `torchrun --nproc-per-node=<number of devices> --no-python` to facilitate loading the model into the GPUs. This is especially important for large checkpoints loaded into multiple GPUs.
279
+
280
+ Not supported yet: multi-node evaluation and combinations of data replication with tensor or pipeline parallelism.
281
+
282
+ ### Tensor + Data Parallel and Optimized Inference with `vLLM`
283
+
284
+ We also support vLLM for faster inference on [supported model types](https://docs.vllm.ai/en/latest/models/supported_models.html), especially faster when splitting a model across multiple GPUs. For single-GPU or multi-GPU — tensor parallel, data parallel, or a combination of both — inference, for example:
285
+
286
+ ```bash
287
+ lm_eval --model vllm \
288
+ --model_args pretrained={model_name},tensor_parallel_size={GPUs_per_model},dtype=auto,gpu_memory_utilization=0.8,data_parallel_size={model_replicas} \
289
+ --tasks lambada_openai \
290
+ --batch_size auto
291
+ ```
292
+ To use vllm, do `pip install lm_eval[vllm]`. For a full list of supported vLLM configurations, please reference our [vLLM integration](https://github.com/EleutherAI/lm-evaluation-harness/blob/e74ec966556253fbe3d8ecba9de675c77c075bce/lm_eval/models/vllm_causallms.py) and the vLLM documentation.
293
+
294
+ vLLM occasionally differs in output from Huggingface. We treat Huggingface as the reference implementation, and provide a [script](./scripts/model_comparator.py) for checking the validity of vllm results against HF.
295
+
296
+ > [!Tip]
297
+ > For fastest performance, we recommend using `--batch_size auto` for vLLM whenever possible, to leverage its continuous batching functionality!
298
+
299
+ > [!Tip]
300
+ > Passing `max_model_len=4096` or some other reasonable default to vLLM through model args may cause speedups or prevent out-of-memory errors when trying to use auto batch size, such as for Mistral-7B-v0.1 which defaults to a maximum length of 32k.
301
+
302
+ ### Model APIs and Inference Servers
303
+
304
+ Our library also supports the evaluation of models served via several commercial APIs, and we hope to implement support for the most commonly used performant local/self-hosted inference servers.
305
+
306
+ To call a hosted model, use:
307
+
308
+ ```bash
309
+ export OPENAI_API_KEY=YOUR_KEY_HERE
310
+ lm_eval --model openai-completions \
311
+ --model_args model=davinci \
312
+ --tasks lambada_openai,hellaswag
313
+ ```
314
+
315
+ We also support using your own local inference server with servers that mirror the OpenAI Completions and ChatCompletions APIs.
316
+
317
+ ```bash
318
+ lm_eval --model local-chat-completions --tasks gsm8k --model_args model=facebook/opt-125m,base_url=http://{yourip}:8000/v1
319
+ ```
320
+ Note that for externally hosted models, configs such as `--device` and `--batch_size` should not be used and do not function. Just like you can use `--model_args` to pass arbitrary arguments to the model constructor for local models, you can use it to pass arbitrary arguments to the model API for hosted models. See the documentation of the hosting service for information on what arguments they support.
321
+
322
+ | API or Inference Server | Implemented? | `--model <xxx>` name | Models supported: | Request Types: |
323
+ |---------------------------------------------------------------------------------------------------------------------------|---------------------------------|---------------------------------------------------------------------|-----------------------------------------------------------------------------------------------|------------------------------------------------------------|
324
+ | OpenAI Completions | :heavy_check_mark: | `openai-completions`, `local-completions` | All OpenAI Completions API models | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
325
+ | OpenAI ChatCompletions | :heavy_check_mark: | `openai-chat-completions`, `local-chat-completions` | [All ChatCompletions API models](https://platform.openai.com/docs/guides/gpt) | `generate_until` (no logprobs) |
326
+ | Anthropic | :heavy_check_mark: | `anthropic` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/reference/selecting-a-model) | `generate_until` (no logprobs) |
327
+ | Anthropic Chat | :heavy_check_mark: | `anthropic-chat`, `anthropic-chat-completions` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/docs/models-overview) | `generate_until` (no logprobs) |
328
+ | Textsynth | :heavy_check_mark: | `textsynth` | [All supported engines](https://textsynth.com/documentation.html#engines) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
329
+ | Cohere | [:hourglass: - blocked on Cohere API bug](https://github.com/EleutherAI/lm-evaluation-harness/pull/395) | N/A | [All `cohere.generate()` engines](https://docs.cohere.com/docs/models) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
330
+ | [Llama.cpp](https://github.com/ggerganov/llama.cpp) (via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)) | :heavy_check_mark: | `gguf`, `ggml` | [All models supported by llama.cpp](https://github.com/ggerganov/llama.cpp) | `generate_until`, `loglikelihood`, (perplexity evaluation not yet implemented) |
331
+ | vLLM | :heavy_check_mark: | `vllm` | [Most HF Causal Language Models](https://docs.vllm.ai/en/latest/models/supported_models.html) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
332
+ | Mamba | :heavy_check_mark: | `mamba_ssm` | [Mamba architecture Language Models via the `mamba_ssm` package](https://huggingface.co/state-spaces) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
333
+ | Huggingface Optimum (Causal LMs) | ✔️ | `openvino` | Any decoder-only AutoModelForCausalLM converted with Huggingface Optimum into OpenVINO™ Intermediate Representation (IR) format | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | ... |
334
+ | Neuron via AWS Inf2 (Causal LMs) | ✔️ | `neuronx` | Any decoder-only AutoModelForCausalLM supported to run on [huggingface-ami image for inferentia2](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | ... |
335
+ | Your local inference server! | :heavy_check_mark: | `local-completions` or `local-chat-completions` (using `openai-chat-completions` model type) | Any server address that accepts GET requests using HF models and mirror's OpenAI's Completions or ChatCompletions interface | `generate_until` | | ... |
336
+
337
+ Models which do not supply logits or logprobs can be used with tasks of type `generate_until` only, while local models, or APIs that supply logprobs/logits of their prompts, can be run on all task types: `generate_until`, `loglikelihood`, `loglikelihood_rolling`, and `multiple_choice`.
338
+
339
+ For more information on the different task `output_types` and model request types, see [our documentation](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/model_guide.md#interface).
340
+
341
+ > [!Note]
342
+ > For best performance with closed chat model APIs such as Anthropic Claude 3 and GPT-4, we recommend carefully looking at a few sample outputs using `--limit 10` first to confirm answer extraction and scoring on generative tasks is performing as expected. providing `system="<some system prompt here>"` within `--model_args` for anthropic-chat-completions, to instruct the model what format to respond in, may be useful.
343
+
344
+
345
+ ### Other Frameworks
346
+
347
+ A number of other libraries contain scripts for calling the eval harness through their library. These include [GPT-NeoX](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py), [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/blob/main/examples/MoE/readme_evalharness.md), and [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/eval_harness.py).
348
+
349
+ To create your own custom integration you can follow instructions from [this tutorial](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage).
350
+
351
+ ### Additional Features
352
+ > [!Note]
353
+ > For tasks unsuitable for direct evaluation — either due risks associated with executing untrusted code or complexities in the evaluation process — the `--predict_only` flag is available to obtain decoded generations for post-hoc evaluation.
354
+
355
+ If you have a Metal compatible Mac, you can run the eval harness using the MPS back-end by replacing `--device cuda:0` with `--device mps` (requires PyTorch version 2.1 or higher).
356
+
357
+ > [!Note]
358
+ > You can inspect what the LM inputs look like by running the following command:
359
+ > ```bash
360
+ > python write_out.py \
361
+ > --tasks <task1,task2,...> \
362
+ > --num_fewshot 5 \
363
+ > --num_examples 10 \
364
+ > --output_base_path /path/to/output/folder
365
+ > ```
366
+ > This will write out one text file for each task.
367
+
368
+ To verify the data integrity of the tasks you're performing in addition to running the tasks themselves, you can use the `--check_integrity` flag:
369
+
370
+ ```bash
371
+ lm_eval --model openai \
372
+ --model_args engine=davinci \
373
+ --tasks lambada_openai,hellaswag \
374
+ --check_integrity
375
+ ```
376
+
377
+ ## Advanced Usage Tips
378
+
379
+ For models loaded with the HuggingFace `transformers` library, any arguments provided via `--model_args` get passed to the relevant constructor directly. This means that anything you can do with `AutoModel` can be done with our library. For example, you can pass a local path via `pretrained=` or use models finetuned with [PEFT](https://github.com/huggingface/peft) by taking the call you would run to evaluate the base model and add `,peft=PATH` to the `model_args` argument:
380
+ ```bash
381
+ lm_eval --model hf \
382
+ --model_args pretrained=EleutherAI/gpt-j-6b,parallelize=True,load_in_4bit=True,peft=nomic-ai/gpt4all-j-lora \
383
+ --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq \
384
+ --device cuda:0
385
+ ```
386
+
387
+ [GPTQ](https://github.com/PanQiWei/AutoGPTQ) quantized models can be loaded by specifying their file names in `,autogptq=NAME` (or `,autogptq=True` for default names) in the `model_args` argument:
388
+
389
+ ```bash
390
+ lm_eval --model hf \
391
+ --model_args pretrained=model-name-or-path,autogptq=model.safetensors,gptq_use_triton=True \
392
+ --tasks hellaswag
393
+ ```
394
+
395
+ We support wildcards in task names, for example you can run all of the machine-translated lambada tasks via `--task lambada_openai_mt_*`.
396
+
397
+ To save evaluation results provide an `--output_path`. We also support logging model responses with the `--log_samples` flag for post-hoc analysis.
398
+
399
+ Additionally, one can provide a directory with `--use_cache` to cache the results of prior runs. This allows you to avoid repeated execution of the same (model, task) pairs for re-scoring.
400
+
401
+ For a full list of supported arguments, check out the [interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md) guide in our documentation!
402
+
403
+ ## Visualizing Results
404
+
405
+ You can seamlessly visualize and analyze the results of your evaluation harness runs using both Weights & Biases (W&B) and Zeno.
406
+
407
+ ### Zeno
408
+
409
+ You can use [Zeno](https://zenoml.com) to visualize the results of your eval harness runs.
410
+
411
+ First, head to [hub.zenoml.com](https://hub.zenoml.com) to create an account and get an API key [on your account page](https://hub.zenoml.com/account).
412
+ Add this key as an environment variable:
413
+
414
+ ```bash
415
+ export ZENO_API_KEY=[your api key]
416
+ ```
417
+
418
+ You'll also need to install the `lm_eval[zeno]` package extra.
419
+
420
+ To visualize the results, run the eval harness with the `log_samples` and `output_path` flags.
421
+ We expect `output_path` to contain multiple folders that represent individual model names.
422
+ You can thus run your evaluation on any number of tasks and models and upload all of the results as projects on Zeno.
423
+
424
+ ```bash
425
+ lm_eval \
426
+ --model hf \
427
+ --model_args pretrained=EleutherAI/gpt-j-6B \
428
+ --tasks hellaswag \
429
+ --device cuda:0 \
430
+ --batch_size 8 \
431
+ --log_samples \
432
+ --output_path output/gpt-j-6B
433
+ ```
434
+
435
+ Then, you can upload the resulting data using the `zeno_visualize` script:
436
+
437
+ ```bash
438
+ python scripts/zeno_visualize.py \
439
+ --data_path output \
440
+ --project_name "Eleuther Project"
441
+ ```
442
+
443
+ This will use all subfolders in `data_path` as different models and upload all tasks within these model folders to Zeno.
444
+ If you run the eval harness on multiple tasks, the `project_name` will be used as a prefix and one project will be created per task.
445
+
446
+ You can find an example of this workflow in [examples/visualize-zeno.ipynb](examples/visualize-zeno.ipynb).
447
+
448
+ ### Weights and Biases
449
+
450
+ With the [Weights and Biases](https://wandb.ai/site) integration, you can now spend more time extracting deeper insights into your evaluation results. The integration is designed to streamline the process of logging and visualizing experiment results using the Weights & Biases (W&B) platform.
451
+
452
+ The integration provides functionalities
453
+
454
+ - to automatically log the evaluation results,
455
+ - log the samples as W&B Tables for easy visualization,
456
+ - log the `results.json` file as an artifact for version control,
457
+ - log the `<task_name>_eval_samples.json` file if the samples are logged,
458
+ - generate a comprehensive report for analysis and visualization with all the important metrics,
459
+ - log task and cli specific configs,
460
+ - and more out of the box like the command used to run the evaluation, GPU/CPU counts, timestamp, etc.
461
+
462
+ First you'll need to install the lm_eval[wandb] package extra. Do `pip install lm_eval[wandb]`.
463
+
464
+ Authenticate your machine with your unique W&B token. Visit https://wandb.ai/authorize to get one. Do `wandb login` in your command line terminal.
465
+
466
+ Run eval harness as usual with a `wandb_args` flag. Use this flag to provide arguments for initializing a wandb run ([wandb.init](https://docs.wandb.ai/ref/python/init)) as comma separated string arguments.
467
+
468
+ ```bash
469
+ lm_eval \
470
+ --model hf \
471
+ --model_args pretrained=microsoft/phi-2,trust_remote_code=True \
472
+ --tasks hellaswag,mmlu_abstract_algebra \
473
+ --device cuda:0 \
474
+ --batch_size 8 \
475
+ --output_path output/phi-2 \
476
+ --limit 10 \
477
+ --wandb_args project=lm-eval-harness-integration \
478
+ --log_samples
479
+ ```
480
+
481
+ In the stdout, you will find the link to the W&B run page as well as link to the generated report. You can find an example of this workflow in [examples/visualize-wandb.ipynb](examples/visualize-wandb.ipynb), and an example of how to integrate it beyond the CLI.
482
+
483
+ ## How to Contribute or Learn More?
484
+
485
+ For more information on the library and how everything fits together, check out all of our [documentation pages](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs)! We plan to post a larger roadmap of desired + planned library improvements soon, with more information on how contributors can help.
486
+
487
+ ### Implementing new tasks
488
+
489
+ To implement a new task in the eval harness, see [this guide](./docs/new_task_guide.md).
490
+
491
+ In general, we follow this priority list for addressing concerns about prompting and other eval details:
492
+ 1. If there is widespread agreement among people who train LLMs, use the agreed upon procedure.
493
+ 2. If there is a clear and unambiguous official implementation, use that procedure.
494
+ 3. If there is widespread agreement among people who evaluate LLMs, use the agreed upon procedure.
495
+ 4. If there are multiple common implementations but not universal or widespread agreement, use our preferred option among the common implementations. As before, prioritize choosing from among the implementations found in LLM training papers.
496
+
497
+ These are guidelines and not rules, and can be overruled in special circumstances.
498
+
499
+ We try to prioritize agreement with the procedures used by other groups to decrease the harm when people inevitably compare runs across different papers despite our discouragement of the practice. Historically, we also prioritized the implementation from [Language Models are Few Shot Learners](https://arxiv.org/abs/2005.14165) as our original goal was specifically to compare results with that paper.
500
+
501
+ ### Support
502
+
503
+ The best way to get support is to open an issue on this repo or join the [EleutherAI Discord server](https://discord.gg/eleutherai). The `#lm-thunderdome` channel is dedicated to developing this project and the `#release-discussion` channel is for receiving support for our releases. If you've used the library and have had a positive (or negative) experience, we'd love to hear from you!
504
+
505
+ ## Optional Extras
506
+ Extras dependencies can be installed via `pip install -e ".[NAME]"`
507
+
508
+ | Name | Use |
509
+ |---------------|---------------------------------------|
510
+ | anthropic | For using Anthropic's models |
511
+ | dev | For linting PRs and contributions |
512
+ | gptq | For loading models with GPTQ |
513
+ | hf_transfer | For speeding up HF Hub file downloads |
514
+ | ifeval | For running the IFEval task |
515
+ | neuronx | For running on AWS inf2 instances |
516
+ | mamba | For loading Mamba SSM models |
517
+ | math | For running math task answer checking |
518
+ | multilingual | For multilingual tokenizers |
519
+ | openai | For using OpenAI's models |
520
+ | optimum | For running Intel OpenVINO models |
521
+ | promptsource | For using PromptSource prompts |
522
+ | sentencepiece | For using the sentencepiece tokenizer |
523
+ | testing | For running library test suite |
524
+ | vllm | For loading models with vLLM |
525
+ | zeno | For visualizing results with Zeno |
526
+ |---------------|---------------------------------------|
527
+ | all | Loads all extras (not recommended) |
528
+
529
+ ## Cite as
530
+
531
+ ```
532
+ @misc{eval-harness,
533
+ author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy},
534
+ title = {A framework for few-shot language model evaluation},
535
+ month = 12,
536
+ year = 2023,
537
+ publisher = {Zenodo},
538
+ version = {v0.4.0},
539
+ doi = {10.5281/zenodo.10256836},
540
+ url = {https://zenodo.org/records/10256836}
541
+ }
542
+ ```
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/lm-eval,sha256=YCHseol5Es5aPiLOSeK31BN_idM_07-Sqm_iLUi8yzY,265
2
+ ../../../bin/lm_eval,sha256=YCHseol5Es5aPiLOSeK31BN_idM_07-Sqm_iLUi8yzY,265
3
+ __editable__.lm_eval-0.4.2.pth,sha256=C4fSS19B6d-idgvdOOiL7g2yPVs0ZZ0_9S0BPtrZ1ew,85
4
+ __editable___lm_eval_0_4_2_finder.py,sha256=-cpF5qs4x6nwbtS29Q_gV7PPIQt_IqmyNt80QicpZmg,17818
5
+ __pycache__/__editable___lm_eval_0_4_2_finder.cpython-310.pyc,,
6
+ lm_eval-0.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
7
+ lm_eval-0.4.2.dist-info/LICENSE.md,sha256=qAbkJUdiDf-8LsAzMyLIs1I7SvEeBZvhTvgapbGuAh8,1067
8
+ lm_eval-0.4.2.dist-info/METADATA,sha256=2a20uOIwe3xQS75A3LIJO_yggs1dNcrTgoks_SlteXs,35028
9
+ lm_eval-0.4.2.dist-info/RECORD,,
10
+ lm_eval-0.4.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
+ lm_eval-0.4.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
12
+ lm_eval-0.4.2.dist-info/direct_url.json,sha256=HrW0qr1d7__nbpB6c0bgoXiA3UrHaXOl999Hgj6kRvs,90
13
+ lm_eval-0.4.2.dist-info/entry_points.txt,sha256=mMr4hFXQwSHYb924AKGzP6dbbBAWcXiyBO-AfXrV72E,98
14
+ lm_eval-0.4.2.dist-info/top_level.txt,sha256=risQwf3pU3EoXaYL_bKYFzW6EtBOvJxJotKdDz605Dk,8
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/REQUESTED ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.43.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/direct_url.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"dir_info": {"editable": true}, "url": "file:///mnt/weka/peacock/llm_eval/lm-evaluation"}
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [console_scripts]
2
+ lm-eval = lm_eval.__main__:cli_evaluate
3
+ lm_eval = lm_eval.__main__:cli_evaluate
llmeval-env/lib/python3.10/site-packages/lm_eval-0.4.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ lm_eval
llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-310.pyc ADDED
Binary file (4.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-310.pyc ADDED
Binary file (22.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-310.pyc ADDED
Binary file (712 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-310.pyc ADDED
Binary file (17.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/networkx/tests/test_all_random_functions.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ np = pytest.importorskip("numpy")
4
+ import random
5
+
6
+ import networkx as nx
7
+ from networkx.algorithms import approximation as approx
8
+ from networkx.algorithms import threshold
9
+
10
+ progress = 0
11
+
12
+ # store the random numbers after setting a global seed
13
+ np.random.seed(42)
14
+ np_rv = np.random.rand()
15
+ random.seed(42)
16
+ py_rv = random.random()
17
+
18
+
19
+ def t(f, *args, **kwds):
20
+ """call one function and check if global RNG changed"""
21
+ global progress
22
+ progress += 1
23
+ print(progress, ",", end="")
24
+
25
+ f(*args, **kwds)
26
+
27
+ after_np_rv = np.random.rand()
28
+ # if np_rv != after_np_rv:
29
+ # print(np_rv, after_np_rv, "don't match np!")
30
+ assert np_rv == after_np_rv
31
+ np.random.seed(42)
32
+
33
+ after_py_rv = random.random()
34
+ # if py_rv != after_py_rv:
35
+ # print(py_rv, after_py_rv, "don't match py!")
36
+ assert py_rv == after_py_rv
37
+ random.seed(42)
38
+
39
+
40
+ def run_all_random_functions(seed):
41
+ n = 20
42
+ m = 10
43
+ k = l = 2
44
+ s = v = 10
45
+ p = q = p1 = p2 = p_in = p_out = 0.4
46
+ alpha = radius = theta = 0.75
47
+ sizes = (20, 20, 10)
48
+ colors = [1, 2, 3]
49
+ G = nx.barbell_graph(12, 20)
50
+ H = nx.cycle_graph(3)
51
+ H.add_weighted_edges_from((u, v, 0.2) for u, v in H.edges)
52
+ deg_sequence = [3, 2, 1, 3, 2, 1, 3, 2, 1, 2, 1, 2, 1]
53
+ in_degree_sequence = w = sequence = aseq = bseq = deg_sequence
54
+
55
+ # print("starting...")
56
+ t(nx.maximal_independent_set, G, seed=seed)
57
+ t(nx.rich_club_coefficient, G, seed=seed, normalized=False)
58
+ t(nx.random_reference, G, seed=seed)
59
+ t(nx.lattice_reference, G, seed=seed)
60
+ t(nx.sigma, G, 1, 2, seed=seed)
61
+ t(nx.omega, G, 1, 2, seed=seed)
62
+ # print("out of smallworld.py")
63
+ t(nx.double_edge_swap, G, seed=seed)
64
+ # print("starting connected_double_edge_swap")
65
+ t(nx.connected_double_edge_swap, nx.complete_graph(9), seed=seed)
66
+ # print("ending connected_double_edge_swap")
67
+ t(nx.random_layout, G, seed=seed)
68
+ t(nx.fruchterman_reingold_layout, G, seed=seed)
69
+ t(nx.algebraic_connectivity, G, seed=seed)
70
+ t(nx.fiedler_vector, G, seed=seed)
71
+ t(nx.spectral_ordering, G, seed=seed)
72
+ # print('starting average_clustering')
73
+ t(approx.average_clustering, G, seed=seed)
74
+ t(approx.simulated_annealing_tsp, H, "greedy", source=1, seed=seed)
75
+ t(approx.threshold_accepting_tsp, H, "greedy", source=1, seed=seed)
76
+ t(
77
+ approx.traveling_salesman_problem,
78
+ H,
79
+ method=lambda G, weight: approx.simulated_annealing_tsp(
80
+ G, "greedy", weight, seed=seed
81
+ ),
82
+ )
83
+ t(
84
+ approx.traveling_salesman_problem,
85
+ H,
86
+ method=lambda G, weight: approx.threshold_accepting_tsp(
87
+ G, "greedy", weight, seed=seed
88
+ ),
89
+ )
90
+ t(nx.betweenness_centrality, G, seed=seed)
91
+ t(nx.edge_betweenness_centrality, G, seed=seed)
92
+ t(nx.approximate_current_flow_betweenness_centrality, G, seed=seed)
93
+ # print("kernighan")
94
+ t(nx.algorithms.community.kernighan_lin_bisection, G, seed=seed)
95
+ # nx.algorithms.community.asyn_lpa_communities(G, seed=seed)
96
+ t(nx.algorithms.tree.greedy_branching, G, seed=seed)
97
+ t(nx.algorithms.tree.Edmonds, G, seed=seed)
98
+ # print('done with graph argument functions')
99
+
100
+ t(nx.spectral_graph_forge, G, alpha, seed=seed)
101
+ t(nx.algorithms.community.asyn_fluidc, G, k, max_iter=1, seed=seed)
102
+ t(
103
+ nx.algorithms.connectivity.edge_augmentation.greedy_k_edge_augmentation,
104
+ G,
105
+ k,
106
+ seed=seed,
107
+ )
108
+ t(nx.algorithms.coloring.strategy_random_sequential, G, colors, seed=seed)
109
+
110
+ cs = ["d", "i", "i", "d", "d", "i"]
111
+ t(threshold.swap_d, cs, seed=seed)
112
+ t(nx.configuration_model, deg_sequence, seed=seed)
113
+ t(
114
+ nx.directed_configuration_model,
115
+ in_degree_sequence,
116
+ in_degree_sequence,
117
+ seed=seed,
118
+ )
119
+ t(nx.expected_degree_graph, w, seed=seed)
120
+ t(nx.random_degree_sequence_graph, sequence, seed=seed)
121
+ joint_degrees = {
122
+ 1: {4: 1},
123
+ 2: {2: 2, 3: 2, 4: 2},
124
+ 3: {2: 2, 4: 1},
125
+ 4: {1: 1, 2: 2, 3: 1},
126
+ }
127
+ t(nx.joint_degree_graph, joint_degrees, seed=seed)
128
+ joint_degree_sequence = [
129
+ (1, 0),
130
+ (1, 0),
131
+ (1, 0),
132
+ (2, 0),
133
+ (1, 0),
134
+ (2, 1),
135
+ (0, 1),
136
+ (0, 1),
137
+ ]
138
+ t(nx.random_clustered_graph, joint_degree_sequence, seed=seed)
139
+ constructor = [(3, 3, 0.5), (10, 10, 0.7)]
140
+ t(nx.random_shell_graph, constructor, seed=seed)
141
+ t(nx.random_triad, G.to_directed(), seed=seed)
142
+ mapping = {1: 0.4, 2: 0.3, 3: 0.3}
143
+ t(nx.utils.random_weighted_sample, mapping, k, seed=seed)
144
+ t(nx.utils.weighted_choice, mapping, seed=seed)
145
+ t(nx.algorithms.bipartite.configuration_model, aseq, bseq, seed=seed)
146
+ t(nx.algorithms.bipartite.preferential_attachment_graph, aseq, p, seed=seed)
147
+
148
+ def kernel_integral(u, w, z):
149
+ return z - w
150
+
151
+ t(nx.random_kernel_graph, n, kernel_integral, seed=seed)
152
+
153
+ sizes = [75, 75, 300]
154
+ probs = [[0.25, 0.05, 0.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]]
155
+ t(nx.stochastic_block_model, sizes, probs, seed=seed)
156
+ t(nx.random_partition_graph, sizes, p_in, p_out, seed=seed)
157
+
158
+ # print("starting generator functions")
159
+ t(threshold.random_threshold_sequence, n, p, seed=seed)
160
+ t(nx.tournament.random_tournament, n, seed=seed)
161
+ t(nx.relaxed_caveman_graph, l, k, p, seed=seed)
162
+ t(nx.planted_partition_graph, l, k, p_in, p_out, seed=seed)
163
+ t(nx.gaussian_random_partition_graph, n, s, v, p_in, p_out, seed=seed)
164
+ t(nx.gn_graph, n, seed=seed)
165
+ t(nx.gnr_graph, n, p, seed=seed)
166
+ t(nx.gnc_graph, n, seed=seed)
167
+ t(nx.scale_free_graph, n, seed=seed)
168
+ t(nx.directed.random_uniform_k_out_graph, n, k, seed=seed)
169
+ t(nx.random_k_out_graph, n, k, alpha, seed=seed)
170
+ N = 1000
171
+ t(nx.partial_duplication_graph, N, n, p, q, seed=seed)
172
+ t(nx.duplication_divergence_graph, n, p, seed=seed)
173
+ t(nx.random_geometric_graph, n, radius, seed=seed)
174
+ t(nx.soft_random_geometric_graph, n, radius, seed=seed)
175
+ t(nx.geographical_threshold_graph, n, theta, seed=seed)
176
+ t(nx.waxman_graph, n, seed=seed)
177
+ t(nx.navigable_small_world_graph, n, seed=seed)
178
+ t(nx.thresholded_random_geometric_graph, n, radius, theta, seed=seed)
179
+ t(nx.uniform_random_intersection_graph, n, m, p, seed=seed)
180
+ t(nx.k_random_intersection_graph, n, m, k, seed=seed)
181
+
182
+ t(nx.general_random_intersection_graph, n, 2, [0.1, 0.5], seed=seed)
183
+ t(nx.fast_gnp_random_graph, n, p, seed=seed)
184
+ t(nx.gnp_random_graph, n, p, seed=seed)
185
+ t(nx.dense_gnm_random_graph, n, m, seed=seed)
186
+ t(nx.gnm_random_graph, n, m, seed=seed)
187
+ t(nx.newman_watts_strogatz_graph, n, k, p, seed=seed)
188
+ t(nx.watts_strogatz_graph, n, k, p, seed=seed)
189
+ t(nx.connected_watts_strogatz_graph, n, k, p, seed=seed)
190
+ t(nx.random_regular_graph, 3, n, seed=seed)
191
+ t(nx.barabasi_albert_graph, n, m, seed=seed)
192
+ t(nx.extended_barabasi_albert_graph, n, m, p, q, seed=seed)
193
+ t(nx.powerlaw_cluster_graph, n, m, p, seed=seed)
194
+ t(nx.random_lobster, n, p1, p2, seed=seed)
195
+ t(nx.random_powerlaw_tree, n, seed=seed, tries=5000)
196
+ t(nx.random_powerlaw_tree_sequence, 10, seed=seed, tries=5000)
197
+ t(nx.random_tree, n, seed=seed)
198
+ t(nx.utils.powerlaw_sequence, n, seed=seed)
199
+ t(nx.utils.zipf_rv, 2.3, seed=seed)
200
+ cdist = [0.2, 0.4, 0.5, 0.7, 0.9, 1.0]
201
+ t(nx.utils.discrete_sequence, n, cdistribution=cdist, seed=seed)
202
+ t(nx.algorithms.bipartite.random_graph, n, m, p, seed=seed)
203
+ t(nx.algorithms.bipartite.gnmk_random_graph, n, m, k, seed=seed)
204
+ LFR = nx.generators.LFR_benchmark_graph
205
+ t(
206
+ LFR,
207
+ 25,
208
+ 3,
209
+ 1.5,
210
+ 0.1,
211
+ average_degree=3,
212
+ min_community=10,
213
+ seed=seed,
214
+ max_community=20,
215
+ )
216
+ t(nx.random_internet_as_graph, n, seed=seed)
217
+ # print("done")
218
+
219
+
220
+ # choose to test an integer seed, or whether a single RNG can be everywhere
221
+ # np_rng = np.random.RandomState(14)
222
+ # seed = np_rng
223
+ # seed = 14
224
+
225
+
226
+ @pytest.mark.slow
227
+ # print("NetworkX Version:", nx.__version__)
228
+ def test_rng_interface():
229
+ global progress
230
+
231
+ # try different kinds of seeds
232
+ for seed in [14, np.random.RandomState(14)]:
233
+ np.random.seed(42)
234
+ random.seed(42)
235
+ run_all_random_functions(seed)
236
+ progress = 0
237
+
238
+ # check that both global RNGs are unaffected
239
+ after_np_rv = np.random.rand()
240
+ # if np_rv != after_np_rv:
241
+ # print(np_rv, after_np_rv, "don't match np!")
242
+ assert np_rv == after_np_rv
243
+ after_py_rv = random.random()
244
+ # if py_rv != after_py_rv:
245
+ # print(py_rv, after_py_rv, "don't match py!")
246
+ assert py_rv == after_py_rv
247
+
248
+
249
+ # print("\nDone testing seed:", seed)
250
+
251
+ # test_rng_interface()
llmeval-env/lib/python3.10/site-packages/networkx/tests/test_convert.py ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import networkx as nx
4
+ from networkx.convert import (
5
+ from_dict_of_dicts,
6
+ from_dict_of_lists,
7
+ to_dict_of_dicts,
8
+ to_dict_of_lists,
9
+ to_networkx_graph,
10
+ )
11
+ from networkx.generators.classic import barbell_graph, cycle_graph
12
+ from networkx.utils import edges_equal, graphs_equal, nodes_equal
13
+
14
+
15
+ class TestConvert:
16
+ def edgelists_equal(self, e1, e2):
17
+ return sorted(sorted(e) for e in e1) == sorted(sorted(e) for e in e2)
18
+
19
+ def test_simple_graphs(self):
20
+ for dest, source in [
21
+ (to_dict_of_dicts, from_dict_of_dicts),
22
+ (to_dict_of_lists, from_dict_of_lists),
23
+ ]:
24
+ G = barbell_graph(10, 3)
25
+ G.graph = {}
26
+ dod = dest(G)
27
+
28
+ # Dict of [dicts, lists]
29
+ GG = source(dod)
30
+ assert graphs_equal(G, GG)
31
+ GW = to_networkx_graph(dod)
32
+ assert graphs_equal(G, GW)
33
+ GI = nx.Graph(dod)
34
+ assert graphs_equal(G, GI)
35
+
36
+ # With nodelist keyword
37
+ P4 = nx.path_graph(4)
38
+ P3 = nx.path_graph(3)
39
+ P4.graph = {}
40
+ P3.graph = {}
41
+ dod = dest(P4, nodelist=[0, 1, 2])
42
+ Gdod = nx.Graph(dod)
43
+ assert graphs_equal(Gdod, P3)
44
+
45
+ def test_exceptions(self):
46
+ # NX graph
47
+ class G:
48
+ adj = None
49
+
50
+ pytest.raises(nx.NetworkXError, to_networkx_graph, G)
51
+
52
+ # pygraphviz agraph
53
+ class G:
54
+ is_strict = None
55
+
56
+ pytest.raises(nx.NetworkXError, to_networkx_graph, G)
57
+
58
+ # Dict of [dicts, lists]
59
+ G = {"a": 0}
60
+ pytest.raises(TypeError, to_networkx_graph, G)
61
+
62
+ # list or generator of edges
63
+ class G:
64
+ next = None
65
+
66
+ pytest.raises(nx.NetworkXError, to_networkx_graph, G)
67
+
68
+ # no match
69
+ pytest.raises(nx.NetworkXError, to_networkx_graph, "a")
70
+
71
+ def test_digraphs(self):
72
+ for dest, source in [
73
+ (to_dict_of_dicts, from_dict_of_dicts),
74
+ (to_dict_of_lists, from_dict_of_lists),
75
+ ]:
76
+ G = cycle_graph(10)
77
+
78
+ # Dict of [dicts, lists]
79
+ dod = dest(G)
80
+ GG = source(dod)
81
+ assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes()))
82
+ assert edges_equal(sorted(G.edges()), sorted(GG.edges()))
83
+ GW = to_networkx_graph(dod)
84
+ assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes()))
85
+ assert edges_equal(sorted(G.edges()), sorted(GW.edges()))
86
+ GI = nx.Graph(dod)
87
+ assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes()))
88
+ assert edges_equal(sorted(G.edges()), sorted(GI.edges()))
89
+
90
+ G = cycle_graph(10, create_using=nx.DiGraph)
91
+ dod = dest(G)
92
+ GG = source(dod, create_using=nx.DiGraph)
93
+ assert sorted(G.nodes()) == sorted(GG.nodes())
94
+ assert sorted(G.edges()) == sorted(GG.edges())
95
+ GW = to_networkx_graph(dod, create_using=nx.DiGraph)
96
+ assert sorted(G.nodes()) == sorted(GW.nodes())
97
+ assert sorted(G.edges()) == sorted(GW.edges())
98
+ GI = nx.DiGraph(dod)
99
+ assert sorted(G.nodes()) == sorted(GI.nodes())
100
+ assert sorted(G.edges()) == sorted(GI.edges())
101
+
102
+ def test_graph(self):
103
+ g = nx.cycle_graph(10)
104
+ G = nx.Graph()
105
+ G.add_nodes_from(g)
106
+ G.add_weighted_edges_from((u, v, u) for u, v in g.edges())
107
+
108
+ # Dict of dicts
109
+ dod = to_dict_of_dicts(G)
110
+ GG = from_dict_of_dicts(dod, create_using=nx.Graph)
111
+ assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes()))
112
+ assert edges_equal(sorted(G.edges()), sorted(GG.edges()))
113
+ GW = to_networkx_graph(dod, create_using=nx.Graph)
114
+ assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes()))
115
+ assert edges_equal(sorted(G.edges()), sorted(GW.edges()))
116
+ GI = nx.Graph(dod)
117
+ assert sorted(G.nodes()) == sorted(GI.nodes())
118
+ assert sorted(G.edges()) == sorted(GI.edges())
119
+
120
+ # Dict of lists
121
+ dol = to_dict_of_lists(G)
122
+ GG = from_dict_of_lists(dol, create_using=nx.Graph)
123
+ # dict of lists throws away edge data so set it to none
124
+ enone = [(u, v, {}) for (u, v, d) in G.edges(data=True)]
125
+ assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes()))
126
+ assert edges_equal(enone, sorted(GG.edges(data=True)))
127
+ GW = to_networkx_graph(dol, create_using=nx.Graph)
128
+ assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes()))
129
+ assert edges_equal(enone, sorted(GW.edges(data=True)))
130
+ GI = nx.Graph(dol)
131
+ assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes()))
132
+ assert edges_equal(enone, sorted(GI.edges(data=True)))
133
+
134
+ def test_with_multiedges_self_loops(self):
135
+ G = cycle_graph(10)
136
+ XG = nx.Graph()
137
+ XG.add_nodes_from(G)
138
+ XG.add_weighted_edges_from((u, v, u) for u, v in G.edges())
139
+ XGM = nx.MultiGraph()
140
+ XGM.add_nodes_from(G)
141
+ XGM.add_weighted_edges_from((u, v, u) for u, v in G.edges())
142
+ XGM.add_edge(0, 1, weight=2) # multiedge
143
+ XGS = nx.Graph()
144
+ XGS.add_nodes_from(G)
145
+ XGS.add_weighted_edges_from((u, v, u) for u, v in G.edges())
146
+ XGS.add_edge(0, 0, weight=100) # self loop
147
+
148
+ # Dict of dicts
149
+ # with self loops, OK
150
+ dod = to_dict_of_dicts(XGS)
151
+ GG = from_dict_of_dicts(dod, create_using=nx.Graph)
152
+ assert nodes_equal(XGS.nodes(), GG.nodes())
153
+ assert edges_equal(XGS.edges(), GG.edges())
154
+ GW = to_networkx_graph(dod, create_using=nx.Graph)
155
+ assert nodes_equal(XGS.nodes(), GW.nodes())
156
+ assert edges_equal(XGS.edges(), GW.edges())
157
+ GI = nx.Graph(dod)
158
+ assert nodes_equal(XGS.nodes(), GI.nodes())
159
+ assert edges_equal(XGS.edges(), GI.edges())
160
+
161
+ # Dict of lists
162
+ # with self loops, OK
163
+ dol = to_dict_of_lists(XGS)
164
+ GG = from_dict_of_lists(dol, create_using=nx.Graph)
165
+ # dict of lists throws away edge data so set it to none
166
+ enone = [(u, v, {}) for (u, v, d) in XGS.edges(data=True)]
167
+ assert nodes_equal(sorted(XGS.nodes()), sorted(GG.nodes()))
168
+ assert edges_equal(enone, sorted(GG.edges(data=True)))
169
+ GW = to_networkx_graph(dol, create_using=nx.Graph)
170
+ assert nodes_equal(sorted(XGS.nodes()), sorted(GW.nodes()))
171
+ assert edges_equal(enone, sorted(GW.edges(data=True)))
172
+ GI = nx.Graph(dol)
173
+ assert nodes_equal(sorted(XGS.nodes()), sorted(GI.nodes()))
174
+ assert edges_equal(enone, sorted(GI.edges(data=True)))
175
+
176
+ # Dict of dicts
177
+ # with multiedges, OK
178
+ dod = to_dict_of_dicts(XGM)
179
+ GG = from_dict_of_dicts(dod, create_using=nx.MultiGraph, multigraph_input=True)
180
+ assert nodes_equal(sorted(XGM.nodes()), sorted(GG.nodes()))
181
+ assert edges_equal(sorted(XGM.edges()), sorted(GG.edges()))
182
+ GW = to_networkx_graph(dod, create_using=nx.MultiGraph, multigraph_input=True)
183
+ assert nodes_equal(sorted(XGM.nodes()), sorted(GW.nodes()))
184
+ assert edges_equal(sorted(XGM.edges()), sorted(GW.edges()))
185
+ GI = nx.MultiGraph(dod)
186
+ assert nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
187
+ assert sorted(XGM.edges()) == sorted(GI.edges())
188
+ GE = from_dict_of_dicts(dod, create_using=nx.MultiGraph, multigraph_input=False)
189
+ assert nodes_equal(sorted(XGM.nodes()), sorted(GE.nodes()))
190
+ assert sorted(XGM.edges()) != sorted(GE.edges())
191
+ GI = nx.MultiGraph(XGM)
192
+ assert nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
193
+ assert edges_equal(sorted(XGM.edges()), sorted(GI.edges()))
194
+ GM = nx.MultiGraph(G)
195
+ assert nodes_equal(sorted(GM.nodes()), sorted(G.nodes()))
196
+ assert edges_equal(sorted(GM.edges()), sorted(G.edges()))
197
+
198
+ # Dict of lists
199
+ # with multiedges, OK, but better write as DiGraph else you'll
200
+ # get double edges
201
+ dol = to_dict_of_lists(G)
202
+ GG = from_dict_of_lists(dol, create_using=nx.MultiGraph)
203
+ assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes()))
204
+ assert edges_equal(sorted(G.edges()), sorted(GG.edges()))
205
+ GW = to_networkx_graph(dol, create_using=nx.MultiGraph)
206
+ assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes()))
207
+ assert edges_equal(sorted(G.edges()), sorted(GW.edges()))
208
+ GI = nx.MultiGraph(dol)
209
+ assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes()))
210
+ assert edges_equal(sorted(G.edges()), sorted(GI.edges()))
211
+
212
+ def test_edgelists(self):
213
+ P = nx.path_graph(4)
214
+ e = [(0, 1), (1, 2), (2, 3)]
215
+ G = nx.Graph(e)
216
+ assert nodes_equal(sorted(G.nodes()), sorted(P.nodes()))
217
+ assert edges_equal(sorted(G.edges()), sorted(P.edges()))
218
+ assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
219
+
220
+ e = [(0, 1, {}), (1, 2, {}), (2, 3, {})]
221
+ G = nx.Graph(e)
222
+ assert nodes_equal(sorted(G.nodes()), sorted(P.nodes()))
223
+ assert edges_equal(sorted(G.edges()), sorted(P.edges()))
224
+ assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
225
+
226
+ e = ((n, n + 1) for n in range(3))
227
+ G = nx.Graph(e)
228
+ assert nodes_equal(sorted(G.nodes()), sorted(P.nodes()))
229
+ assert edges_equal(sorted(G.edges()), sorted(P.edges()))
230
+ assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True)))
231
+
232
+ def test_directed_to_undirected(self):
233
+ edges1 = [(0, 1), (1, 2), (2, 0)]
234
+ edges2 = [(0, 1), (1, 2), (0, 2)]
235
+ assert self.edgelists_equal(nx.Graph(nx.DiGraph(edges1)).edges(), edges1)
236
+ assert self.edgelists_equal(nx.Graph(nx.DiGraph(edges2)).edges(), edges1)
237
+ assert self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges1)).edges(), edges1)
238
+ assert self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges2)).edges(), edges1)
239
+
240
+ assert self.edgelists_equal(
241
+ nx.MultiGraph(nx.MultiDiGraph(edges1)).edges(), edges1
242
+ )
243
+ assert self.edgelists_equal(
244
+ nx.MultiGraph(nx.MultiDiGraph(edges2)).edges(), edges1
245
+ )
246
+
247
+ assert self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(), edges1)
248
+ assert self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(), edges1)
249
+
250
+ def test_attribute_dict_integrity(self):
251
+ # we must not replace dict-like graph data structures with dicts
252
+ G = nx.Graph()
253
+ G.add_nodes_from("abc")
254
+ H = to_networkx_graph(G, create_using=nx.Graph)
255
+ assert list(H.nodes) == list(G.nodes)
256
+ H = nx.DiGraph(G)
257
+ assert list(H.nodes) == list(G.nodes)
258
+
259
+ def test_to_edgelist(self):
260
+ G = nx.Graph([(1, 1)])
261
+ elist = nx.to_edgelist(G, nodelist=list(G))
262
+ assert edges_equal(G.edges(data=True), elist)
263
+
264
+ def test_custom_node_attr_dict_safekeeping(self):
265
+ class custom_dict(dict):
266
+ pass
267
+
268
+ class Custom(nx.Graph):
269
+ node_attr_dict_factory = custom_dict
270
+
271
+ g = nx.Graph()
272
+ g.add_node(1, weight=1)
273
+
274
+ h = Custom(g)
275
+ assert isinstance(g._node[1], dict)
276
+ assert isinstance(h._node[1], custom_dict)
277
+
278
+ # this raise exception
279
+ # h._node.update((n, dd.copy()) for n, dd in g.nodes.items())
280
+ # assert isinstance(h._node[1], custom_dict)
281
+
282
+
283
+ @pytest.mark.parametrize(
284
+ "edgelist",
285
+ (
286
+ # Graph with no edge data
287
+ [(0, 1), (1, 2)],
288
+ # Graph with edge data
289
+ [(0, 1, {"weight": 1.0}), (1, 2, {"weight": 2.0})],
290
+ ),
291
+ )
292
+ def test_to_dict_of_dicts_with_edgedata_param(edgelist):
293
+ G = nx.Graph()
294
+ G.add_edges_from(edgelist)
295
+ # Innermost dict value == edge_data when edge_data != None.
296
+ # In the case when G has edge data, it is overwritten
297
+ expected = {0: {1: 10}, 1: {0: 10, 2: 10}, 2: {1: 10}}
298
+ assert nx.to_dict_of_dicts(G, edge_data=10) == expected
299
+
300
+
301
+ def test_to_dict_of_dicts_with_edgedata_and_nodelist():
302
+ G = nx.path_graph(5)
303
+ nodelist = [2, 3, 4]
304
+ expected = {2: {3: 10}, 3: {2: 10, 4: 10}, 4: {3: 10}}
305
+ assert nx.to_dict_of_dicts(G, nodelist=nodelist, edge_data=10) == expected
306
+
307
+
308
+ def test_to_dict_of_dicts_with_edgedata_multigraph():
309
+ """Multi edge data overwritten when edge_data != None"""
310
+ G = nx.MultiGraph()
311
+ G.add_edge(0, 1, key="a")
312
+ G.add_edge(0, 1, key="b")
313
+ # Multi edge data lost when edge_data is not None
314
+ expected = {0: {1: 10}, 1: {0: 10}}
315
+ assert nx.to_dict_of_dicts(G, edge_data=10) == expected
316
+
317
+
318
+ def test_to_networkx_graph_non_edgelist():
319
+ invalid_edgelist = [1, 2, 3]
320
+ with pytest.raises(nx.NetworkXError, match="Input is not a valid edge list"):
321
+ nx.to_networkx_graph(invalid_edgelist)
llmeval-env/lib/python3.10/site-packages/networkx/tests/test_convert_pandas.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import networkx as nx
4
+ from networkx.utils import edges_equal, graphs_equal, nodes_equal
5
+
6
+ np = pytest.importorskip("numpy")
7
+ pd = pytest.importorskip("pandas")
8
+
9
+
10
class TestConvertPandas:
    """Conversions between pandas DataFrames (edgelist / adjacency forms)
    and NetworkX graphs, including round-trips and error cases."""

    def setup_method(self):
        # Fixed seed: the expected weight/cost values asserted in the
        # tests below depend on this exact RNG draw.
        self.rng = np.random.RandomState(seed=5)
        ints = self.rng.randint(1, 11, size=(3, 2))
        a = ["A", "B", "C"]
        b = ["D", "A", "E"]
        df = pd.DataFrame(ints, columns=["weight", "cost"])
        df[0] = a  # Column label 0 (int)
        df["b"] = b  # Column label 'b' (str)
        self.df = df

        # mdf appends a duplicate (A, D) edge row so MultiGraph tests can
        # exercise parallel edges.
        mdf = pd.DataFrame([[4, 16, "A", "D"]], columns=["weight", "cost", 0, "b"])
        self.mdf = pd.concat([df, mdf])

    def test_exceptions(self):
        """Malformed DataFrames are rejected by the converters."""
        G = pd.DataFrame(["a"])  # adj
        pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G)
        G = pd.DataFrame(["a", 0.0])  # elist
        pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G)
        # Adjacency requires index == columns; here they differ.
        df = pd.DataFrame([[1, 1], [1, 0]], dtype=int, index=[1, 2], columns=["a", "b"])
        pytest.raises(nx.NetworkXError, nx.from_pandas_adjacency, df)

    def test_from_edgelist_all_attr(self):
        """edge_attr=True pulls every non-source/target column in."""
        Gtrue = nx.Graph(
            [
                ("E", "C", {"cost": 9, "weight": 10}),
                ("B", "A", {"cost": 1, "weight": 7}),
                ("A", "D", {"cost": 7, "weight": 4}),
            ]
        )
        G = nx.from_pandas_edgelist(self.df, 0, "b", True)
        assert graphs_equal(G, Gtrue)
        # MultiGraph
        MGtrue = nx.MultiGraph(Gtrue)
        MGtrue.add_edge("A", "D", cost=16, weight=4)
        MG = nx.from_pandas_edgelist(self.mdf, 0, "b", True, nx.MultiGraph())
        assert graphs_equal(MG, MGtrue)

    def test_from_edgelist_multi_attr(self):
        """A list of column names selects multiple edge attributes."""
        Gtrue = nx.Graph(
            [
                ("E", "C", {"cost": 9, "weight": 10}),
                ("B", "A", {"cost": 1, "weight": 7}),
                ("A", "D", {"cost": 7, "weight": 4}),
            ]
        )
        G = nx.from_pandas_edgelist(self.df, 0, "b", ["weight", "cost"])
        assert graphs_equal(G, Gtrue)

    def test_from_edgelist_multi_attr_incl_target(self):
        """Source/target columns may themselves be requested as edge attrs."""
        Gtrue = nx.Graph(
            [
                ("E", "C", {0: "C", "b": "E", "weight": 10}),
                ("B", "A", {0: "B", "b": "A", "weight": 7}),
                ("A", "D", {0: "A", "b": "D", "weight": 4}),
            ]
        )
        G = nx.from_pandas_edgelist(self.df, 0, "b", [0, "b", "weight"])
        assert graphs_equal(G, Gtrue)

    def test_from_edgelist_multidigraph_and_edge_attr(self):
        # example from issue #2374
        edges = [
            ("X1", "X4", {"Co": "zA", "Mi": 0, "St": "X1"}),
            ("X1", "X4", {"Co": "zB", "Mi": 54, "St": "X2"}),
            ("X1", "X4", {"Co": "zB", "Mi": 49, "St": "X3"}),
            ("X1", "X4", {"Co": "zB", "Mi": 44, "St": "X4"}),
            ("Y1", "Y3", {"Co": "zC", "Mi": 0, "St": "Y1"}),
            ("Y1", "Y3", {"Co": "zC", "Mi": 34, "St": "Y2"}),
            ("Y1", "Y3", {"Co": "zC", "Mi": 29, "St": "X2"}),
            ("Y1", "Y3", {"Co": "zC", "Mi": 24, "St": "Y3"}),
            ("Z1", "Z3", {"Co": "zD", "Mi": 0, "St": "Z1"}),
            ("Z1", "Z3", {"Co": "zD", "Mi": 14, "St": "X3"}),
        ]
        Gtrue = nx.MultiDiGraph(edges)
        data = {
            "O": ["X1", "X1", "X1", "X1", "Y1", "Y1", "Y1", "Y1", "Z1", "Z1"],
            "D": ["X4", "X4", "X4", "X4", "Y3", "Y3", "Y3", "Y3", "Z3", "Z3"],
            "St": ["X1", "X2", "X3", "X4", "Y1", "Y2", "X2", "Y3", "Z1", "X3"],
            "Co": ["zA", "zB", "zB", "zB", "zC", "zC", "zC", "zC", "zD", "zD"],
            "Mi": [0, 54, 49, 44, 0, 34, 29, 24, 0, 14],
        }
        df = pd.DataFrame.from_dict(data)
        # edge_attr=True and an explicit column list must agree.
        G1 = nx.from_pandas_edgelist(
            df, source="O", target="D", edge_attr=True, create_using=nx.MultiDiGraph
        )
        G2 = nx.from_pandas_edgelist(
            df,
            source="O",
            target="D",
            edge_attr=["St", "Co", "Mi"],
            create_using=nx.MultiDiGraph,
        )
        assert graphs_equal(G1, Gtrue)
        assert graphs_equal(G2, Gtrue)

    def test_from_edgelist_one_attr(self):
        """A single string selects exactly one attribute column."""
        Gtrue = nx.Graph(
            [
                ("E", "C", {"weight": 10}),
                ("B", "A", {"weight": 7}),
                ("A", "D", {"weight": 4}),
            ]
        )
        G = nx.from_pandas_edgelist(self.df, 0, "b", "weight")
        assert graphs_equal(G, Gtrue)

    def test_from_edgelist_int_attr_name(self):
        # note: this also tests that edge_attr can be `source`
        Gtrue = nx.Graph(
            [("E", "C", {0: "C"}), ("B", "A", {0: "B"}), ("A", "D", {0: "A"})]
        )
        G = nx.from_pandas_edgelist(self.df, 0, "b", 0)
        assert graphs_equal(G, Gtrue)

    def test_from_edgelist_invalid_attr(self):
        """Unknown attribute columns raise NetworkXError."""
        pytest.raises(
            nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", "misspell"
        )
        pytest.raises(nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", 1)
        # see Issue #3562
        edgeframe = pd.DataFrame([[0, 1], [1, 2], [2, 0]], columns=["s", "t"])
        pytest.raises(
            nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", True
        )
        pytest.raises(
            nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", "weight"
        )
        pytest.raises(
            nx.NetworkXError,
            nx.from_pandas_edgelist,
            edgeframe,
            "s",
            "t",
            ["weight", "size"],
        )

    def test_from_edgelist_no_attr(self):
        """Default edge_attr=None yields attribute-free edges."""
        Gtrue = nx.Graph([("E", "C", {}), ("B", "A", {}), ("A", "D", {})])
        G = nx.from_pandas_edgelist(self.df, 0, "b")
        assert graphs_equal(G, Gtrue)

    def test_from_edgelist(self):
        # Pandas DataFrame
        G = nx.cycle_graph(10)
        G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges))

        # Build an edgelist frame by hand and check both entry points
        # (from_pandas_edgelist and the generic to_networkx_graph) agree.
        edgelist = nx.to_edgelist(G)
        source = [s for s, t, d in edgelist]
        target = [t for s, t, d in edgelist]
        weight = [d["weight"] for s, t, d in edgelist]
        edges = pd.DataFrame({"source": source, "target": target, "weight": weight})

        GG = nx.from_pandas_edgelist(edges, edge_attr="weight")
        assert nodes_equal(G.nodes(), GG.nodes())
        assert edges_equal(G.edges(), GG.edges())
        GW = nx.to_networkx_graph(edges, create_using=nx.Graph)
        assert nodes_equal(G.nodes(), GW.nodes())
        assert edges_equal(G.edges(), GW.edges())

    def test_to_edgelist_default_source_or_target_col_exists(self):
        """An edge attribute named 'source'/'target' collides with the
        default output columns and must raise."""
        G = nx.path_graph(10)
        G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges))
        nx.set_edge_attributes(G, 0, name="source")
        pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G)

        # drop source column to test an exception raised for the target column
        for u, v, d in G.edges(data=True):
            d.pop("source", None)

        nx.set_edge_attributes(G, 0, name="target")
        pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G)

    def test_to_edgelist_custom_source_or_target_col_exists(self):
        """The same collision check applies to user-supplied column names."""
        G = nx.path_graph(10)
        G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges))
        nx.set_edge_attributes(G, 0, name="source_col_name")
        pytest.raises(
            nx.NetworkXError, nx.to_pandas_edgelist, G, source="source_col_name"
        )

        # drop source column to test an exception raised for the target column
        for u, v, d in G.edges(data=True):
            d.pop("source_col_name", None)

        nx.set_edge_attributes(G, 0, name="target_col_name")
        pytest.raises(
            nx.NetworkXError, nx.to_pandas_edgelist, G, target="target_col_name"
        )

    def test_to_edgelist_edge_key_col_exists(self):
        """An attribute clashing with the edge_key column name must raise."""
        G = nx.path_graph(10, create_using=nx.MultiGraph)
        G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges()))
        nx.set_edge_attributes(G, 0, name="edge_key_name")
        pytest.raises(
            nx.NetworkXError, nx.to_pandas_edgelist, G, edge_key="edge_key_name"
        )

    def test_from_adjacency(self):
        # Self-loop (1, 1) appears on the diagonal of the adjacency frame.
        nodelist = [1, 2]
        dftrue = pd.DataFrame(
            [[1, 1], [1, 0]], dtype=int, index=nodelist, columns=nodelist
        )
        G = nx.Graph([(1, 1), (1, 2)])
        df = nx.to_pandas_adjacency(G, dtype=int)
        pd.testing.assert_frame_equal(df, dftrue)

    @pytest.mark.parametrize("graph", [nx.Graph, nx.MultiGraph])
    def test_roundtrip(self, graph):
        # edgelist
        Gtrue = graph([(1, 1), (1, 2)])
        df = nx.to_pandas_edgelist(Gtrue)
        G = nx.from_pandas_edgelist(df, create_using=graph)
        assert graphs_equal(Gtrue, G)
        # adjacency
        adj = {1: {1: {"weight": 1}, 2: {"weight": 1}}, 2: {1: {"weight": 1}}}
        Gtrue = graph(adj)
        df = nx.to_pandas_adjacency(Gtrue, dtype=int)
        G = nx.from_pandas_adjacency(df, create_using=graph)
        assert graphs_equal(Gtrue, G)

    def test_from_adjacency_named(self):
        # example from issue #3105
        data = {
            "A": {"A": 0, "B": 0, "C": 0},
            "B": {"A": 1, "B": 0, "C": 0},
            "C": {"A": 0, "B": 1, "C": 0},
        }
        dftrue = pd.DataFrame(data, dtype=np.intp)
        # Feed the columns in a shuffled order; the round-trip must
        # still reproduce the canonical frame.
        df = dftrue[["A", "C", "B"]]
        G = nx.from_pandas_adjacency(df, create_using=nx.DiGraph())
        df = nx.to_pandas_adjacency(G, dtype=np.intp)
        pd.testing.assert_frame_equal(df, dftrue)

    def test_edgekey_with_multigraph(self):
        df = pd.DataFrame(
            {
                "source": {"A": "N1", "B": "N2", "C": "N1", "D": "N1"},
                "target": {"A": "N2", "B": "N3", "C": "N1", "D": "N2"},
                "attr1": {"A": "F1", "B": "F2", "C": "F3", "D": "F4"},
                "attr2": {"A": 1, "B": 0, "C": 0, "D": 0},
                "attr3": {"A": 0, "B": 1, "C": 0, "D": 1},
            }
        )
        Gtrue = nx.MultiGraph(
            [
                ("N1", "N2", "F1", {"attr2": 1, "attr3": 0}),
                ("N2", "N3", "F2", {"attr2": 0, "attr3": 1}),
                ("N1", "N1", "F3", {"attr2": 0, "attr3": 0}),
                ("N1", "N2", "F4", {"attr2": 0, "attr3": 1}),
            ]
        )
        # example from issue #4065
        G = nx.from_pandas_edgelist(
            df,
            source="source",
            target="target",
            edge_attr=["attr2", "attr3"],
            edge_key="attr1",
            create_using=nx.MultiGraph(),
        )
        assert graphs_equal(G, Gtrue)

        # Round-trip: the edge keys come back out as the attr1 column.
        df_roundtrip = nx.to_pandas_edgelist(G, edge_key="attr1")
        df_roundtrip = df_roundtrip.sort_values("attr1")
        df_roundtrip.index = ["A", "B", "C", "D"]
        pd.testing.assert_frame_equal(
            df, df_roundtrip[["source", "target", "attr1", "attr2", "attr3"]]
        )

    def test_edgekey_with_normal_graph_no_action(self):
        """edge_key is silently ignored for non-multigraphs."""
        Gtrue = nx.Graph(
            [
                ("E", "C", {"cost": 9, "weight": 10}),
                ("B", "A", {"cost": 1, "weight": 7}),
                ("A", "D", {"cost": 7, "weight": 4}),
            ]
        )
        G = nx.from_pandas_edgelist(self.df, 0, "b", True, edge_key="weight")
        assert graphs_equal(G, Gtrue)

    def test_nonexisting_edgekey_raises(self):
        """Referencing a missing edge_key column raises NetworkXError."""
        with pytest.raises(nx.exception.NetworkXError):
            nx.from_pandas_edgelist(
                self.df,
                source="source",
                target="target",
                edge_key="Not_real",
                edge_attr=True,
                create_using=nx.MultiGraph(),
            )
301
+
302
+
303
def test_to_pandas_adjacency_with_nodelist():
    """``nodelist`` selects both the rows and columns of the adjacency frame."""
    graph = nx.complete_graph(5)
    keep = [1, 4]
    # K5 restricted to {1, 4} is a single edge: off-diagonal ones only.
    want = pd.DataFrame(
        [[0, 1], [1, 0]], dtype=int, index=keep, columns=keep
    )
    pd.testing.assert_frame_equal(
        want, nx.to_pandas_adjacency(graph, keep, dtype=int)
    )
312
+
313
+
314
def test_to_pandas_edgelist_with_nodelist():
    """Edges outside the requested ``nodelist`` are excluded from the frame."""
    graph = nx.Graph()
    graph.add_edges_from([(0, 1), (1, 2), (1, 3)], weight=2.0)
    graph.add_edge(0, 5, weight=100)
    frame = nx.to_pandas_edgelist(graph, nodelist=[1, 2])
    # Node 0 never appears as a source, and the (0, 5) edge's weight is absent.
    assert 0 not in frame["source"].to_numpy()
    assert 100 not in frame["weight"].to_numpy()