applied-ai-018 commited on
Commit
42f831d
·
verified ·
1 Parent(s): 3f11b2b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/exceptions.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/lazy.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/reference.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/tzfile.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/tzinfo.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Aden +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Beirut +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Macao +0 -0
  10. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Novosibirsk +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Pontianak +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Antananarivo +0 -0
  13. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Chagos +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Christmas +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Cocos +0 -0
  16. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Comoro +0 -0
  17. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Kerguelen +0 -0
  18. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mahe +0 -0
  19. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Maldives +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mauritius +0 -0
  21. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mayotte +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Reunion +0 -0
  23. env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/INSTALLER +1 -0
  24. env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/LICENSE.txt +201 -0
  25. env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/METADATA +1124 -0
  26. env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/RECORD +75 -0
  27. env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/WHEEL +5 -0
  28. env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/entry_points.txt +2 -0
  29. env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/top_level.txt +1 -0
  30. env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER +1 -0
  31. env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/METADATA +154 -0
  32. env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD +75 -0
  33. env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL +4 -0
  34. env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/licenses/LICENSE.txt +21 -0
  35. env-llmeval/lib/python3.10/site-packages/urllib3/__init__.py +211 -0
  36. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/__init__.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_base_connection.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_collections.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_request_methods.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_version.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/connection.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/connectionpool.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/exceptions.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/fields.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/filepost.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/http2.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/poolmanager.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/response.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/urllib3/_base_connection.py +172 -0
  50. env-llmeval/lib/python3.10/site-packages/urllib3/_collections.py +483 -0
env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (26.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/lazy.cpython-310.pyc ADDED
Binary file (4.72 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/reference.cpython-310.pyc ADDED
Binary file (3.85 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/tzfile.cpython-310.pyc ADDED
Binary file (3.14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/__pycache__/tzinfo.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Aden ADDED
Binary file (151 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Beirut ADDED
Binary file (2.15 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Macao ADDED
Binary file (1.23 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Novosibirsk ADDED
Binary file (1.21 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Pontianak ADDED
Binary file (353 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Antananarivo ADDED
Binary file (265 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Chagos ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Christmas ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Cocos ADDED
Binary file (254 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Comoro ADDED
Binary file (265 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Kerguelen ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mahe ADDED
Binary file (151 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Maldives ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mauritius ADDED
Binary file (227 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mayotte ADDED
Binary file (265 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Reunion ADDED
Binary file (151 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/METADATA ADDED
@@ -0,0 +1,1124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: sacrebleu
3
+ Version: 2.4.2
4
+ Summary: Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores
5
+ Home-page: https://github.com/mjpost/sacrebleu
6
+ Author: Matt Post
7
+ Author-email: [email protected]
8
+ Maintainer-email: [email protected]
9
+ License: Apache License 2.0
10
+ Keywords: machine translation, evaluation, NLP, natural language processing, computational linguistics
11
+ Classifier: Development Status :: 5 - Production/Stable
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: Topic :: Scientific/Engineering
15
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
16
+ Classifier: Topic :: Text Processing
17
+ Classifier: License :: OSI Approved :: Apache Software License
18
+ Classifier: Operating System :: POSIX
19
+ Classifier: Operating System :: MacOS :: MacOS X
20
+ Classifier: Operating System :: Microsoft :: Windows
21
+ Classifier: Programming Language :: Python :: 3 :: Only
22
+ Classifier: Typing :: Typed
23
+ Requires-Python: >=3.6
24
+ Description-Content-Type: text/markdown
25
+ License-File: LICENSE.txt
26
+ Requires-Dist: portalocker
27
+ Requires-Dist: regex
28
+ Requires-Dist: tabulate (>=0.8.9)
29
+ Requires-Dist: numpy (>=1.17)
30
+ Requires-Dist: colorama
31
+ Requires-Dist: lxml
32
+ Provides-Extra: dev
33
+ Requires-Dist: wheel ; extra == 'dev'
34
+ Requires-Dist: pytest ; extra == 'dev'
35
+ Requires-Dist: mypy ; extra == 'dev'
36
+ Requires-Dist: types-tabulate ; extra == 'dev'
37
+ Requires-Dist: lxml-stubs ; extra == 'dev'
38
+ Provides-Extra: ja
39
+ Requires-Dist: mecab-python3 (<=1.0.6,>=1.0.5) ; extra == 'ja'
40
+ Requires-Dist: ipadic (<2.0,>=1.0) ; extra == 'ja'
41
+ Provides-Extra: ko
42
+ Requires-Dist: mecab-ko (<=1.0.1,>=1.0.0) ; extra == 'ko'
43
+ Requires-Dist: mecab-ko-dic (<2.0,>=1.0) ; extra == 'ko'
44
+
45
+ # sacreBLEU
46
+
47
+ [![PyPI version](https://img.shields.io/pypi/v/sacrebleu)](https://img.shields.io/pypi/v/sacrebleu)
48
+ [![Python version](https://img.shields.io/pypi/pyversions/sacrebleu)](https://img.shields.io/pypi/pyversions/sacrebleu)
49
+ [![GitHub issues](https://img.shields.io/github/issues/mjpost/sacreBLEU.svg)](https://github.com/mjpost/sacrebleu/issues)
50
+
51
+ SacreBLEU ([Post, 2018](http://aclweb.org/anthology/W18-6319)) provides hassle-free computation of shareable, comparable, and reproducible **BLEU** scores.
52
+ Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
53
+ It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
54
+
55
+ The official version is hosted at <https://github.com/mjpost/sacrebleu>.
56
+
57
+ # Motivation
58
+
59
+ Comparing BLEU scores is harder than it should be. Every decoder has its own implementation, often borrowed from Moses, but maybe with subtle changes.
60
+ Moses itself has a number of implementations as standalone scripts, with little indication of how they differ (note: they mostly don't, but `multi-bleu.pl` expects tokenized input). Different flags passed to each of these scripts can produce wide swings in the final score. All of these may handle tokenization in different ways. On top of this, downloading and managing test sets is a moderate annoyance.
61
+
62
+ Sacre bleu! What a mess.
63
+
64
+ **SacreBLEU** aims to solve these problems by wrapping the original reference implementation ([Papineni et al., 2002](https://www.aclweb.org/anthology/P02-1040.pdf)) together with other useful features.
65
+ The defaults are set the way that BLEU should be computed, and furthermore, the script outputs a short version string that allows others to know exactly what you did.
66
+ As an added bonus, it automatically downloads and manages test sets for you, so that you can simply tell it to score against `wmt14`, without having to hunt down a path on your local file system.
67
+ It is all designed to take BLEU a little more seriously.
68
+ After all, even with all its problems, BLEU is the default and---admit it---well-loved metric of our entire research community.
69
+ Sacre BLEU.
70
+
71
+ # Features
72
+
73
+ - It automatically downloads common WMT test sets and processes them to plain text
74
+ - It produces a short version string that facilitates cross-paper comparisons
75
+ - It properly computes scores on detokenized outputs, using WMT ([Conference on Machine Translation](http://statmt.org/wmt17)) standard tokenization
76
+ - It produces the same values as the official script (`mteval-v13a.pl`) used by WMT
77
+ - It outputs the BLEU score without the comma, so you don't have to remove it with `sed` (Looking at you, `multi-bleu.perl`)
78
+ - It supports different tokenizers for BLEU including support for Japanese and Chinese
79
+ - It supports **chrF, chrF++** and **Translation error rate (TER)** metrics
80
+ - It performs paired bootstrap resampling and paired approximate randomization tests for statistical significance reporting
81
+
82
+ # Breaking Changes
83
+
84
+ ## v2.0.0
85
+
86
+ As of v2.0.0, the default output format is changed to `json` for less painful parsing experience. This means that software that parse the output of sacreBLEU should be modified to either (i) parse the JSON using for example the `jq` utility or (ii) pass `-f text` to sacreBLEU to preserve the old textual output. The latter change can also be made **persistently** by exporting `SACREBLEU_FORMAT=text` in relevant shell configuration files.
87
+
88
+ Here's an example of parsing the `score` key of the JSON output using `jq`:
89
+
90
+ ```
91
+ $ sacrebleu -i output.detok.txt -t wmt17 -l en-de | jq -r .score
92
+ 20.8
93
+ ```
94
+
95
+ # Installation
96
+
97
+ Install the official Python module from PyPI (**Python>=3.6 only**):
98
+
99
+ pip install sacrebleu
100
+
101
+ In order to install Japanese tokenizer support through `mecab-python3`, you need to run the
102
+ following command instead, to perform a full installation with dependencies:
103
+
104
+ pip install "sacrebleu[ja]"
105
+
106
+ In order to install Korean tokenizer support through `pymecab-ko`, you need to run the
107
+ following command instead, to perform a full installation with dependencies:
108
+
109
+ pip install "sacrebleu[ko]"
110
+
111
+ # Command-line Usage
112
+
113
+ You can get a list of available test sets with `sacrebleu --list`. Please see [DATASETS.md](DATASETS.md)
114
+ for an up-to-date list of supported datasets. You can also list available test sets for a given language pair
115
+ with `sacrebleu --list -l en-fr`.
116
+
117
+ ## Basics
118
+
119
+ ### Downloading test sets
120
+
121
+ Downloading is triggered when you request a test set. If the dataset is not available, it is downloaded
122
+ and unpacked.
123
+
124
+ E.g., you can use the following commands to download the source, pass it through your translation system
125
+ in `translate.sh`, and then score it:
126
+
127
+ ```
128
+ $ sacrebleu -t wmt17 -l en-de --echo src > wmt17.en-de.en
129
+ $ cat wmt17.en-de.en | translate.sh | sacrebleu -t wmt17 -l en-de
130
+ ```
131
+
132
+ Some test sets also have the outputs of systems that were submitted to the task.
133
+ For example, the `wmt/systems` test set.
134
+
135
+ ```bash
136
+ $ sacrebleu -t wmt21/systems -l zh-en --echo NiuTrans
137
+ ```
138
+
139
+ This provides a convenient way to score:
140
+
141
+ ```bash
142
+ $ sacrebleu -t wmt21/system -l zh-en --echo NiuTrans | sacrebleu -t wmt21/systems -l zh-en
143
+ ``
144
+
145
+ You can see a list of the available outputs by passing an invalid value to `--echo`.
146
+
147
+ ### JSON output
148
+
149
+ As of version `>=2.0.0`, sacreBLEU prints the computed scores in JSON format to make parsing less painful:
150
+
151
+ ```
152
+ $ sacrebleu -i output.detok.txt -t wmt17 -l en-de
153
+ ```
154
+
155
+ ```json
156
+ {
157
+ "name": "BLEU",
158
+ "score": 20.8,
159
+ "signature": "nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0",
160
+ "verbose_score": "54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)",
161
+ "nrefs": "1",
162
+ "case": "mixed",
163
+ "eff": "no",
164
+ "tok": "13a",
165
+ "smooth": "exp",
166
+ "version": "2.0.0"
167
+ }
168
+ ```
169
+
170
+ If you want to keep the old behavior, you can pass `-f text` or export `SACREBLEU_FORMAT=text`:
171
+
172
+ ```
173
+ $ sacrebleu -i output.detok.txt -t wmt17 -l en-de -f text
174
+ BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
175
+ ```
176
+
177
+ ### Scoring
178
+
179
+ (All examples below assume old-style text output for a compact representation that save space)
180
+
181
+ Let's say that you just translated the `en-de` test set of WMT17 with your fancy MT system and the **detokenized** translations are in a file called `output.detok.txt`:
182
+
183
+ ```
184
+ # Option 1: Redirect system output to STDIN
185
+ $ cat output.detok.txt | sacrebleu -t wmt17 -l en-de
186
+ BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
187
+
188
+ # Option 2: Use the --input/-i argument
189
+ $ sacrebleu -t wmt17 -l en-de -i output.detok.txt
190
+ BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
191
+ ```
192
+
193
+ You can obtain a short version of the signature with `--short/-sh`:
194
+
195
+ ```
196
+ $ sacrebleu -t wmt17 -l en-de -i output.detok.txt -sh
197
+ BLEU|#:1|c:mixed|e:no|tok:13a|s:exp|v:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
198
+ ```
199
+
200
+ If you only want the score to be printed, you can use the `--score-only/-b` flag:
201
+
202
+ ```
203
+ $ sacrebleu -t wmt17 -l en-de -i output.detok.txt -b
204
+ 20.8
205
+ ```
206
+
207
+ The precision of the scores can be configured via the `--width/-w` flag:
208
+
209
+ ```
210
+ $ sacrebleu -t wmt17 -l en-de -i output.detok.txt -b -w 4
211
+ 20.7965
212
+ ```
213
+
214
+ ### Using your own reference file
215
+
216
+ SacreBLEU knows about common test sets (as detailed in the `--list` example above), but you can also use it to score system outputs with arbitrary references. In this case, do not forget to provide **detokenized** reference and hypotheses files:
217
+
218
+ ```
219
+ # Let's save the reference to a text file
220
+ $ sacrebleu -t wmt17 -l en-de --echo ref > ref.detok.txt
221
+
222
+ # Option 1: Pass the reference file as a positional argument to sacreBLEU
223
+ $ sacrebleu ref.detok.txt -i output.detok.txt -m bleu -b -w 4
224
+ 20.7965
225
+
226
+ # Option 2: Redirect the system into STDIN (Compatible with multi-bleu.perl way of doing things)
227
+ $ cat output.detok.txt | sacrebleu ref.detok.txt -m bleu -b -w 4
228
+ 20.7965
229
+ ```
230
+
231
+ ### Using multiple metrics
232
+
233
+ Let's first compute BLEU, chrF and TER with the default settings:
234
+
235
+ ```
236
+ $ sacrebleu -t wmt17 -l en-de -i output.detok.txt -m bleu chrf ter
237
+ BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 <stripped>
238
+ chrF2|nrefs:1|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0 = 52.0
239
+ TER|nrefs:1|case:lc|tok:tercom|norm:no|punct:yes|asian:no|version:2.0.0 = 69.0
240
+ ```
241
+
242
+ Let's now enable `chrF++` which is a revised version of chrF that takes into account word n-grams.
243
+ Observe how the `nw:0` gets changed into `nw:2` in the signature:
244
+
245
+ ```
246
+ $ sacrebleu -t wmt17 -l en-de -i output.detok.txt -m bleu chrf ter --chrf-word-order 2
247
+ BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 <stripped>
248
+ chrF2++|nrefs:1|case:mixed|eff:yes|nc:6|nw:2|space:no|version:2.0.0 = 49.0
249
+ TER|nrefs:1|case:lc|tok:tercom|norm:no|punct:yes|asian:no|version:2.0.0 = 69.0
250
+ ```
251
+
252
+ Metric-specific arguments are detailed in the output of `--help`:
253
+
254
+ ```
255
+ BLEU related arguments:
256
+ --smooth-method {none,floor,add-k,exp}, -s {none,floor,add-k,exp}
257
+ Smoothing method: exponential decay, floor (increment zero counts), add-k (increment num/denom by k for n>1), or none. (Default: exp)
258
+ --smooth-value BLEU_SMOOTH_VALUE, -sv BLEU_SMOOTH_VALUE
259
+ The smoothing value. Only valid for floor and add-k. (Defaults: floor: 0.1, add-k: 1)
260
+ --tokenize {none,zh,13a,char,intl,ja-mecab,ko-mecab}, -tok {none,zh,13a,char,intl,ja-mecab,ko-mecab}
261
+ Tokenization method to use for BLEU. If not provided, defaults to `zh` for Chinese, `ja-mecab` for Japanese, `ko-mecab` for Korean and `13a` (mteval) otherwise.
262
+ --lowercase, -lc If True, enables case-insensitivity. (Default: False)
263
+ --force Insist that your tokenized input is actually detokenized.
264
+
265
+ chrF related arguments:
266
+ --chrf-char-order CHRF_CHAR_ORDER, -cc CHRF_CHAR_ORDER
267
+ Character n-gram order. (Default: 6)
268
+ --chrf-word-order CHRF_WORD_ORDER, -cw CHRF_WORD_ORDER
269
+ Word n-gram order (Default: 0). If equals to 2, the metric is referred to as chrF++.
270
+ --chrf-beta CHRF_BETA
271
+ Determine the importance of recall w.r.t precision. (Default: 2)
272
+ --chrf-whitespace Include whitespaces when extracting character n-grams. (Default: False)
273
+ --chrf-lowercase Enable case-insensitivity. (Default: False)
274
+ --chrf-eps-smoothing Enables epsilon smoothing similar to chrF++.py, NLTK and Moses; instead of effective order smoothing. (Default: False)
275
+
276
+ TER related arguments (The defaults replicate TERCOM's behavior):
277
+ --ter-case-sensitive Enables case sensitivity (Default: False)
278
+ --ter-asian-support Enables special treatment of Asian characters (Default: False)
279
+ --ter-no-punct Removes punctuation. (Default: False)
280
+ --ter-normalized Applies basic normalization and tokenization. (Default: False)
281
+ ```
282
+
283
+ ### Version Signatures
284
+ As you may have noticed, sacreBLEU generates version strings such as `BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0` for reproducibility reasons. It's strongly recommended to share these signatures in your papers!
285
+
286
+ ### Outputting other metadata
287
+
288
+ Sacrebleu knows about metadata for some test sets, and you can output it like this:
289
+
290
+ ```
291
+ $ sacrebleu -t wmt21 -l en-de --echo src docid ref | head -n 2
292
+ Couple MACED at California dog park for not wearing face masks while having lunch (VIDEO) - RT USA News rt.com.131279 Paar in Hundepark in Kalifornien mit Pfefferspray besprüht, weil es beim Mittagessen keine Masken trug (VIDEO) - RT USA News
293
+ There's mask-shaming and then there's full on assault. rt.com.131279 Masken-Shaming ist eine Sache, Körperverletzung eine andere.
294
+ ```
295
+
296
+ If multiple fields are requested, they are output as tab-separated columns (a TSV).
297
+
298
+ To see the available fields, add `--echo asdf` (or some other garbage data):
299
+
300
+ ```
301
+ $ sacrebleu -t wmt21 -l en-de --echo asdf
302
+ sacreBLEU: No such field asdf in test set wmt21 for language pair en-de.
303
+ sacreBLEU: available fields for wmt21/en-de: src, ref:A, ref, docid, origlang
304
+ ```
305
+
306
+ ## Translationese Support
307
+
308
+ If you are interested in the translationese effect, you can evaluate BLEU on a subset of sentences
309
+ with a given original language (identified based on the `origlang` tag in the raw SGM files).
310
+ E.g., to evaluate only against originally German sentences translated to English use:
311
+
312
+ $ sacrebleu -t wmt13 -l de-en --origlang=de -i my-wmt13-output.txt
313
+
314
+ and to evaluate against the complement (in this case `origlang` en, fr, cs, ru, de) use:
315
+
316
+ $ sacrebleu -t wmt13 -l de-en --origlang=non-de -i my-wmt13-output.txt
317
+
318
+ **Please note** that the evaluator will return a BLEU score only on the requested subset,
319
+ but it expects that you pass through the entire translated test set.
320
+
321
+ ## Languages & Preprocessing
322
+
323
+ ### BLEU
324
+
325
+ - You can compute case-insensitive BLEU by passing `--lowercase` to sacreBLEU
326
+ - The default tokenizer for BLEU is `13a` which mimics the `mteval-v13a` script from Moses.
327
+ - Other tokenizers are:
328
+ - `none` which will not apply any kind of tokenization at all
329
+ - `char` for language-agnostic character-level tokenization
330
+ - `intl` applies international tokenization and mimics the `mteval-v14` script from Moses
331
+ - `zh` separates out **Chinese** characters and tokenizes the non-Chinese parts using `13a` tokenizer
332
+ - `ja-mecab` tokenizes **Japanese** inputs using the [MeCab](https://pypi.org/project/mecab-python3) morphological analyzer
333
+ - `ko-mecab` tokenizes **Korean** inputs using the [MeCab-ko](https://pypi.org/project/mecab-ko) morphological analyzer
334
+ - `flores101` and `flores200` use the SentencePiece models built from the Flores-101 and [Flores-200](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) datasets, respectively. Note: the canonical .spm file will be automatically fetched if not found locally.
335
+ - You can switch tokenizers using the `--tokenize` flag of sacreBLEU. Alternatively, if you provide language-pair strings
336
+ using `--language-pair/-l`, `zh`, `ja-mecab` and `ko-mecab` tokenizers will be used if the target language is `zh` or `ja` or `ko`, respectively.
337
+ - **Note that** there's no automatic language detection from the hypotheses so you need to make sure that you are correctly
338
+ selecting the tokenizer for **Japanese**, **Korean** and **Chinese**.
339
+
340
+
341
+ Default 13a tokenizer will produce poor results for Japanese:
342
+
343
+ ```
344
+ $ sacrebleu kyoto-test.ref.ja -i kyoto-test.hyp.ja -b
345
+ 2.1
346
+ ```
347
+
348
+ Let's use the `ja-mecab` tokenizer:
349
+ ```
350
+ $ sacrebleu kyoto-test.ref.ja -i kyoto-test.hyp.ja --tokenize ja-mecab -b
351
+ 14.5
352
+ ```
353
+
354
+ If you provide the language-pair, sacreBLEU will use ja-mecab automatically:
355
+
356
+ ```
357
+ $ sacrebleu kyoto-test.ref.ja -i kyoto-test.hyp.ja -l en-ja -b
358
+ 14.5
359
+ ```
360
+
361
+ ### chrF / chrF++
362
+
363
+ chrF applies minimal to no pre-processing as it deals with character n-grams:
364
+
365
+ - If you pass `--chrf-whitespace`, whitespace characters will be preserved when computing character n-grams.
366
+ - If you pass `--chrf-lowercase`, sacreBLEU will compute case-insensitive chrF.
367
+ - If you enable non-zero `--chrf-word-order` (pass `2` for `chrF++`), a very simple punctuation tokenization will be internally applied.
368
+
369
+
370
+ ### TER
371
+
372
+ Translation Error Rate (TER) has its own special tokenizer that you can configure through the command line.
373
+ The defaults provided are **compatible with the upstream TER implementation (TERCOM)** but you can nevertheless modify the
374
+ behavior through the command-line:
375
+
376
+ - TER is by default case-insensitive. Pass `--ter-case-sensitive` to enable case-sensitivity.
377
+ - Pass `--ter-normalize` to apply a general Western tokenization
378
+ - Pass `--ter-asian-support` to enable the tokenization of Asian characters. If provided with `--ter-normalize`,
379
+ both will be applied.
380
+ - Pass `--ter-no-punct` to strip punctuation.
381
+
382
+ ## Multi-reference Evaluation
383
+
384
+ All three metrics support the use of multiple references during evaluation. Let's first pass all references as positional arguments:
385
+
386
+ ```
387
+ $ sacrebleu ref1 ref2 -i system -m bleu chrf ter
388
+ BLEU|nrefs:2|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 61.8 <stripped>
389
+ chrF2|nrefs:2|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0 = 75.0
390
+ TER|nrefs:2|case:lc|tok:tercom|norm:no|punct:yes|asian:no|version:2.0.0 = 31.2
391
+ ```
392
+
393
+ Alternatively (less recommended), we can concatenate references using tabs as delimiters as well. Don't forget to pass `--num-refs/-nr` in this case!
394
+
395
+ ```
396
+ $ paste ref1 ref2 > refs.tsv
397
+
398
+ $ sacrebleu refs.tsv --num-refs 2 -i system -m bleu
399
+ BLEU|nrefs:2|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 61.8 <stripped>
400
+ ```
401
+
402
+ ## Multi-system Evaluation
403
+ As of version `>=2.0.0`, SacreBLEU supports evaluation of an arbitrary number of systems for a particular
404
+ test set and language-pair. This has the advantage of seeing all results in a
405
+ nicely formatted table.
406
+
407
+ Let's pass all system output files that match the shell glob `newstest2017.online-*` to sacreBLEU for evaluation:
408
+
409
+ ```
410
+ $ sacrebleu -t wmt17 -l en-de -i newstest2017.online-* -m bleu chrf
411
+ ╒═══════════════════════════════╤════════╤═════════╕
412
+ │ System │ BLEU │ chrF2 │
413
+ ╞═══════════════════════════════╪════════╪═════════╡
414
+ │ newstest2017.online-A.0.en-de │ 20.8 │ 52.0 │
415
+ ├───────────────────────────────┼────────┼─────────┤
416
+ │ newstest2017.online-B.0.en-de │ 26.7 │ 56.3 │
417
+ ├───────────────────────────────┼────────┼─────────┤
418
+ │ newstest2017.online-F.0.en-de │ 15.5 │ 49.3 │
419
+ ├───────────────────────────────┼────────┼─────────┤
420
+ │ newstest2017.online-G.0.en-de │ 18.2 │ 51.6 │
421
+ ╘═══════════════════════════════╧════════╧═════════╛
422
+
423
+ -----------------
424
+ Metric signatures
425
+ -----------------
426
+ - BLEU nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0
427
+ - chrF2 nrefs:1|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0
428
+ ```
429
+
430
+ You can also change the output format to `latex`:
431
+
432
+ ```
433
+ $ sacrebleu -t wmt17 -l en-de -i newstest2017.online-* -m bleu chrf -f latex
434
+ \begin{tabular}{rcc}
435
+ \toprule
436
+ System & BLEU & chrF2 \\
437
+ \midrule
438
+ newstest2017.online-A.0.en-de & 20.8 & 52.0 \\
439
+ newstest2017.online-B.0.en-de & 26.7 & 56.3 \\
440
+ newstest2017.online-F.0.en-de & 15.5 & 49.3 \\
441
+ newstest2017.online-G.0.en-de & 18.2 & 51.6 \\
442
+ \bottomrule
443
+ \end{tabular}
444
+
445
+ ...
446
+ ```
447
+
448
+ ## Confidence Intervals for Single System Evaluation
449
+
450
+ When enabled with the `--confidence` flag, SacreBLEU will print
451
+ (1) the actual system score, (2) the true mean estimated from bootstrap resampling and (3),
452
+ the 95% [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval) around the mean.
453
+ By default, the number of bootstrap resamples is 1000 (`bs:1000` in the signature)
454
+ and can be changed with `--confidence-n`:
455
+
456
+ ```
457
+ $ sacrebleu -t wmt17 -l en-de -i output.detok.txt -m bleu chrf --confidence -f text --short
458
+ BLEU|#:1|bs:1000|rs:12345|c:mixed|e:no|tok:13a|s:exp|v:2.0.0 = 22.675 (μ = 22.669 ± 0.598) ...
459
+ chrF2|#:1|bs:1000|rs:12345|c:mixed|e:yes|nc:6|nw:0|s:no|v:2.0.0 = 51.953 (μ = 51.953 ± 0.462)
460
+ ```
461
+
462
+ **NOTE:** Although provided as a functionality, having access to confidence intervals for just one system
463
+ may not reveal much information about the underlying model. It often makes more sense to perform
464
+ **paired statistical tests** across multiple systems.
465
+
466
+ **NOTE:** When resampling, the seed of the `numpy`'s random number generator (RNG)
467
+ is fixed to `12345`. If you want to relax this and set your own seed, you can
468
+ export the environment variable `SACREBLEU_SEED` to an integer. Alternatively, you can export
469
+ `SACREBLEU_SEED=None` to skip initializing the RNG's seed and allow for non-deterministic
470
+ behavior.
471
+
472
+ ## Paired Significance Tests for Multi System Evaluation
473
+ Ideally, one would have access to many systems in cases such as (1) investigating
474
+ whether a newly added feature yields significantly different scores than the baseline or
475
+ (2) evaluating submissions for a particular shared task. SacreBLEU offers two different paired significance tests that are widely used in MT research.
476
+
477
+ ### Paired bootstrap resampling (--paired-bs)
478
+
479
+ This is an efficient implementation of the paper [Statistical Significance Tests for Machine Translation Evaluation](https://www.aclweb.org/anthology/W04-3250.pdf) and is result-compliant with the [reference Moses implementation](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/analysis/bootstrap-hypothesis-difference-significance.pl). The number of bootstrap resamples can be changed with the `--paired-bs-n` flag and its default is 1000.
480
+
481
+ When launched, paired bootstrap resampling will perform:
482
+ - Bootstrap resampling to estimate 95% CI for all systems and the baseline
483
+ - A significance test between the **baseline** and each **system** to compute a [p-value](https://en.wikipedia.org/wiki/P-value).
484
+
485
+ ### Paired approximate randomization (--paired-ar)
486
+
487
+ Paired approximate randomization (AR) is another type of paired significance test that is claimed to be more accurate than paired bootstrap resampling when it comes to Type-I errors ([Riezler and Maxwell III, 2005](https://www.aclweb.org/anthology/W05-0908.pdf)). A Type-I error occurs when the null hypothesis is incorrectly rejected even though it is true. In other words, AR should in theory be more robust to subtle changes across systems.
488
+
489
+ Our implementation is verified to be result-compliant with the [Multeval toolkit](https://github.com/jhclark/multeval) that also uses paired AR test for pairwise comparison. The number of approximate randomization trials is set to 10,000 by default. This can be changed with the `--paired-ar-n` flag.
490
+
491
+ ### Running the tests
492
+
493
+ - The **first system** provided to `--input/-i` will be automatically taken as the **baseline system** against which you want to compare **other systems.**
494
+ - When `--input/-i` is used, the system output files will be automatically named according to the file paths. For the sake of simplicity, SacreBLEU will automatically discard the **baseline system** if it also appears amongst **other systems**. This is useful if you would like to run the tool by passing `-i systems/baseline.txt systems/*.txt`. Here, the `baseline.txt` file will not be also considered as a candidate system.
495
+ - Alternatively, you can also use a tab-separated input file redirected to SacreBLEU. In this case, the first column hypotheses will be taken as the **baseline system**. However, this method is **not recommended** as it won't allow naming your systems in a human-readable way. It will instead enumerate the systems from 1 to N following the column order in the tab-separated input.
496
+ - On Linux and Mac OS X, you can launch the tests on multiple CPUs by passing the flag `--paired-jobs N`. If `N == 0`, SacreBLEU will launch one worker for each pairwise comparison. If `N > 0`, `N` worker processes will be spawned. This feature will substantially speed up the runtime especially if you want the **TER** metric to be computed.
497
+
498
+ #### Example: Paired bootstrap resampling
499
+ In the example below, we select `newstest2017.LIUM-NMT.4900.en-de` as the baseline and compare it to 4 other WMT17 submissions using paired bootstrap resampling. According to the results, the null hypothesis (i.e. the two systems being essentially the same) could not be rejected (at the significance level of 0.05) for the following comparisons:
500
+
501
+ - 0.1 BLEU difference between the baseline and the online-B system (p = 0.3077)
502
+
503
+ ```
504
+ $ sacrebleu -t wmt17 -l en-de -i newstest2017.LIUM-NMT.4900.en-de newstest2017.online-* -m bleu chrf --paired-bs
505
+ ╒════════════════════════════════════════════╤═════════════════════╤══════════════════════╕
506
+ │ System │ BLEU (μ ± 95% CI) │ chrF2 (μ ± 95% CI) │
507
+ ╞════════════════════════════════════════════╪═════════════════════╪══════════════════════╡
508
+ │ Baseline: newstest2017.LIUM-NMT.4900.en-de │ 26.6 (26.6 ± 0.6) │ 55.9 (55.9 ± 0.5) │
509
+ ├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
510
+ │ newstest2017.online-A.0.en-de │ 20.8 (20.8 ± 0.6) │ 52.0 (52.0 ± 0.4) │
511
+ │ │ (p = 0.0010)* │ (p = 0.0010)* │
512
+ ├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
513
+ │ newstest2017.online-B.0.en-de │ 26.7 (26.6 ± 0.7) │ 56.3 (56.3 ± 0.5) │
514
+ │ │ (p = 0.3077) │ (p = 0.0240)* │
515
+ ├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
516
+ │ newstest2017.online-F.0.en-de │ 15.5 (15.4 ± 0.5) │ 49.3 (49.3 ± 0.4) │
517
+ │ │ (p = 0.0010)* │ (p = 0.0010)* │
518
+ ├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
519
+ │ newstest2017.online-G.0.en-de │ 18.2 (18.2 ± 0.5) │ 51.6 (51.6 ± 0.4) │
520
+ │ │ (p = 0.0010)* │ (p = 0.0010)* │
521
+ ╘════════════════════════════════════════════╧═════════════════════╧══════════════════════╛
522
+
523
+ ------------------------------------------------------------
524
+ Paired bootstrap resampling test with 1000 resampling trials
525
+ ------------------------------------------------------------
526
+ - Each system is pairwise compared to Baseline: newstest2017.LIUM-NMT.4900.en-de.
527
+ Actual system score / bootstrap estimated true mean / 95% CI are provided for each metric.
528
+
529
+ - Null hypothesis: the system and the baseline translations are essentially
530
+ generated by the same underlying process. For a given system and the baseline,
531
+ the p-value is roughly the probability of the absolute score difference (delta)
532
+ or higher occurring due to chance, under the assumption that the null hypothesis is correct.
533
+
534
+ - Assuming a significance threshold of 0.05, the null hypothesis can be rejected
535
+ for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed
536
+ to chance, hence the system is significantly "different" than the baseline.
537
+ Otherwise, the p-values are highlighted in red.
538
+
539
+ - NOTE: Significance does not tell whether a system is "better" than the baseline but rather
540
+ emphasizes the "difference" of the systems in terms of the replicability of the delta.
541
+
542
+ -----------------
543
+ Metric signatures
544
+ -----------------
545
+ - BLEU nrefs:1|bs:1000|seed:12345|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0
546
+ - chrF2 nrefs:1|bs:1000|seed:12345|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0
547
+ ```
548
+
549
+ #### Example: Paired approximate randomization
550
+
551
+ Let's now run the paired approximate randomization test for the same comparison. According to the results, the findings are compatible with the paired bootstrap resampling test. However, the p-value for the `baseline vs. online-B` comparison is much higher (`0.8066`) than in the paired bootstrap resampling test.
552
+
553
+ (**Note that** the AR test does not provide confidence intervals around the true mean as it does not perform bootstrap resampling.)
554
+
555
+ ```
556
+ $ sacrebleu -t wmt17 -l en-de -i newstest2017.LIUM-NMT.4900.en-de newstest2017.online-* -m bleu chrf --paired-ar
557
+ ╒════════════════════════════════════════════╤═══════════════╤═══════════════╕
558
+ │ System │ BLEU │ chrF2 │
559
+ ╞════════════════════════════════════════════╪═══════════════╪═══════════════╡
560
+ │ Baseline: newstest2017.LIUM-NMT.4900.en-de │ 26.6 │ 55.9 │
561
+ ├────────────────────────────────────────────┼───────────────┼───────────────┤
562
+ │ newstest2017.online-A.0.en-de │ 20.8 │ 52.0 │
563
+ │ │ (p = 0.0001)* │ (p = 0.0001)* │
564
+ ├────────────────────────────────────────────┼───────────────┼───────────────┤
565
+ │ newstest2017.online-B.0.en-de │ 26.7 │ 56.3 │
566
+ │ │ (p = 0.8066) │ (p = 0.0385)* │
567
+ ├────────────────────────────────────────────┼───────────────┼───────────────┤
568
+ │ newstest2017.online-F.0.en-de │ 15.5 │ 49.3 │
569
+ │ │ (p = 0.0001)* │ (p = 0.0001)* │
570
+ ├────────────────────────────────────────────┼───────────────┼───────────────┤
571
+ │ newstest2017.online-G.0.en-de │ 18.2 │ 51.6 │
572
+ │ │ (p = 0.0001)* │ (p = 0.0001)* │
573
+ ╘════════════════════════════════════════════╧═══════════════╧═══════════════╛
574
+
575
+ -------------------------------------------------------
576
+ Paired approximate randomization test with 10000 trials
577
+ -------------------------------------------------------
578
+ - Each system is pairwise compared to Baseline: newstest2017.LIUM-NMT.4900.en-de.
579
+ Actual system score is provided for each metric.
580
+
581
+ - Null hypothesis: the system and the baseline translations are essentially
582
+ generated by the same underlying process. For a given system and the baseline,
583
+ the p-value is roughly the probability of the absolute score difference (delta)
584
+ or higher occurring due to chance, under the assumption that the null hypothesis is correct.
585
+
586
+ - Assuming a significance threshold of 0.05, the null hypothesis can be rejected
587
+ for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed
588
+ to chance, hence the system is significantly "different" than the baseline.
589
+ Otherwise, the p-values are highlighted in red.
590
+
591
+ - NOTE: Significance does not tell whether a system is "better" than the baseline but rather
592
+ emphasizes the "difference" of the systems in terms of the replicability of the delta.
593
+
594
+ -----------------
595
+ Metric signatures
596
+ -----------------
597
+ - BLEU nrefs:1|ar:10000|seed:12345|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0
598
+ - chrF2 nrefs:1|ar:10000|seed:12345|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0
599
+ ```
600
+
601
+ # Using SacreBLEU from Python
602
+
603
+ For evaluation, it may be useful to compute BLEU, chrF or TER from a Python script. The recommended
604
+ way of doing this is to use the object-oriented API, by creating an instance of the `metrics.BLEU` class
605
+ for example:
606
+
607
+ ```python
608
+ In [1]: from sacrebleu.metrics import BLEU, CHRF, TER
609
+ ...:
610
+ ...: refs = [ # First set of references
611
+ ...: ['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'],
612
+ ...: # Second set of references
613
+ ...: ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.'],
614
+ ...: ]
615
+ ...: sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.']
616
+
617
+ In [2]: bleu = BLEU()
618
+
619
+ In [3]: bleu.corpus_score(sys, refs)
620
+ Out[3]: BLEU = 48.53 82.4/50.0/45.5/37.5 (BP = 0.943 ratio = 0.944 hyp_len = 17 ref_len = 18)
621
+
622
+ In [4]: bleu.get_signature()
623
+ Out[4]: nrefs:2|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0
624
+
625
+ In [5]: chrf = CHRF()
626
+
627
+ In [6]: chrf.corpus_score(sys, refs)
628
+ Out[6]: chrF2 = 59.73
629
+ ```
630
+
631
+ ### Variable Number of References
632
+
633
+ Let's now remove the first reference sentence for the first system sentence `The dog bit the man.` by replacing it with either `None` or the empty string `''`.
634
+ This allows using a variable number of reference segments per hypothesis. Observe how the signature changes from `nrefs:2` to `nrefs:var`:
635
+
636
+ ```python
637
+ In [1]: from sacrebleu.metrics import BLEU, CHRF, TER
638
+ ...:
639
+ ...: refs = [ # First set of references
640
+ # 1st sentence does not have a ref here
641
+ ...: ['', 'It was not unexpected.', 'The man bit him first.'],
642
+ ...: # Second set of references
643
+ ...: ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.'],
644
+ ...: ]
645
+ ...: sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.']
646
+
647
+ In [2]: bleu = BLEU()
648
+
649
+ In [3]: bleu.corpus_score(sys, refs)
650
+ Out[3]: BLEU = 29.44 82.4/42.9/27.3/12.5 (BP = 0.889 ratio = 0.895 hyp_len = 17 ref_len = 19)
651
+
652
+ In [4]: bleu.get_signature()
653
+ Out[4]: nrefs:var|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0
654
+ ```
655
+
656
+ ## Compatibility API
657
+
658
+ You can also use the compatibility API that provides wrapper functions around the object-oriented API to
659
+ compute sentence-level and corpus-level BLEU, chrF and TER: (It should be noted that this API may be
660
+ removed in future releases)
661
+
662
+ ```python
663
+ In [1]: import sacrebleu
664
+ ...:
665
+ ...: refs = [ # First set of references
666
+ ...: ['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'],
667
+ ...: # Second set of references
668
+ ...: ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.'],
669
+ ...: ]
670
+ ...: sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.']
671
+
672
+ In [2]: sacrebleu.corpus_bleu(sys, refs)
673
+ Out[2]: BLEU = 48.53 82.4/50.0/45.5/37.5 (BP = 0.943 ratio = 0.944 hyp_len = 17 ref_len = 18)
674
+ ```
675
+
676
+ # License
677
+
678
+ SacreBLEU is licensed under the [Apache 2.0 License](LICENSE.txt).
679
+
680
+ # Credits
681
+
682
+ This was all [Rico Sennrich's idea](https://twitter.com/RicoSennrich/status/883246242763026433)
683
+ Originally written by Matt Post.
684
+ New features and ongoing support provided by Martin Popel (@martinpopel) and Ozan Caglayan (@ozancaglayan).
685
+
686
+ If you use SacreBLEU, please cite the following:
687
+
688
+ ```
689
+ @inproceedings{post-2018-call,
690
+ title = "A Call for Clarity in Reporting {BLEU} Scores",
691
+ author = "Post, Matt",
692
+ booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
693
+ month = oct,
694
+ year = "2018",
695
+ address = "Belgium, Brussels",
696
+ publisher = "Association for Computational Linguistics",
697
+ url = "https://www.aclweb.org/anthology/W18-6319",
698
+ pages = "186--191",
699
+ }
700
+ ```
701
+
702
+ # Release Notes
703
+
704
+ - 2.4.2 (2024-04-12)
705
+ Added:
706
+ - The CLI "--echo" now will return the domain for WMT22 and WMT23.
707
+
708
+ Fixed:
709
+ - Default reference for wmt23:en-de
710
+
711
+ - 2.4.1 (2024-03-12)
712
+ Fixed:
713
+ - Add exports to package __init__.py
714
+
715
+ - 2.4.0 (2023-12-11)
716
+ Added:
717
+ - WMT23 test sets (test set `wmt23`)
718
+
719
+ - 2.3.3 (2023-11-28)
720
+ Fixed:
721
+ - Typing issues (#249, #250)
722
+ - Improved builds (#252)
723
+
724
+ - 2.3.2 (2023-11-06)
725
+ Fixed:
726
+ - Special treatment of empty references in TER (#232)
727
+ - Bump in mecab version for JA (#234)
728
+
729
+ Added:
730
+ - Warning if `-tok spm` is used (use explicit `flores101` instead) (#238)
731
+
732
+ - 2.3.1 (2022-10-18)
733
+ Bugfix:
734
+ - Set lru_cache to 2^16 for SPM tokenizer (was set to infinite)
735
+
736
+ - 2.3.0 (2022-10-18)
737
+ Features:
738
+ - (#203) Added `-tok flores101` and `-tok flores200`, a.k.a. `spbleu`.
739
+ These are multilingual tokenizations that make use of the
740
+ multilingual SPM models released by Facebook and described in the
741
+ following papers:
742
+ * Flores-101: https://arxiv.org/abs/2106.03193
743
+ * Flores-200: https://arxiv.org/abs/2207.04672
744
+ - (#213) Added JSON formatting for multi-system output (thanks to Manikanta Inugurthi @me-manikanta)
745
+ - (#211) You can now list all test sets for a language pair with `--list SRC-TRG`.
746
+ Thanks to Jaume Zaragoza (@ZJaume) for adding this feature.
747
+ - Added WMT22 test sets (test set `wmt22`)
748
+ - System outputs: include with wmt22. Also added wmt21/systems which will produce WMT21 submitted systems.
749
+ To see available systems, give a dummy system to `--echo`, e.g., `sacrebleu -t wmt22 -l en-de --echo ?`
750
+
751
+ - 2.2.1 (2022-09-13)
752
+ Bugfix: Standard usage was returning (and using) each reference twice.
753
+
754
+ - 2.2.0 (2022-07-25)
755
+ Features:
756
+ - Added WMT21 datasets (thanks to @BrighXiaoHan)
757
+ - `--echo` now exposes document metadata where available (e.g., docid, genre, origlang)
758
+ - Bugfix: allow empty references (#161)
759
+ - Adds a Korean tokenizer (thanks to @NoUnique)
760
+
761
+ Under the hood:
762
+ - Moderate code refactoring
763
+ - Processed files have adopted a more sensible internal naming scheme under ~/.sacrebleu
764
+ (e.g., wmt17_ms.zh-en.src instead of zh-en.zh)
765
+ - Processed file extensions correspond to the values passed to `--echo` (e.g., "src")
766
+ - Now explicitly representing NoneTokenizer
767
+ - Got rid of the ".lock" lockfile for downloading (using the tarball itself)
768
+
769
+ Many thanks to @BrightXiaoHan (https://github.com/BrightXiaoHan) for the bulk of
770
+ the code contributions in this release.
771
+
772
+ - 2.1.0 (2022-05-19)
773
+ Features:
774
+ - Added `-tok spm` for multilingual SPM tokenization (#168)
775
+ (thanks to Naman Goyal and James Cross at Facebook)
776
+
777
+ Fixes:
778
+ - Handle potential memory usage issues due to LRU caching in tokenizers (#167)
779
+ - Bugfix: BLEU.corpus_score() now using max_ngram_order (#173)
780
+ - Upgraded ja-mecab to 1.0.5 (#196)
781
+
782
+ - 2.0.0 (2021-07-18)
783
+ - Build: Add Windows and OS X testing to Travis CI.
784
+ - Improve documentation and type annotations.
785
+ - Drop `Python < 3.6` support and migrate to f-strings.
786
+ - Relax `portalocker` version pinning, add `regex, tabulate, numpy` dependencies.
787
+ - Drop input type manipulation through `isinstance` checks. If the user does not obey
788
+ to the expected annotations, exceptions will be raised. Robustness attempts lead to
789
+ confusions and obfuscated score errors in the past (#121)
790
+ - Variable # references per segment is supported for all metrics by default. It is
791
+ still only available through the API.
792
+ - Use colored strings in tabular outputs (multi-system evaluation mode) through
793
+ the help of `colorama` package.
794
+ - tokenizers: Add caching to tokenizers which seem to speed up things a bit.
795
+ - `intl` tokenizer: Use `regex` module. Speed goes from ~4 seconds to ~0.6 seconds
796
+ for a particular test set evaluation. (#46)
797
+ - Signature: Formatting changed (mostly to remove '+' separator as it was
798
+ interfering with chrF++). The field separator is now '|' and key values
799
+ are separated with ':' rather than '.'.
800
+ - Signature: Boolean true / false values are shortened to yes / no.
801
+ - Signature: Number of references is `var` if variable number of references is used.
802
+ - Signature: Add effective order (yes/no) to BLEU and chrF signatures.
803
+ - Metrics: Scale all metrics into the [0, 100] range (#140)
804
+ - Metrics API: Use explicit argument names and defaults for the metrics instead of
805
+ passing obscure `argparse.Namespace` objects.
806
+ - Metrics API: A base abstract `Metric` class is introduced to guide further
807
+ metric development. This class defines the methods that should be implemented
808
+ in the derived classes and offers boilerplate methods for the common functionality.
809
+ A new metric implemented this way will automatically support significance testing.
810
+ - Metrics API: All metrics now receive an optional `references` argument at
811
+ initialization time to process and cache the references. Further evaluations
812
+ of different systems against the same references becomes faster this way
813
+ for example when using significance testing.
814
+ - BLEU: In case of no n-gram matches at all, skip smoothing and return 0.0 BLEU (#141).
815
+ - CHRF: Added multi-reference support, verified the scores against chrF++.py, added test case.
816
+ - CHRF: Added chrF+ support through `word_order` argument. Added test cases against chrF++.py.
817
+ Exposed it through the CLI (--chrf-word-order) (#124)
818
+ - CHRF: Add possibility to disable effective order smoothing (pass --chrf-eps-smoothing).
819
+ This way, the scores obtained are exactly the same as chrF++, Moses and NLTK implementations.
820
+ We keep the effective ordering as the default for compatibility, since this only
821
+ affects sentence-level scoring with very short sentences. (#144)
822
+ - CLI: `--input/-i` can now ingest multiple systems. For this reason, the positional
823
+ `references` should always preceed the `-i` flag.
824
+ - CLI: Allow modifying TER arguments through CLI. We still keep the TERCOM defaults.
825
+ - CLI: Prefix metric-specific arguments with --chrf and --ter. To maintain compatibility,
826
+ BLEU argument names are kept the same.
827
+ - CLI: Separate metric-specific arguments for clarity when `--help` is printed.
828
+ - CLI: Added `--format/-f` flag. The single-system output mode is now `json` by default.
829
+ If you want to keep the old text format persistently, you can export `SACREBLEU_FORMAT=text` into your
830
+ shell.
831
+ - CLI: For multi-system mode, `json` falls back to plain text. `latex` output can only
832
+ be generated for multi-system mode.
833
+ - CLI: sacreBLEU now supports evaluating multiple systems for a given test set
834
+ in an efficient way. Through the use of `tabulate` package, the results are
835
+ nicely rendered into a plain text table, LaTeX, HTML or RST (cf. --format/-f argument).
836
+ The systems can be either given as a list of plain text files to `-i/--input` or
837
+ as a tab-separated single stream redirected into `STDIN`. In the former case,
838
+ the basenames of the files will be automatically used as system names.
839
+ - Statistical tests: sacreBLEU now supports confidence interval estimation
840
+ through bootstrap resampling for single-system evaluation (`--confidence` flag)
841
+ as well as paired bootstrap resampling (`--paired-bs`) and paired approximate
842
+ randomization tests (`--paired-ar`) when evaluating multiple systems (#40 and #78).
843
+
844
+ - 1.5.1 (2021-03-05)
845
+ - Fix extraction error for WMT18 extra test sets (test-ts) (#142)
846
+ - Validation and test datasets are added for multilingual TEDx
847
+
848
+ - 1.5.0 (2021-01-15)
849
+ - Fix an assertion error in chrF (#121)
850
+ - Add missing `__repr__()` methods for BLEU and TER
851
+ - TER: Fix exception when `--short` is used (#131)
852
+ - Pin Mecab version to 1.0.3 for Python 3.5 support
853
+ - [API Change]: Default value for `floor` smoothing is now 0.1 instead of 0.
854
+ - [API Change]: `sacrebleu.sentence_bleu()` now uses the `exp` smoothing method,
855
+ exactly the same as the CLI's --sentence-level behavior. This was mainly done
856
+ to make two methods behave the same.
857
+ - Add smoothing value to BLEU signature (#98)
858
+ - dataset: Fix IWSLT links (#128)
859
+ - Allow variable number of references for BLEU (only via API) (#130).
860
+ Thanks to Ondrej Dusek (@tuetschek)
861
+
862
+ - 1.4.14 (2020-09-13)
863
+ - Added character-based tokenization (`-tok char`).
864
+ Thanks to Christian Federmann.
865
+ - Added TER (`-m ter`). Thanks to Ales Tamchyna! (fixes #90)
866
+ - Allow calling the script as a standalone utility (fixes #86)
867
+ - Fix type annotation issues (fixes #100) and mark sacrebleu as supporting mypy
868
+ - Added WMT20 robustness test sets:
869
+ - wmt20/robust/set1 (en-ja, en-de)
870
+ - wmt20/robust/set2 (en-ja, ja-en)
871
+ - wmt20/robust/set3 (de-en)
872
+
873
+ - 1.4.13 (2020-07-30)
874
+ - Added WMT20 newstest test sets (#103)
875
+ - Make mecab3-python an extra dependency, adapt code to new mecab3-python
876
+ This fixes the recent Windows installation issues as well (#104)
877
+ Japanese support should now be explicitly installed through sacrebleu[ja] package.
878
+ - Fix return type annotation of corpus_bleu()
879
+ - Improve sentence_score's documentation, do not allow single ref string (#98)
880
+
881
+ - 1.4.12 (2020-07-03)
882
+ - Fix a deployment bug (#96)
883
+
884
+ - 1.4.11 (2020-07-03)
885
+ - Added Multi30k multimodal MT test set metadata
886
+ - Refactored all tokenizers into respective classes (fixes #85)
887
+ - Refactored all metrics into respective classes
888
+ - Moved utility functions into `utils.py`
889
+ - Implemented signatures using `BLEUSignature` and `CHRFSignature` classes
890
+ - Simplified checking of Chinese characters (fixes #5)
891
+ - Unified common regexp tokenization codes for tokenizers (fixes #27)
892
+ - Fixed --detail failing when no test sets are provided
893
+ - Fixed multi-reference BLEU failing when tab-delimited reference stream is used
894
+ - Removed lowercase option for ChrF which was not functional (#85)
895
+ - Simplified ChrF and used the same I/O logic as BLEU to allow for future
896
+ multi-reference reading
897
+ - Added score regression tests for chrF using reference chrF++ implementation
898
+ - Added multi-reference & tokenizer & signature tests
899
+
900
+ - 1.4.10 (2020-05-30)
901
+ - Fixed bug in signature with mecab tokenizer
902
+ - Cleaned up deprecation warnings (thanks to Karthikeyan Singaravelan @tirkarthi)
903
+ - Now only lists the external [typing](https://pypi.org/project/typing/)
904
+ module as a dependency for Python `<= 3.4`, as it was integrated in the standard
905
+ library in Python 3.5 (thanks to Erwan de Lépinau @ErwanDL).
906
+ - Added LICENSE to pypi (thanks to Mark Harfouche @hmaarrfk)
907
+
908
+ - 1.4.9 (2020-04-30)
909
+ - Changed `get_available_testsets()` to return a list
910
+ - Remove Japanese MeCab tokenizer from requirements.
911
+ (Must be installed manually to avoid Windows incompatibility).
912
+ Many thanks to Makoto Morishita (@MorinoseiMorizo).
913
+
914
+ - 1.4.8 (2020-04-26)
915
+ - Added to API:
916
+ - get_source_file()
917
+ - get_reference_files()
918
+ - get_available_testsets()
919
+ - get_langpairs_for_testset()
920
+ - Some internal refactoring
921
+ - Fixed descriptions of some WMT19/google test sets
922
+ - Added API test case (test/test_apy.py)
923
+
924
+ - 1.4.7 (2020-04-19)
925
+ - Added Google's extra wmt19/en-de refs (-t wmt19/google/{ar,arp,hqall,hqp,hqr,wmtp})
926
+ (Freitag, Grangier, & Caswell
927
+ BLEU might be Guilty but References are not Innocent
928
+ https://arxiv.org/abs/2004.06063)
929
+ - Restored SACREBLEU_DIR and smart_open to exports (thanks to Thomas Liao @tholiao)
930
+
931
+ - 1.4.6 (2020-03-28)
932
+ - Large internal reorganization as a module (thanks to Thamme Gowda @thammegowda)
933
+
934
+ - 1.4.5 (2020-03-28)
935
+ - Added Japanese MeCab tokenizer (`-tok ja-mecab`) (thanks to Makoto Morishita @MorinoseiMorizo)
936
+ - Added wmt20/dev test sets (thanks to Martin Popel @martinpopel)
937
+
938
+ - 1.4.4 (2020-03-10)
939
+ - Smoothing changes (Sebastian Nickels @sn1c)
940
+ - Fixed bug that only applied smoothing to n-grams for n > 2
941
+ - Added default smoothing values for methods "floor" (0) and "add-k" (1)
942
+ - `--list` now returns a list of all language pairs for a task when combined with `-t`
943
+ (e.g., `sacrebleu -t wmt19 --list`)
944
+ - added missing languages for IWSLT17
945
+ - Minor code improvements (Thomas Liao @tholiao)
946
+
947
+ - 1.4.3 (2019-12-02)
948
+ - Bugfix: handling of result object for CHRF
949
+ - Improved API example
950
+
951
+ - 1.4.2 (2019-10-11)
952
+ - Tokenization variant omitted from the chrF signature; it is relevant only for BLEU (thanks to Martin Popel)
953
+ - Bugfix: call to sentence_bleu (thanks to Rachel Bawden)
954
+ - Documentation example for Python API (thanks to Vlad Lyalin)
955
+ - Calls to corpus_chrf and sentence_chrf now return a an object instead of a float (use result.score)
956
+
957
+ - 1.4.1 (2019-09-11)
958
+ - Added sentence-level scoring via -sl (--sentence-level)
959
+
960
+ - 1.4.0 (2019-09-10)
961
+ - Many thanks to Martin Popel for all the changes below!
962
+ - Added evaluation on concatenated test sets (e.g., `-t wmt17,wmt18`).
963
+ Works as long as they all have the same language pair.
964
+ - Added `sacrebleu --origlang` (both for evaluation on a subset and for `--echo`).
965
+ Note that while echoing prints just the subset, evaluation expects the complete
966
+ test set (and just skips the irrelevant parts).
967
+ - Added `sacrebleu --detail` for breakdown by domain-specific subsets of the test sets.
968
+ (Available for WMT19).
969
+ - Minor changes
970
+ - Improved display of `sacrebleu -h`
971
+ - Added `sacrebleu --list`
972
+ - Code refactoring
973
+ - Documentation and tests updates
974
+ - Fixed a race condition bug (`os.makedirs(outdir, exist_ok=True)` instead of `if os.path.exists`)
975
+
976
+ - 1.3.7 (2019-07-12)
977
+ - Lazy loading of regexes cuts import time from ~1s to nearly nothing (thanks, @louismartin!)
978
+ - Added a simple (non-atomic) lock on downloading
979
+ - Can now read multiple refs from a single tab-delimited file.
980
+ You need to pass `--num-refs N` to tell it to run the split.
981
+ Only works with a single reference file passed from the command line.
982
+
983
+ - 1.3.6 (2019-06-10)
984
+ - Removed another f-string for Python 3.5 compatibility
985
+
986
+ - 1.3.5 (2019-06-07)
987
+ - Restored Python 3.5 compatibility
988
+
989
+ - 1.3.4 (2019-05-28)
990
+ - Added MTNT 2019 test sets
991
+ - Added a BLEU object
992
+
993
+ - 1.3.3 (2019-05-08)
994
+ - Added WMT'19 test sets
995
+
996
+ - 1.3.2 (2018-04-24)
997
+ - Bugfix in test case (thanks to Adam Roberts, @adarob)
998
+ - Passing smoothing method through `sentence_bleu`
999
+
1000
+ - 1.3.1 (2019-03-20)
1001
+ - Added another smoothing approach (add-k) and a command-line option for choosing the smoothing method
1002
+ (`--smooth exp|floor|add-n|none`) and the associated value (`--smooth-value`), when relevant.
1003
+ - Changed interface to some functions (backwards incompatible)
1004
+ - 'smooth' is now 'smooth_method'
1005
+ - 'smooth_floor' is now 'smooth_value'
1006
+
1007
+ - 1.2.21 (19 March 2019)
1008
+ - Ctrl-M characters are now treated as normal characters, previously treated as newline.
1009
+
1010
+ - 1.2.20 (28 February 2018)
1011
+ - Tokenization now defaults to "zh" when language pair is known
1012
+
1013
+ - 1.2.19 (19 February 2019)
1014
+ - Updated checksum for wmt19/dev (seems to have changed)
1015
+
1016
+ - 1.2.18 (19 February 2019)
1017
+ - Fixed checksum for wmt17/dev (copy-paste error)
1018
+
1019
+ - 1.2.17 (6 February 2019)
1020
+ - Added kk-en and en-kk to wmt19/dev
1021
+
1022
+ - 1.2.16 (4 February 2019)
1023
+ - Added gu-en and en-gu to wmt19/dev
1024
+
1025
+ - 1.2.15 (30 January 2019)
1026
+ - Added MD5 checksumming of downloaded files for all datasets.
1027
+
1028
+ - 1.2.14 (22 January 2019)
1029
+ - Added mtnt1.1/train mtnt1.1/valid mtnt1.1/test data from [MTNT](http://www.cs.cmu.edu/~pmichel1/mtnt/)
1030
+
1031
+ - 1.2.13 (22 January 2019)
1032
+ - Added 'wmt19/dev' task for 'lt-en' and 'en-lt' (development data for new tasks).
1033
+ - Added MD5 checksum for downloaded tarballs.
1034
+
1035
+ - 1.2.12 (8 November 2018)
1036
+ - Now outputs only only digit after the decimal
1037
+
1038
+ - 1.2.11 (29 August 2018)
1039
+ - Added a function for sentence-level, smoothed BLEU
1040
+
1041
+ - 1.2.10 (23 May 2018)
1042
+ - Added wmt18 test set (with references)
1043
+
1044
+ - 1.2.9 (15 May 2018)
1045
+ - Added zh-en, en-zh, tr-en, and en-tr datasets for wmt18/test-ts
1046
+
1047
+ - 1.2.8 (14 May 2018)
1048
+ - Added wmt18/test-ts, the test sources (only) for [WMT18](http://statmt.org/wmt18/translation-task.html)
1049
+ - Moved README out of `sacrebleu.py` and the CHANGELOG into a separate file
1050
+
1051
+ - 1.2.7 (10 April 2018)
1052
+ - fixed another locale issue (with --echo)
1053
+ - grudgingly enabled `-tok none` from the command line
1054
+
1055
+ - 1.2.6 (22 March 2018)
1056
+ - added wmt17/ms (Microsoft's [additional ZH-EN references](https://github.com/MicrosoftTranslator/Translator-HumanParityData)).
1057
+ Try `sacrebleu -t wmt17/ms --cite`.
1058
+ - `--echo ref` now pastes together all references, if there is more than one
1059
+
1060
+ - 1.2.5 (13 March 2018)
1061
+ - added wmt18/dev datasets (en-et and et-en)
1062
+ - fixed logic with --force
1063
+ - locale-independent installation
1064
+ - added "--echo both" (tab-delimited)
1065
+
1066
+ - 1.2.3 (28 January 2018)
1067
+ - metrics (`-m`) are now printed in the order requested
1068
+ - chrF now prints a version string (including the beta parameter, importantly)
1069
+ - attempt to remove dependence on locale setting
1070
+
1071
+ - 1.2 (17 January 2018)
1072
+ - added the chrF metric (`-m chrf` or `-m bleu chrf` for both)
1073
+ See 'CHRF: character n-gram F-score for automatic MT evaluation' by Maja Popovic (WMT 2015)
1074
+ [http://www.statmt.org/wmt15/pdf/WMT49.pdf]
1075
+ - added IWSLT 2017 test and tuning sets for DE, FR, and ZH
1076
+ (Thanks to Mauro Cettolo and Marcello Federico).
1077
+ - added `--cite` to produce the citation for easy inclusion in papers
1078
+ - added `--input` (`-i`) to set input to a file instead of STDIN
1079
+ - removed accent mark after objection from UN official
1080
+
1081
+ - 1.1.7 (27 November 2017)
1082
+ - corpus_bleu() now raises an exception if input streams are different lengths
1083
+ - thanks to Martin Popel for:
1084
+ - small bugfix in tokenization_13a (not affecting WMT references)
1085
+ - adding `--tok intl` (international tokenization)
1086
+ - added wmt17/dev and wmt17/dev sets (for languages intro'd those years)
1087
+
1088
+ - 1.1.6 (15 November 2017)
1089
+ - bugfix for tokenization warning
1090
+
1091
+ - 1.1.5 (12 November 2017)
1092
+ - added -b option (only output the BLEU score)
1093
+ - removed fi-en from list of WMT16/17 systems with more than one reference
1094
+ - added WMT16/tworefs and WMT17/tworefs for scoring with both en-fi references
1095
+
1096
+ - 1.1.4 (10 November 2017)
1097
+ - added effective order for sentence-level BLEU computation
1098
+ - added unit tests from sockeye
1099
+
1100
+ - 1.1.3 (8 November 2017).
1101
+ - Factored code a bit to facilitate API:
1102
+ - compute_bleu: works from raw stats
1103
+ - corpus_bleu for use from the command line
1104
+ - raw_corpus_bleu: turns off tokenization, command-line sanity checks, floor smoothing
1105
+ - Smoothing (type 'exp', now the default) fixed to produce mteval-v13a.pl results
1106
+ - Added 'floor' smoothing (adds 0.01 to 0 counts, more versatile via API), 'none' smoothing (via API)
1107
+ - Small bugfixes, windows compatibility (H/T Christian Federmann)
1108
+
1109
+ - 1.0.3 (4 November 2017).
1110
+ - Contributions from Christian Federmann:
1111
+ - Added explicit support for encoding
1112
+ - Fixed Windows support
1113
+ - Bugfix in handling reference length with multiple refs
1114
+
1115
+ - version 1.0.1 (1 November 2017).
1116
+ - Small bugfix affecting some versions of Python.
1117
+ - Code reformatting due to Ozan Çağlayan.
1118
+
1119
+ - version 1.0 (23 October 2017).
1120
+ - Support for WMT 2008--2017.
1121
+ - Single tokenization (v13a) with lowercase fix (proper lower() instead of just A-Z).
1122
+ - Chinese tokenization.
1123
+ - Tested to match all WMT17 scores on all arcs.
1124
+
env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/RECORD ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/sacrebleu,sha256=xGl1UQEIi3XGJ-ZmBESEe8PtLzNoFRFF8nqwn5TrLII,244
2
+ sacrebleu-2.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
3
+ sacrebleu-2.4.2.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
4
+ sacrebleu-2.4.2.dist-info/METADATA,sha256=nqHo74uPqdWFe4CAQXRARYbVcrPNfYjt8pCnOV6yzEM,58040
5
+ sacrebleu-2.4.2.dist-info/RECORD,,
6
+ sacrebleu-2.4.2.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
7
+ sacrebleu-2.4.2.dist-info/entry_points.txt,sha256=oacOmm24wUB3Xd7hB1dBABMzlZh9ReRJRNhaKu1fDlw,55
8
+ sacrebleu-2.4.2.dist-info/top_level.txt,sha256=H3cpHXPQtQtVbKIwDN8Au2t8nwrTQlA9omGuoF7tJOA,10
9
+ sacrebleu/__init__.py,sha256=UUPHuhC7GiZVAAQCArR_33Tgk-kBShN07vaS233ye1k,1706
10
+ sacrebleu/__main__.py,sha256=Eim2Tft9Xcoh5PJ4n23mVnpP36UmecOtCAH1Upl5kao,1062
11
+ sacrebleu/__pycache__/__init__.cpython-310.pyc,,
12
+ sacrebleu/__pycache__/__main__.cpython-310.pyc,,
13
+ sacrebleu/__pycache__/compat.cpython-310.pyc,,
14
+ sacrebleu/__pycache__/sacrebleu.cpython-310.pyc,,
15
+ sacrebleu/__pycache__/significance.cpython-310.pyc,,
16
+ sacrebleu/__pycache__/utils.cpython-310.pyc,,
17
+ sacrebleu/compat.py,sha256=rq8s6SgH9xgBc2uK6JXoH0HWZ6CbcpP_4_X66nQhyCs,9100
18
+ sacrebleu/dataset/__init__.py,sha256=1KRahpk98bOFnIQaZLGDy0XgndrHjQXotCvogSvZ0RQ,106142
19
+ sacrebleu/dataset/__main__.py,sha256=4zJ7F7mtk2LKCGorTJe0YCtZ0al5lvrJNMrfrxl3_OQ,1250
20
+ sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc,,
21
+ sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc,,
22
+ sacrebleu/dataset/__pycache__/base.cpython-310.pyc,,
23
+ sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc,,
24
+ sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc,,
25
+ sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc,,
26
+ sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc,,
27
+ sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc,,
28
+ sacrebleu/dataset/base.py,sha256=TZGsir4PvdcO8YF_MWevd3Qb95ycQB2rvtAOkYIR0XM,6724
29
+ sacrebleu/dataset/fake_sgml.py,sha256=SW00xrlhdc9Sr9Z8Q7RF8mXVuLo3s1aNLC-zu3Crevo,4098
30
+ sacrebleu/dataset/iwslt_xml.py,sha256=nAwIXBfbcQLbbI3Eoe2DqdjGfD1V-429_qrwuZZqmj0,210
31
+ sacrebleu/dataset/plain_text.py,sha256=AKYCHFRtVLCoFod5pKBrqvKRxYmkqoPwUpcPup1Jg_8,1237
32
+ sacrebleu/dataset/tsv.py,sha256=m__O5lc8GmPvKl_901bndLAOuh9a1fK1koHOKWX6h90,2179
33
+ sacrebleu/dataset/wmt_xml.py,sha256=6xyzgctfaFWSxsUjKseWPy6YwHZekB-Ci-aUQrZFHP0,7828
34
+ sacrebleu/metrics/__init__.py,sha256=MzC5hSbprlwwvbeu6_6_FOz3L7c1KvtBkmkgOaGVTDk,260
35
+ sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc,,
36
+ sacrebleu/metrics/__pycache__/base.cpython-310.pyc,,
37
+ sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc,,
38
+ sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc,,
39
+ sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc,,
40
+ sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc,,
41
+ sacrebleu/metrics/__pycache__/ter.cpython-310.pyc,,
42
+ sacrebleu/metrics/base.py,sha256=xTWUzNfXJIjnbiIYBlwhSoRgfiFYLqe84rIhYH1nSXM,16559
43
+ sacrebleu/metrics/bleu.py,sha256=TqGdxEDu3H0P1uGKi-Y_BMNYK0xtpqtEGe7T0dJpgbs,17485
44
+ sacrebleu/metrics/chrf.py,sha256=lhkOB3nwuSOvsub17Mct9gQ0oR038gEj5vj-1IFQhXU,10674
45
+ sacrebleu/metrics/helpers.py,sha256=VWngO3F_9gUa4uQFH1WJm4xZqpq3CLYeAWEOeMuECx4,2339
46
+ sacrebleu/metrics/lib_ter.py,sha256=OBJjJHmDBzq_supZW68Vma-tkSVy1tibSLw_R093ncE,16477
47
+ sacrebleu/metrics/ter.py,sha256=TuS52VLjayllPf-EB6-8Sf7EdkIGdJ0L1QfXzCSOcBo,7769
48
+ sacrebleu/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
49
+ sacrebleu/sacrebleu.py,sha256=ZwXF3KtqBb-NGDVYclj5gkO7IlfV8-T7gM7kBDPX1VM,28458
50
+ sacrebleu/significance.py,sha256=PDO7mCu5zL-7XNc9tA7QKs0C3TkcfrJgbKqYXwnXTp4,18329
51
+ sacrebleu/tokenizers/__init__.py,sha256=V1unPdEJPrfDKNDYR1VSLu1bXCX0Fr8Uu8y9pNcZXGA,89
52
+ sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc,,
53
+ sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc,,
54
+ sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc,,
55
+ sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc,,
56
+ sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc,,
57
+ sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc,,
58
+ sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc,,
59
+ sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc,,
60
+ sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc,,
61
+ sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc,,
62
+ sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc,,
63
+ sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc,,
64
+ sacrebleu/tokenizers/tokenizer_13a.py,sha256=_1ClpQPIGqRj6uaklsFegAvSZUtlbE-yztmr51dLirU,985
65
+ sacrebleu/tokenizers/tokenizer_base.py,sha256=YNvqL3oW3rsCtUbMNat6RhlYSn2VlZuPHgYHBj6jfJg,461
66
+ sacrebleu/tokenizers/tokenizer_char.py,sha256=ubPsBjzNXqFPJ7WEpHQ2XD2ZWWqPgeOli9ErHOxmjTw,458
67
+ sacrebleu/tokenizers/tokenizer_intl.py,sha256=EGayKRqqcY6oV-Zstah4mrYw979W5jK0AWK8Uo3C55Q,1869
68
+ sacrebleu/tokenizers/tokenizer_ja_mecab.py,sha256=WYHc7xtAruICGfe-D6RYmnyrXxrlHlyNXVqQVs-6l1o,1420
69
+ sacrebleu/tokenizers/tokenizer_ko_mecab.py,sha256=BWTEkHp8d2dJOAmlqdBtLt2MHJtpOdM78CWztQ-qS00,1455
70
+ sacrebleu/tokenizers/tokenizer_none.py,sha256=0VX-qoUM3HzuLpF59L-gHjNgmoXXXpco23zYJtwmEmw,236
71
+ sacrebleu/tokenizers/tokenizer_re.py,sha256=OGck3pPhwibf48Dssy-6cZObsHNetwb3IIJnUYGRiAg,1271
72
+ sacrebleu/tokenizers/tokenizer_spm.py,sha256=otBUswldtMcpE95vvIDf-74eddy35_9p88mgv9J5T9g,2096
73
+ sacrebleu/tokenizers/tokenizer_ter.py,sha256=37KPsx0jwsSZsqZ7UJKWAYyttvqS11SxzkHZIjrZBdA,6219
74
+ sacrebleu/tokenizers/tokenizer_zh.py,sha256=GmBe3E29uECf61dFsR_qAHNzATEkgwwp0XAqb8de9wU,4724
75
+ sacrebleu/utils.py,sha256=lwyJiKniOQYhX5Yb2Dfe2kTROa0Fs-q-weO2kIVAz00,22530
env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.40.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [console_scripts]
2
+ sacrebleu = sacrebleu.sacrebleu:main
env-llmeval/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ sacrebleu
env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/METADATA ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: urllib3
3
+ Version: 2.2.1
4
+ Summary: HTTP library with thread-safe connection pooling, file post, and more.
5
+ Project-URL: Changelog, https://github.com/urllib3/urllib3/blob/main/CHANGES.rst
6
+ Project-URL: Documentation, https://urllib3.readthedocs.io
7
+ Project-URL: Code, https://github.com/urllib3/urllib3
8
+ Project-URL: Issue tracker, https://github.com/urllib3/urllib3/issues
9
+ Author-email: Andrey Petrov <[email protected]>
10
+ Maintainer-email: Seth Michael Larson <[email protected]>, Quentin Pradet <[email protected]>, Illia Volochii <[email protected]>
11
+ License-File: LICENSE.txt
12
+ Keywords: filepost,http,httplib,https,pooling,ssl,threadsafe,urllib
13
+ Classifier: Environment :: Web Environment
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: MIT License
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3 :: Only
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3.12
25
+ Classifier: Programming Language :: Python :: Implementation :: CPython
26
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
27
+ Classifier: Topic :: Internet :: WWW/HTTP
28
+ Classifier: Topic :: Software Development :: Libraries
29
+ Requires-Python: >=3.8
30
+ Provides-Extra: brotli
31
+ Requires-Dist: brotli>=1.0.9; (platform_python_implementation == 'CPython') and extra == 'brotli'
32
+ Requires-Dist: brotlicffi>=0.8.0; (platform_python_implementation != 'CPython') and extra == 'brotli'
33
+ Provides-Extra: h2
34
+ Requires-Dist: h2<5,>=4; extra == 'h2'
35
+ Provides-Extra: socks
36
+ Requires-Dist: pysocks!=1.5.7,<2.0,>=1.5.6; extra == 'socks'
37
+ Provides-Extra: zstd
38
+ Requires-Dist: zstandard>=0.18.0; extra == 'zstd'
39
+ Description-Content-Type: text/markdown
40
+
41
+ <h1 align="center">
42
+
43
+ ![urllib3](https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg)
44
+
45
+ </h1>
46
+
47
+ <p align="center">
48
+ <a href="https://pypi.org/project/urllib3"><img alt="PyPI Version" src="https://img.shields.io/pypi/v/urllib3.svg?maxAge=86400" /></a>
49
+ <a href="https://pypi.org/project/urllib3"><img alt="Python Versions" src="https://img.shields.io/pypi/pyversions/urllib3.svg?maxAge=86400" /></a>
50
+ <a href="https://discord.gg/urllib3"><img alt="Join our Discord" src="https://img.shields.io/discord/756342717725933608?color=%237289da&label=discord" /></a>
51
+ <a href="https://github.com/urllib3/urllib3/actions?query=workflow%3ACI"><img alt="Coverage Status" src="https://img.shields.io/badge/coverage-100%25-success" /></a>
52
+ <a href="https://github.com/urllib3/urllib3/actions?query=workflow%3ACI"><img alt="Build Status on GitHub" src="https://github.com/urllib3/urllib3/workflows/CI/badge.svg" /></a>
53
+ <a href="https://urllib3.readthedocs.io"><img alt="Documentation Status" src="https://readthedocs.org/projects/urllib3/badge/?version=latest" /></a><br>
54
+ <a href="https://deps.dev/pypi/urllib3"><img alt="OpenSSF Scorecard" src="https://api.securityscorecards.dev/projects/github.com/urllib3/urllib3/badge" /></a>
55
+ <a href="https://slsa.dev"><img alt="SLSA 3" src="https://slsa.dev/images/gh-badge-level3.svg" /></a>
56
+ <a href="https://bestpractices.coreinfrastructure.org/projects/6227"><img alt="CII Best Practices" src="https://bestpractices.coreinfrastructure.org/projects/6227/badge" /></a>
57
+ </p>
58
+
59
+ urllib3 is a powerful, *user-friendly* HTTP client for Python. Much of the
60
+ Python ecosystem already uses urllib3 and you should too.
61
+ urllib3 brings many critical features that are missing from the Python
62
+ standard libraries:
63
+
64
+ - Thread safety.
65
+ - Connection pooling.
66
+ - Client-side SSL/TLS verification.
67
+ - File uploads with multipart encoding.
68
+ - Helpers for retrying requests and dealing with HTTP redirects.
69
+ - Support for gzip, deflate, brotli, and zstd encoding.
70
+ - Proxy support for HTTP and SOCKS.
71
+ - 100% test coverage.
72
+
73
+ urllib3 is powerful and easy to use:
74
+
75
+ ```python3
76
+ >>> import urllib3
77
+ >>> resp = urllib3.request("GET", "http://httpbin.org/robots.txt")
78
+ >>> resp.status
79
+ 200
80
+ >>> resp.data
81
+ b"User-agent: *\nDisallow: /deny\n"
82
+ ```
83
+
84
+ ## Installing
85
+
86
+ urllib3 can be installed with [pip](https://pip.pypa.io):
87
+
88
+ ```bash
89
+ $ python -m pip install urllib3
90
+ ```
91
+
92
+ Alternatively, you can grab the latest source code from [GitHub](https://github.com/urllib3/urllib3):
93
+
94
+ ```bash
95
+ $ git clone https://github.com/urllib3/urllib3.git
96
+ $ cd urllib3
97
+ $ pip install .
98
+ ```
99
+
100
+
101
+ ## Documentation
102
+
103
+ urllib3 has usage and reference documentation at [urllib3.readthedocs.io](https://urllib3.readthedocs.io).
104
+
105
+
106
+ ## Community
107
+
108
+ urllib3 has a [community Discord channel](https://discord.gg/urllib3) for asking questions and
109
+ collaborating with other contributors. Drop by and say hello 👋
110
+
111
+
112
+ ## Contributing
113
+
114
+ urllib3 happily accepts contributions. Please see our
115
+ [contributing documentation](https://urllib3.readthedocs.io/en/latest/contributing.html)
116
+ for some tips on getting started.
117
+
118
+
119
+ ## Security Disclosures
120
+
121
+ To report a security vulnerability, please use the
122
+ [Tidelift security contact](https://tidelift.com/security).
123
+ Tidelift will coordinate the fix and disclosure with maintainers.
124
+
125
+
126
+ ## Maintainers
127
+
128
+ - [@sethmlarson](https://github.com/sethmlarson) (Seth M. Larson)
129
+ - [@pquentin](https://github.com/pquentin) (Quentin Pradet)
130
+ - [@illia-v](https://github.com/illia-v) (Illia Volochii)
131
+ - [@theacodes](https://github.com/theacodes) (Thea Flowers)
132
+ - [@haikuginger](https://github.com/haikuginger) (Jess Shapiro)
133
+ - [@lukasa](https://github.com/lukasa) (Cory Benfield)
134
+ - [@sigmavirus24](https://github.com/sigmavirus24) (Ian Stapleton Cordasco)
135
+ - [@shazow](https://github.com/shazow) (Andrey Petrov)
136
+
137
+ 👋
138
+
139
+
140
+ ## Sponsorship
141
+
142
+ If your company benefits from this library, please consider [sponsoring its
143
+ development](https://urllib3.readthedocs.io/en/latest/sponsors.html).
144
+
145
+
146
+ ## For Enterprise
147
+
148
+ Professional support for urllib3 is available as part of the [Tidelift
149
+ Subscription][1]. Tidelift gives software development teams a single source for
150
+ purchasing and maintaining their software, with professional grade assurances
151
+ from the experts who know it best, while seamlessly integrating with existing
152
+ tools.
153
+
154
+ [1]: https://tidelift.com/subscription/pkg/pypi-urllib3?utm_source=pypi-urllib3&utm_medium=referral&utm_campaign=readme
env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ urllib3-2.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ urllib3-2.2.1.dist-info/METADATA,sha256=uROmjQwfAbwRYjV9PMdc5JF5NA3kRkpoKafPkNzybfc,6434
3
+ urllib3-2.2.1.dist-info/RECORD,,
4
+ urllib3-2.2.1.dist-info/WHEEL,sha256=TJPnKdtrSue7xZ_AVGkp9YXcvDrobsjBds1du3Nx6dc,87
5
+ urllib3-2.2.1.dist-info/licenses/LICENSE.txt,sha256=Ew46ZNX91dCWp1JpRjSn2d8oRGnehuVzIQAmgEHj1oY,1093
6
+ urllib3/__init__.py,sha256=JMo1tg1nIV1AeJ2vENC_Txfl0e5h6Gzl9DGVk1rWRbo,6979
7
+ urllib3/__pycache__/__init__.cpython-310.pyc,,
8
+ urllib3/__pycache__/_base_connection.cpython-310.pyc,,
9
+ urllib3/__pycache__/_collections.cpython-310.pyc,,
10
+ urllib3/__pycache__/_request_methods.cpython-310.pyc,,
11
+ urllib3/__pycache__/_version.cpython-310.pyc,,
12
+ urllib3/__pycache__/connection.cpython-310.pyc,,
13
+ urllib3/__pycache__/connectionpool.cpython-310.pyc,,
14
+ urllib3/__pycache__/exceptions.cpython-310.pyc,,
15
+ urllib3/__pycache__/fields.cpython-310.pyc,,
16
+ urllib3/__pycache__/filepost.cpython-310.pyc,,
17
+ urllib3/__pycache__/http2.cpython-310.pyc,,
18
+ urllib3/__pycache__/poolmanager.cpython-310.pyc,,
19
+ urllib3/__pycache__/response.cpython-310.pyc,,
20
+ urllib3/_base_connection.py,sha256=p-DOG_Me7-sJXO1R9VgDpNmdVU_kIS8VtaC7ptEllA0,5640
21
+ urllib3/_collections.py,sha256=vzKA-7X-9resOamEWq52uV1nHshChjbYDvz47H0mMjw,17400
22
+ urllib3/_request_methods.py,sha256=ucEpHQyQf06b9o1RxKLkCpzGH0ct-v7X2xGpU6rmmlo,9984
23
+ urllib3/_version.py,sha256=12idLAcGmrAURPX52rGioBo33oQ__-ENJEdeqHvUUZg,98
24
+ urllib3/connection.py,sha256=zFgaaoqrICsl7-kBp-_4va9m82sYhioAuy4-4iDpK0I,34704
25
+ urllib3/connectionpool.py,sha256=XjTfYowLwN5ZzRMO41_OTbGNX4ANifgYVpWsVMRuC00,43556
26
+ urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
27
+ urllib3/contrib/__pycache__/__init__.cpython-310.pyc,,
28
+ urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc,,
29
+ urllib3/contrib/__pycache__/socks.cpython-310.pyc,,
30
+ urllib3/contrib/emscripten/__init__.py,sha256=u6KNgzjlFZbuAAXa_ybCR7gQ71VJESnF-IIdDA73brw,733
31
+ urllib3/contrib/emscripten/__pycache__/__init__.cpython-310.pyc,,
32
+ urllib3/contrib/emscripten/__pycache__/connection.cpython-310.pyc,,
33
+ urllib3/contrib/emscripten/__pycache__/fetch.cpython-310.pyc,,
34
+ urllib3/contrib/emscripten/__pycache__/request.cpython-310.pyc,,
35
+ urllib3/contrib/emscripten/__pycache__/response.cpython-310.pyc,,
36
+ urllib3/contrib/emscripten/connection.py,sha256=kaBe2tWt7Yy9vNUFRBV7CSyDnfhCYILGxju9KTZj8Sw,8755
37
+ urllib3/contrib/emscripten/emscripten_fetch_worker.js,sha256=CDfYF_9CDobtx2lGidyJ1zjDEvwNT5F-dchmVWXDh0E,3655
38
+ urllib3/contrib/emscripten/fetch.py,sha256=ymwJlHBBuw6WTpKgPHpdmmrNBxlsr75HqoD4Rn27YXk,14131
39
+ urllib3/contrib/emscripten/request.py,sha256=mL28szy1KvE3NJhWor5jNmarp8gwplDU-7gwGZY5g0Q,566
40
+ urllib3/contrib/emscripten/response.py,sha256=wIDmdJ4doFWqLl5s86l9n0V70gFjQ2HWaPgz69jM52E,9546
41
+ urllib3/contrib/pyopenssl.py,sha256=X31eCYGwB09EkAHX8RhDKC0X0Ki7d0cCVWoMJZUM5bQ,19161
42
+ urllib3/contrib/socks.py,sha256=gFS2-zOw4_vLGpUvExOf3fNVT8liz6vhM2t6lBPn3CY,7572
43
+ urllib3/exceptions.py,sha256=RDaiudtR7rqbVKTKpLSgZBBtwaIqV7eZtervZV_mZag,9393
44
+ urllib3/fields.py,sha256=8vi0PeRo_pE5chPmJA07LZtMkVls4UrBS1k2xM506jM,10843
45
+ urllib3/filepost.py,sha256=-9qJT11cNGjO9dqnI20-oErZuTvNaM18xZZPCjZSbOE,2395
46
+ urllib3/http2.py,sha256=4QQcjTM9UYOQZe0r8KnA8anU9ST4p_s3SB3gRTueyPc,7480
47
+ urllib3/poolmanager.py,sha256=fcC3OwjFKxha06NsOORwbZOzrVt1pyY-bNCbKiqC0l8,22935
48
+ urllib3/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93
49
+ urllib3/response.py,sha256=lmvseToQbkLXuFyA3jcSyCPjTgSfa6YPA4xUhVqq8QI,43874
50
+ urllib3/util/__init__.py,sha256=-qeS0QceivazvBEKDNFCAI-6ACcdDOE4TMvo7SLNlAQ,1001
51
+ urllib3/util/__pycache__/__init__.cpython-310.pyc,,
52
+ urllib3/util/__pycache__/connection.cpython-310.pyc,,
53
+ urllib3/util/__pycache__/proxy.cpython-310.pyc,,
54
+ urllib3/util/__pycache__/request.cpython-310.pyc,,
55
+ urllib3/util/__pycache__/response.cpython-310.pyc,,
56
+ urllib3/util/__pycache__/retry.cpython-310.pyc,,
57
+ urllib3/util/__pycache__/ssl_.cpython-310.pyc,,
58
+ urllib3/util/__pycache__/ssl_match_hostname.cpython-310.pyc,,
59
+ urllib3/util/__pycache__/ssltransport.cpython-310.pyc,,
60
+ urllib3/util/__pycache__/timeout.cpython-310.pyc,,
61
+ urllib3/util/__pycache__/url.cpython-310.pyc,,
62
+ urllib3/util/__pycache__/util.cpython-310.pyc,,
63
+ urllib3/util/__pycache__/wait.cpython-310.pyc,,
64
+ urllib3/util/connection.py,sha256=QeUUEuNmhznpuKNPL-B0IVOkMdMCu8oJX62OC0Vpzug,4462
65
+ urllib3/util/proxy.py,sha256=seP8-Q5B6bB0dMtwPj-YcZZQ30vHuLqRu-tI0JZ2fzs,1148
66
+ urllib3/util/request.py,sha256=PQnBmKUHMQ0hQQ41uhbLNAeA24ke60m6zeiwfwocpGo,8102
67
+ urllib3/util/response.py,sha256=vQE639uoEhj1vpjEdxu5lNIhJCSUZkd7pqllUI0BZOA,3374
68
+ urllib3/util/retry.py,sha256=WB-7x1m7fQH_-Qqtrk2OGvz93GvBTxc-pRn8Vf3p4mg,18384
69
+ urllib3/util/ssl_.py,sha256=FeymdS68RggEROwMB9VLGSqLHq2hRUKnIbQC_bCpGJI,19109
70
+ urllib3/util/ssl_match_hostname.py,sha256=gaWqixoYtQ_GKO8fcRGFj3VXeMoqyxQQuUTPgWeiL_M,5812
71
+ urllib3/util/ssltransport.py,sha256=SF__JQXVcHBQniFJZp3P9q-UeHM310WVwcBwqT9dCLE,9034
72
+ urllib3/util/timeout.py,sha256=4eT1FVeZZU7h7mYD1Jq2OXNe4fxekdNvhoWUkZusRpA,10346
73
+ urllib3/util/url.py,sha256=wHORhp80RAXyTlAIkTqLFzSrkU7J34ZDxX-tN65MBZk,15213
74
+ urllib3/util/util.py,sha256=j3lbZK1jPyiwD34T8IgJzdWEZVT-4E-0vYIJi9UjeNA,1146
75
+ urllib3/util/wait.py,sha256=_ph8IrUR3sqPqi0OopQgJUlH4wzkGeM5CiyA7XGGtmI,4423
env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.21.1
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
env-llmeval/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/licenses/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2008-2020 Andrey Petrov and contributors.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
env-llmeval/lib/python3.10/site-packages/urllib3/__init__.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ # Set default logging handler to avoid "No handler found" warnings.
8
+ import logging
9
+ import sys
10
+ import typing
11
+ import warnings
12
+ from logging import NullHandler
13
+
14
+ from . import exceptions
15
+ from ._base_connection import _TYPE_BODY
16
+ from ._collections import HTTPHeaderDict
17
+ from ._version import __version__
18
+ from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
19
+ from .filepost import _TYPE_FIELDS, encode_multipart_formdata
20
+ from .poolmanager import PoolManager, ProxyManager, proxy_from_url
21
+ from .response import BaseHTTPResponse, HTTPResponse
22
+ from .util.request import make_headers
23
+ from .util.retry import Retry
24
+ from .util.timeout import Timeout
25
+
26
+ # Ensure that Python is compiled with OpenSSL 1.1.1+
27
+ # If the 'ssl' module isn't available at all that's
28
+ # fine, we only care if the module is available.
29
+ try:
30
+ import ssl
31
+ except ImportError:
32
+ pass
33
+ else:
34
+ if not ssl.OPENSSL_VERSION.startswith("OpenSSL "): # Defensive:
35
+ warnings.warn(
36
+ "urllib3 v2 only supports OpenSSL 1.1.1+, currently "
37
+ f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. "
38
+ "See: https://github.com/urllib3/urllib3/issues/3020",
39
+ exceptions.NotOpenSSLWarning,
40
+ )
41
+ elif ssl.OPENSSL_VERSION_INFO < (1, 1, 1): # Defensive:
42
+ raise ImportError(
43
+ "urllib3 v2 only supports OpenSSL 1.1.1+, currently "
44
+ f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. "
45
+ "See: https://github.com/urllib3/urllib3/issues/2168"
46
+ )
47
+
48
+ __author__ = "Andrey Petrov ([email protected])"
49
+ __license__ = "MIT"
50
+ __version__ = __version__
51
+
52
+ __all__ = (
53
+ "HTTPConnectionPool",
54
+ "HTTPHeaderDict",
55
+ "HTTPSConnectionPool",
56
+ "PoolManager",
57
+ "ProxyManager",
58
+ "HTTPResponse",
59
+ "Retry",
60
+ "Timeout",
61
+ "add_stderr_logger",
62
+ "connection_from_url",
63
+ "disable_warnings",
64
+ "encode_multipart_formdata",
65
+ "make_headers",
66
+ "proxy_from_url",
67
+ "request",
68
+ "BaseHTTPResponse",
69
+ )
70
+
71
+ logging.getLogger(__name__).addHandler(NullHandler())
72
+
73
+
74
+ def add_stderr_logger(
75
+ level: int = logging.DEBUG,
76
+ ) -> logging.StreamHandler[typing.TextIO]:
77
+ """
78
+ Helper for quickly adding a StreamHandler to the logger. Useful for
79
+ debugging.
80
+
81
+ Returns the handler after adding it.
82
+ """
83
+ # This method needs to be in this __init__.py to get the __name__ correct
84
+ # even if urllib3 is vendored within another package.
85
+ logger = logging.getLogger(__name__)
86
+ handler = logging.StreamHandler()
87
+ handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
88
+ logger.addHandler(handler)
89
+ logger.setLevel(level)
90
+ logger.debug("Added a stderr logging handler to logger: %s", __name__)
91
+ return handler
92
+
93
+
94
+ # ... Clean up.
95
+ del NullHandler
96
+
97
+
98
+ # All warning filters *must* be appended unless you're really certain that they
99
+ # shouldn't be: otherwise, it's very hard for users to use most Python
100
+ # mechanisms to silence them.
101
+ # SecurityWarning's always go off by default.
102
+ warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
103
+ # InsecurePlatformWarning's don't vary between requests, so we keep it default.
104
+ warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
105
+
106
+
107
+ def disable_warnings(category: type[Warning] = exceptions.HTTPWarning) -> None:
108
+ """
109
+ Helper for quickly disabling all urllib3 warnings.
110
+ """
111
+ warnings.simplefilter("ignore", category)
112
+
113
+
114
+ _DEFAULT_POOL = PoolManager()
115
+
116
+
117
+ def request(
118
+ method: str,
119
+ url: str,
120
+ *,
121
+ body: _TYPE_BODY | None = None,
122
+ fields: _TYPE_FIELDS | None = None,
123
+ headers: typing.Mapping[str, str] | None = None,
124
+ preload_content: bool | None = True,
125
+ decode_content: bool | None = True,
126
+ redirect: bool | None = True,
127
+ retries: Retry | bool | int | None = None,
128
+ timeout: Timeout | float | int | None = 3,
129
+ json: typing.Any | None = None,
130
+ ) -> BaseHTTPResponse:
131
+ """
132
+ A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.
133
+ Therefore, its side effects could be shared across dependencies relying on it.
134
+ To avoid side effects create a new ``PoolManager`` instance and use it instead.
135
+ The method does not accept low-level ``**urlopen_kw`` keyword arguments.
136
+
137
+ :param method:
138
+ HTTP request method (such as GET, POST, PUT, etc.)
139
+
140
+ :param url:
141
+ The URL to perform the request on.
142
+
143
+ :param body:
144
+ Data to send in the request body, either :class:`str`, :class:`bytes`,
145
+ an iterable of :class:`str`/:class:`bytes`, or a file-like object.
146
+
147
+ :param fields:
148
+ Data to encode and send in the request body.
149
+
150
+ :param headers:
151
+ Dictionary of custom headers to send, such as User-Agent,
152
+ If-None-Match, etc.
153
+
154
+ :param bool preload_content:
155
+ If True, the response's body will be preloaded into memory.
156
+
157
+ :param bool decode_content:
158
+ If True, will attempt to decode the body based on the
159
+ 'content-encoding' header.
160
+
161
+ :param redirect:
162
+ If True, automatically handle redirects (status codes 301, 302,
163
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
164
+ will disable redirect, too.
165
+
166
+ :param retries:
167
+ Configure the number of retries to allow before raising a
168
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
169
+
170
+ If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a
171
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
172
+ over different types of retries.
173
+ Pass an integer number to retry connection errors that many times,
174
+ but no other types of errors. Pass zero to never retry.
175
+
176
+ If ``False``, then retries are disabled and any exception is raised
177
+ immediately. Also, instead of raising a MaxRetryError on redirects,
178
+ the redirect response will be returned.
179
+
180
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
181
+
182
+ :param timeout:
183
+ If specified, overrides the default timeout for this one
184
+ request. It may be a float (in seconds) or an instance of
185
+ :class:`urllib3.util.Timeout`.
186
+
187
+ :param json:
188
+ Data to encode and send as JSON with UTF-encoded in the request body.
189
+ The ``"Content-Type"`` header will be set to ``"application/json"``
190
+ unless specified otherwise.
191
+ """
192
+
193
+ return _DEFAULT_POOL.request(
194
+ method,
195
+ url,
196
+ body=body,
197
+ fields=fields,
198
+ headers=headers,
199
+ preload_content=preload_content,
200
+ decode_content=decode_content,
201
+ redirect=redirect,
202
+ retries=retries,
203
+ timeout=timeout,
204
+ json=json,
205
+ )
206
+
207
+
208
+ if sys.platform == "emscripten":
209
+ from .contrib.emscripten import inject_into_urllib3 # noqa: 401
210
+
211
+ inject_into_urllib3()
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_base_connection.cpython-310.pyc ADDED
Binary file (5.69 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_collections.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_request_methods.cpython-310.pyc ADDED
Binary file (9.14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/_version.cpython-310.pyc ADDED
Binary file (244 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/connection.cpython-310.pyc ADDED
Binary file (22.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/connectionpool.cpython-310.pyc ADDED
Binary file (29.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/fields.cpython-310.pyc ADDED
Binary file (9.72 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/filepost.cpython-310.pyc ADDED
Binary file (2.35 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/http2.cpython-310.pyc ADDED
Binary file (7.24 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/poolmanager.cpython-310.pyc ADDED
Binary file (18.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/__pycache__/response.cpython-310.pyc ADDED
Binary file (32.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/urllib3/_base_connection.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import typing
4
+
5
+ from .util.connection import _TYPE_SOCKET_OPTIONS
6
+ from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT
7
+ from .util.url import Url
8
+
9
+ _TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]
10
+
11
+
12
+ class ProxyConfig(typing.NamedTuple):
13
+ ssl_context: ssl.SSLContext | None
14
+ use_forwarding_for_https: bool
15
+ assert_hostname: None | str | Literal[False]
16
+ assert_fingerprint: str | None
17
+
18
+
19
+ class _ResponseOptions(typing.NamedTuple):
20
+ # TODO: Remove this in favor of a better
21
+ # HTTP request/response lifecycle tracking.
22
+ request_method: str
23
+ request_url: str
24
+ preload_content: bool
25
+ decode_content: bool
26
+ enforce_content_length: bool
27
+
28
+
29
+ if typing.TYPE_CHECKING:
30
+ import ssl
31
+ from typing import Literal, Protocol
32
+
33
+ from .response import BaseHTTPResponse
34
+
35
+ class BaseHTTPConnection(Protocol):
36
+ default_port: typing.ClassVar[int]
37
+ default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]
38
+
39
+ host: str
40
+ port: int
41
+ timeout: None | (
42
+ float
43
+ ) # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved.
44
+ blocksize: int
45
+ source_address: tuple[str, int] | None
46
+ socket_options: _TYPE_SOCKET_OPTIONS | None
47
+
48
+ proxy: Url | None
49
+ proxy_config: ProxyConfig | None
50
+
51
+ is_verified: bool
52
+ proxy_is_verified: bool | None
53
+
54
+ def __init__(
55
+ self,
56
+ host: str,
57
+ port: int | None = None,
58
+ *,
59
+ timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
60
+ source_address: tuple[str, int] | None = None,
61
+ blocksize: int = 8192,
62
+ socket_options: _TYPE_SOCKET_OPTIONS | None = ...,
63
+ proxy: Url | None = None,
64
+ proxy_config: ProxyConfig | None = None,
65
+ ) -> None:
66
+ ...
67
+
68
+ def set_tunnel(
69
+ self,
70
+ host: str,
71
+ port: int | None = None,
72
+ headers: typing.Mapping[str, str] | None = None,
73
+ scheme: str = "http",
74
+ ) -> None:
75
+ ...
76
+
77
+ def connect(self) -> None:
78
+ ...
79
+
80
+ def request(
81
+ self,
82
+ method: str,
83
+ url: str,
84
+ body: _TYPE_BODY | None = None,
85
+ headers: typing.Mapping[str, str] | None = None,
86
+ # We know *at least* botocore is depending on the order of the
87
+ # first 3 parameters so to be safe we only mark the later ones
88
+ # as keyword-only to ensure we have space to extend.
89
+ *,
90
+ chunked: bool = False,
91
+ preload_content: bool = True,
92
+ decode_content: bool = True,
93
+ enforce_content_length: bool = True,
94
+ ) -> None:
95
+ ...
96
+
97
+ def getresponse(self) -> BaseHTTPResponse:
98
+ ...
99
+
100
+ def close(self) -> None:
101
+ ...
102
+
103
+ @property
104
+ def is_closed(self) -> bool:
105
+ """Whether the connection either is brand new or has been previously closed.
106
+ If this property is True then both ``is_connected`` and ``has_connected_to_proxy``
107
+ properties must be False.
108
+ """
109
+
110
+ @property
111
+ def is_connected(self) -> bool:
112
+ """Whether the connection is actively connected to any origin (proxy or target)"""
113
+
114
+ @property
115
+ def has_connected_to_proxy(self) -> bool:
116
+ """Whether the connection has successfully connected to its proxy.
117
+ This returns False if no proxy is in use. Used to determine whether
118
+ errors are coming from the proxy layer or from tunnelling to the target origin.
119
+ """
120
+
121
+ class BaseHTTPSConnection(BaseHTTPConnection, Protocol):
122
+ default_port: typing.ClassVar[int]
123
+ default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]
124
+
125
+ # Certificate verification methods
126
+ cert_reqs: int | str | None
127
+ assert_hostname: None | str | Literal[False]
128
+ assert_fingerprint: str | None
129
+ ssl_context: ssl.SSLContext | None
130
+
131
+ # Trusted CAs
132
+ ca_certs: str | None
133
+ ca_cert_dir: str | None
134
+ ca_cert_data: None | str | bytes
135
+
136
+ # TLS version
137
+ ssl_minimum_version: int | None
138
+ ssl_maximum_version: int | None
139
+ ssl_version: int | str | None # Deprecated
140
+
141
+ # Client certificates
142
+ cert_file: str | None
143
+ key_file: str | None
144
+ key_password: str | None
145
+
146
+ def __init__(
147
+ self,
148
+ host: str,
149
+ port: int | None = None,
150
+ *,
151
+ timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
152
+ source_address: tuple[str, int] | None = None,
153
+ blocksize: int = 16384,
154
+ socket_options: _TYPE_SOCKET_OPTIONS | None = ...,
155
+ proxy: Url | None = None,
156
+ proxy_config: ProxyConfig | None = None,
157
+ cert_reqs: int | str | None = None,
158
+ assert_hostname: None | str | Literal[False] = None,
159
+ assert_fingerprint: str | None = None,
160
+ server_hostname: str | None = None,
161
+ ssl_context: ssl.SSLContext | None = None,
162
+ ca_certs: str | None = None,
163
+ ca_cert_dir: str | None = None,
164
+ ca_cert_data: None | str | bytes = None,
165
+ ssl_minimum_version: int | None = None,
166
+ ssl_maximum_version: int | None = None,
167
+ ssl_version: int | str | None = None, # Deprecated
168
+ cert_file: str | None = None,
169
+ key_file: str | None = None,
170
+ key_password: str | None = None,
171
+ ) -> None:
172
+ ...
env-llmeval/lib/python3.10/site-packages/urllib3/_collections.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import typing
4
+ from collections import OrderedDict
5
+ from enum import Enum, auto
6
+ from threading import RLock
7
+
8
+ if typing.TYPE_CHECKING:
9
+ # We can only import Protocol if TYPE_CHECKING because it's a development
10
+ # dependency, and is not available at runtime.
11
+ from typing import Protocol
12
+
13
+ from typing_extensions import Self
14
+
15
+ class HasGettableStringKeys(Protocol):
16
+ def keys(self) -> typing.Iterator[str]:
17
+ ...
18
+
19
+ def __getitem__(self, key: str) -> str:
20
+ ...
21
+
22
+
23
+ __all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
24
+
25
+
26
+ # Key type
27
+ _KT = typing.TypeVar("_KT")
28
+ # Value type
29
+ _VT = typing.TypeVar("_VT")
30
+ # Default type
31
+ _DT = typing.TypeVar("_DT")
32
+
33
+ ValidHTTPHeaderSource = typing.Union[
34
+ "HTTPHeaderDict",
35
+ typing.Mapping[str, str],
36
+ typing.Iterable[typing.Tuple[str, str]],
37
+ "HasGettableStringKeys",
38
+ ]
39
+
40
+
41
+ class _Sentinel(Enum):
42
+ not_passed = auto()
43
+
44
+
45
+ def ensure_can_construct_http_header_dict(
46
+ potential: object,
47
+ ) -> ValidHTTPHeaderSource | None:
48
+ if isinstance(potential, HTTPHeaderDict):
49
+ return potential
50
+ elif isinstance(potential, typing.Mapping):
51
+ # Full runtime checking of the contents of a Mapping is expensive, so for the
52
+ # purposes of typechecking, we assume that any Mapping is the right shape.
53
+ return typing.cast(typing.Mapping[str, str], potential)
54
+ elif isinstance(potential, typing.Iterable):
55
+ # Similarly to Mapping, full runtime checking of the contents of an Iterable is
56
+ # expensive, so for the purposes of typechecking, we assume that any Iterable
57
+ # is the right shape.
58
+ return typing.cast(typing.Iterable[typing.Tuple[str, str]], potential)
59
+ elif hasattr(potential, "keys") and hasattr(potential, "__getitem__"):
60
+ return typing.cast("HasGettableStringKeys", potential)
61
+ else:
62
+ return None
63
+
64
+
65
+ class RecentlyUsedContainer(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):
66
+ """
67
+ Provides a thread-safe dict-like container which maintains up to
68
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
69
+ ``maxsize``.
70
+
71
+ :param maxsize:
72
+ Maximum number of recent elements to retain.
73
+
74
+ :param dispose_func:
75
+ Every time an item is evicted from the container,
76
+ ``dispose_func(value)`` is called. Callback which will get called
77
+ """
78
+
79
+ _container: typing.OrderedDict[_KT, _VT]
80
+ _maxsize: int
81
+ dispose_func: typing.Callable[[_VT], None] | None
82
+ lock: RLock
83
+
84
+ def __init__(
85
+ self,
86
+ maxsize: int = 10,
87
+ dispose_func: typing.Callable[[_VT], None] | None = None,
88
+ ) -> None:
89
+ super().__init__()
90
+ self._maxsize = maxsize
91
+ self.dispose_func = dispose_func
92
+ self._container = OrderedDict()
93
+ self.lock = RLock()
94
+
95
+ def __getitem__(self, key: _KT) -> _VT:
96
+ # Re-insert the item, moving it to the end of the eviction line.
97
+ with self.lock:
98
+ item = self._container.pop(key)
99
+ self._container[key] = item
100
+ return item
101
+
102
+ def __setitem__(self, key: _KT, value: _VT) -> None:
103
+ evicted_item = None
104
+ with self.lock:
105
+ # Possibly evict the existing value of 'key'
106
+ try:
107
+ # If the key exists, we'll overwrite it, which won't change the
108
+ # size of the pool. Because accessing a key should move it to
109
+ # the end of the eviction line, we pop it out first.
110
+ evicted_item = key, self._container.pop(key)
111
+ self._container[key] = value
112
+ except KeyError:
113
+ # When the key does not exist, we insert the value first so that
114
+ # evicting works in all cases, including when self._maxsize is 0
115
+ self._container[key] = value
116
+ if len(self._container) > self._maxsize:
117
+ # If we didn't evict an existing value, and we've hit our maximum
118
+ # size, then we have to evict the least recently used item from
119
+ # the beginning of the container.
120
+ evicted_item = self._container.popitem(last=False)
121
+
122
+ # After releasing the lock on the pool, dispose of any evicted value.
123
+ if evicted_item is not None and self.dispose_func:
124
+ _, evicted_value = evicted_item
125
+ self.dispose_func(evicted_value)
126
+
127
+ def __delitem__(self, key: _KT) -> None:
128
+ with self.lock:
129
+ value = self._container.pop(key)
130
+
131
+ if self.dispose_func:
132
+ self.dispose_func(value)
133
+
134
+ def __len__(self) -> int:
135
+ with self.lock:
136
+ return len(self._container)
137
+
138
+ def __iter__(self) -> typing.NoReturn:
139
+ raise NotImplementedError(
140
+ "Iteration over this class is unlikely to be threadsafe."
141
+ )
142
+
143
+ def clear(self) -> None:
144
+ with self.lock:
145
+ # Copy pointers to all values, then wipe the mapping
146
+ values = list(self._container.values())
147
+ self._container.clear()
148
+
149
+ if self.dispose_func:
150
+ for value in values:
151
+ self.dispose_func(value)
152
+
153
+ def keys(self) -> set[_KT]: # type: ignore[override]
154
+ with self.lock:
155
+ return set(self._container.keys())
156
+
157
+
158
+ class HTTPHeaderDictItemView(typing.Set[typing.Tuple[str, str]]):
159
+ """
160
+ HTTPHeaderDict is unusual for a Mapping[str, str] in that it has two modes of
161
+ address.
162
+
163
+ If we directly try to get an item with a particular name, we will get a string
164
+ back that is the concatenated version of all the values:
165
+
166
+ >>> d['X-Header-Name']
167
+ 'Value1, Value2, Value3'
168
+
169
+ However, if we iterate over an HTTPHeaderDict's items, we will optionally combine
170
+ these values based on whether combine=True was called when building up the dictionary
171
+
172
+ >>> d = HTTPHeaderDict({"A": "1", "B": "foo"})
173
+ >>> d.add("A", "2", combine=True)
174
+ >>> d.add("B", "bar")
175
+ >>> list(d.items())
176
+ [
177
+ ('A', '1, 2'),
178
+ ('B', 'foo'),
179
+ ('B', 'bar'),
180
+ ]
181
+
182
+ This class conforms to the interface required by the MutableMapping ABC while
183
+ also giving us the nonstandard iteration behavior we want; items with duplicate
184
+ keys, ordered by time of first insertion.
185
+ """
186
+
187
+ _headers: HTTPHeaderDict
188
+
189
+ def __init__(self, headers: HTTPHeaderDict) -> None:
190
+ self._headers = headers
191
+
192
+ def __len__(self) -> int:
193
+ return len(list(self._headers.iteritems()))
194
+
195
+ def __iter__(self) -> typing.Iterator[tuple[str, str]]:
196
+ return self._headers.iteritems()
197
+
198
+ def __contains__(self, item: object) -> bool:
199
+ if isinstance(item, tuple) and len(item) == 2:
200
+ passed_key, passed_val = item
201
+ if isinstance(passed_key, str) and isinstance(passed_val, str):
202
+ return self._headers._has_value_for_header(passed_key, passed_val)
203
+ return False
204
+
205
+
206
class HTTPHeaderDict(typing.MutableMapping[str, str]):
    """
    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.

    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.

    A ``dict`` like container for storing HTTP Headers.

    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.

    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.

    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.

    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """

    # Internal invariant: maps the lowercased field name to a list of the form
    # [originally_cased_name, value1, value2, ...] — the first element is the
    # key as first seen, every later element is one stored value for it.
    _container: typing.MutableMapping[str, list[str]]

    def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):
        super().__init__()
        self._container = {}  # 'dict' is insert-ordered
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                # Fast path: copy the [key, *values] lists wholesale.
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)

    def __setitem__(self, key: str, val: str) -> None:
        """Replace ALL existing values for ``key`` with the single value ``val``."""
        # avoid a bytes/str comparison by decoding before httplib
        if isinstance(key, bytes):
            key = key.decode("latin-1")
        self._container[key.lower()] = [key, val]

    def __getitem__(self, key: str) -> str:
        """Return the value(s) for ``key`` joined with ", ". Raises KeyError if absent."""
        val = self._container[key.lower()]
        # val[0] is the original-cased key; everything after it is a value.
        return ", ".join(val[1:])

    def __delitem__(self, key: str) -> None:
        # Case-insensitive delete; raises KeyError if the field is absent.
        del self._container[key.lower()]

    def __contains__(self, key: object) -> bool:
        # Only str keys can be present; any other type is simply "not in".
        if isinstance(key, str):
            return key.lower() in self._container
        return False

    def setdefault(self, key: str, default: str = "") -> str:
        # Override only to change the default from None to "" for the str-typed API.
        return super().setdefault(key, default)

    def __eq__(self, other: object) -> bool:
        """Compare case-insensitively against anything header-dict-constructable."""
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return False
        else:
            other_as_http_header_dict = type(self)(maybe_constructable)

        # Merged (comma-joined) values under lowercased keys make the
        # comparison independent of key casing and value multiplicity order.
        return {k.lower(): v for k, v in self.itermerged()} == {
            k.lower(): v for k, v in other_as_http_header_dict.itermerged()
        }

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def __len__(self) -> int:
        # Number of distinct case-insensitive field names, not value count.
        return len(self._container)

    def __iter__(self) -> typing.Iterator[str]:
        # Only provide the originally cased names
        for vals in self._container.values():
            yield vals[0]

    def discard(self, key: str) -> None:
        """Delete ``key`` if present; unlike ``del``, silently ignore a missing key."""
        try:
            del self[key]
        except KeyError:
            pass

    def add(self, key: str, val: str, *, combine: bool = False) -> None:
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.

        If this is called with combine=True, instead of adding a new header value
        as a distinct item during iteration, this will instead append the value to
        any existing header value with a comma. If no existing header value exists
        for the key, then the value will simply be added, ignoring the combine parameter.

        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        >>> list(headers.items())
        [('foo', 'bar'), ('foo', 'baz')]
        >>> headers.add('foo', 'quz', combine=True)
        >>> list(headers.items())
        [('foo', 'bar, baz, quz')]
        """
        # avoid a bytes/str comparison by decoding before httplib
        if isinstance(key, bytes):
            key = key.decode("latin-1")
        key_lower = key.lower()
        new_vals = [key, val]
        # Keep the common case aka no item present as fast as possible
        vals = self._container.setdefault(key_lower, new_vals)
        if new_vals is not vals:
            # if there are values here, then there is at least the initial
            # key/value pair
            assert len(vals) >= 2
            if combine:
                # Fold into the last stored value instead of appending a new one.
                vals[-1] = vals[-1] + ", " + val
            else:
                vals.append(val)

    def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError(
                f"extend() takes at most 1 positional arguments ({len(args)} given)"
            )
        other = args[0] if len(args) >= 1 else ()

        # Branch order matters: HTTPHeaderDict is itself a Mapping and an
        # Iterable, so its duplicate-preserving path must be checked first.
        if isinstance(other, HTTPHeaderDict):
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, typing.Mapping):
            for key, val in other.items():
                self.add(key, val)
        elif isinstance(other, typing.Iterable):
            other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)
            for key, value in other:
                self.add(key, value)
        elif hasattr(other, "keys") and hasattr(other, "__getitem__"):
            # THIS IS NOT A TYPESAFE BRANCH
            # In this branch, the object has a `keys` attr but is not a Mapping or any of
            # the other types indicated in the method signature. We do some stuff with
            # it as though it partially implements the Mapping interface, but we're not
            # doing that stuff safely AT ALL.
            for key in other.keys():
                self.add(key, other[key])

        for key, value in kwargs.items():
            self.add(key, value)

    @typing.overload
    def getlist(self, key: str) -> list[str]:
        ...

    @typing.overload
    def getlist(self, key: str, default: _DT) -> list[str] | _DT:
        ...

    def getlist(
        self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed
    ) -> list[str] | _DT:
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            vals = self._container[key.lower()]
        except KeyError:
            if default is _Sentinel.not_passed:
                # _DT is unbound; empty list is instance of List[str]
                return []
            # _DT is bound; default is instance of _DT
            return default
        else:
            # _DT may or may not be bound; vals[1:] is instance of List[str], which
            # meets our external interface requirement of `Union[List[str], _DT]`.
            return vals[1:]

    def _prepare_for_method_change(self) -> Self:
        """
        Remove content-specific header fields before changing the request
        method to GET or HEAD according to RFC 9110, Section 15.4.
        """
        content_specific_headers = [
            "Content-Encoding",
            "Content-Language",
            "Content-Location",
            "Content-Type",
            "Content-Length",
            "Digest",
            "Last-Modified",
        ]
        for header in content_specific_headers:
            # discard() ignores headers that are not present.
            self.discard(header)
        return self

    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist

    # Backwards compatibility for http.cookiejar
    get_all = getlist

    def __repr__(self) -> str:
        return f"{type(self).__name__}({dict(self.itermerged())})"

    def _copy_from(self, other: HTTPHeaderDict) -> None:
        # Rebuild each [key, *values] list so the copies are independent.
        for key in other:
            val = other.getlist(key)
            self._container[key.lower()] = [key, *val]

    def copy(self) -> HTTPHeaderDict:
        """Return a shallow, duplicate-preserving copy of this header dict."""
        clone = type(self)()
        clone._copy_from(self)
        return clone

    def iteritems(self) -> typing.Iterator[tuple[str, str]]:
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            vals = self._container[key.lower()]
            for val in vals[1:]:
                yield vals[0], val

    def itermerged(self) -> typing.Iterator[tuple[str, str]]:
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            val = self._container[key.lower()]
            yield val[0], ", ".join(val[1:])

    def items(self) -> HTTPHeaderDictItemView:  # type: ignore[override]
        # A view rather than a list: len/iter/contains stay duplicate-aware.
        return HTTPHeaderDictItemView(self)

    def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:
        # Membership test for one exact (not comma-merged) stored value.
        if header_name in self:
            return potential_value in self._container[header_name.lower()][1:]
        return False

    def __ior__(self, other: object) -> HTTPHeaderDict:
        # Supports extending a header dict in-place using operator |=
        # combining items with add instead of __setitem__
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return NotImplemented
        self.extend(maybe_constructable)
        return self

    def __or__(self, other: object) -> HTTPHeaderDict:
        # Supports merging header dicts using operator |
        # combining items with add instead of __setitem__
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return NotImplemented
        result = self.copy()
        result.extend(maybe_constructable)
        return result

    def __ror__(self, other: object) -> HTTPHeaderDict:
        # Supports merging header dicts using operator | when other is on left side
        # combining items with add instead of __setitem__
        maybe_constructable = ensure_can_construct_http_header_dict(other)
        if maybe_constructable is None:
            return NotImplemented
        result = type(self)(maybe_constructable)
        result.extend(self)
        return result