applied-ai-018 committed
Commit 1f99b4d (verified) · 1 parent: 750c89d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50):
  1. env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER +1 -0
  2. env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE +201 -0
  3. env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA +128 -0
  4. env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD +10 -0
  5. env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL +5 -0
  6. env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt +1 -0
  7. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py +0 -0
  10. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py +405 -0
  11. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py +160 -0
  12. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py +20 -0
  13. env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/nltk/translate/bleu_score.py +685 -0
  20. env-llmeval/lib/python3.10/site-packages/nltk/translate/chrf_score.py +222 -0
  21. env-llmeval/lib/python3.10/site-packages/nltk/translate/gale_church.py +263 -0
  22. env-llmeval/lib/python3.10/site-packages/nltk/translate/gdfa.py +138 -0
  23. env-llmeval/lib/python3.10/site-packages/nltk/translate/gleu_score.py +190 -0
  24. env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm2.py +319 -0
  25. env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm5.py +663 -0
  26. env-llmeval/lib/python3.10/site-packages/nltk/translate/metrics.py +41 -0
  27. env-llmeval/lib/python3.10/site-packages/nltk/translate/nist_score.py +195 -0
  28. env-llmeval/lib/python3.10/site-packages/nltk/translate/phrase_based.py +193 -0
  29. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/INSTALLER +1 -0
  30. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/METADATA +115 -0
  31. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/RECORD +9 -0
  32. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/WHEEL +4 -0
  33. env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/licenses/LICENSE.txt +202 -0
  34. env-llmeval/lib/python3.10/site-packages/setuptools/__init__.py +242 -0
  35. env-llmeval/lib/python3.10/site-packages/setuptools/_deprecation_warning.py +7 -0
  36. env-llmeval/lib/python3.10/site-packages/setuptools/_distutils/version.py +363 -0
  37. env-llmeval/lib/python3.10/site-packages/setuptools/archive_util.py +205 -0
  38. env-llmeval/lib/python3.10/site-packages/setuptools/build_meta.py +290 -0
  39. env-llmeval/lib/python3.10/site-packages/setuptools/cli-32.exe +0 -0
  40. env-llmeval/lib/python3.10/site-packages/setuptools/cli-arm64.exe +0 -0
  41. env-llmeval/lib/python3.10/site-packages/setuptools/cli.exe +0 -0
  42. env-llmeval/lib/python3.10/site-packages/setuptools/errors.py +40 -0
  43. env-llmeval/lib/python3.10/site-packages/setuptools/extension.py +55 -0
  44. env-llmeval/lib/python3.10/site-packages/setuptools/extern/__init__.py +73 -0
  45. env-llmeval/lib/python3.10/site-packages/setuptools/gui-32.exe +0 -0
  46. env-llmeval/lib/python3.10/site-packages/setuptools/gui-64.exe +0 -0
  47. env-llmeval/lib/python3.10/site-packages/setuptools/gui.exe +0 -0
  48. env-llmeval/lib/python3.10/site-packages/setuptools/installer.py +104 -0
  49. env-llmeval/lib/python3.10/site-packages/setuptools/launch.py +36 -0
  50. env-llmeval/lib/python3.10/site-packages/setuptools/monkey.py +177 -0
env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2013-2019 Nikolay Kim and Andrew Svetlov
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA ADDED
@@ -0,0 +1,128 @@
+ Metadata-Version: 2.1
+ Name: aiosignal
+ Version: 1.3.1
+ Summary: aiosignal: a list of registered asynchronous callbacks
+ Home-page: https://github.com/aio-libs/aiosignal
+ Maintainer: aiohttp team <[email protected]>
+ Maintainer-email: [email protected]
+ License: Apache 2.0
+ Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+ Project-URL: CI: GitHub Actions, https://github.com/aio-libs/aiosignal/actions
+ Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/aiosignal
+ Project-URL: Docs: RTD, https://docs.aiosignal.org
+ Project-URL: GitHub: issues, https://github.com/aio-libs/aiosignal/issues
+ Project-URL: GitHub: repo, https://github.com/aio-libs/aiosignal
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Intended Audience :: Developers
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Operating System :: POSIX
+ Classifier: Operating System :: MacOS :: MacOS X
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Framework :: AsyncIO
+ Requires-Python: >=3.7
+ Description-Content-Type: text/x-rst
+ License-File: LICENSE
+ Requires-Dist: frozenlist (>=1.1.0)
+
+ =========
+ aiosignal
+ =========
+
+ .. image:: https://github.com/aio-libs/aiosignal/workflows/CI/badge.svg
+    :target: https://github.com/aio-libs/aiosignal/actions?query=workflow%3ACI
+    :alt: GitHub status for master branch
+
+ .. image:: https://codecov.io/gh/aio-libs/aiosignal/branch/master/graph/badge.svg
+    :target: https://codecov.io/gh/aio-libs/aiosignal
+    :alt: codecov.io status for master branch
+
+ .. image:: https://badge.fury.io/py/aiosignal.svg
+    :target: https://pypi.org/project/aiosignal
+    :alt: Latest PyPI package version
+
+ .. image:: https://readthedocs.org/projects/aiosignal/badge/?version=latest
+    :target: https://aiosignal.readthedocs.io/
+    :alt: Latest Read The Docs
+
+ .. image:: https://img.shields.io/discourse/topics?server=https%3A%2F%2Faio-libs.discourse.group%2F
+    :target: https://aio-libs.discourse.group/
+    :alt: Discourse group for io-libs
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+    :target: https://gitter.im/aio-libs/Lobby
+    :alt: Chat on Gitter
+
+ Introduction
+ ============
+
+ A project to manage callbacks in `asyncio` projects.
+
+ ``Signal`` is a list of registered asynchronous callbacks.
+
+ The signal's life-cycle has two stages: after creation its content
+ could be filled by using standard list operations: ``sig.append()``
+ etc.
+
+ After you call ``sig.freeze()`` the signal is *frozen*: adding, removing
+ and dropping callbacks is forbidden.
+
+ The only available operation is calling the previously registered
+ callbacks by using ``await sig.send(data)``.
+
+ For concrete usage examples see the `Signals
+ <https://docs.aiohttp.org/en/stable/web_advanced.html#aiohttp-web-signals>
+ section of the `Web Server Advanced
+ <https://docs.aiohttp.org/en/stable/web_advanced.html>` chapter of the `aiohttp
+ documentation`_.
+
+
+ Installation
+ ------------
+
+ ::
+
+    $ pip install aiosignal
+
+ The library requires Python 3.6 or newer.
+
+
+ Documentation
+ =============
+
+ https://aiosignal.readthedocs.io/
+
+ Communication channels
+ ======================
+
+ *gitter chat* https://gitter.im/aio-libs/Lobby
+
+ Requirements
+ ============
+
+ - Python >= 3.6
+ - frozenlist >= 1.0.0
+
+ License
+ =======
+
+ ``aiosignal`` is offered under the Apache 2 license.
+
+ Source code
+ ===========
+
+ The project is hosted on GitHub_
+
+ Please file an issue in the `bug tracker
+ <https://github.com/aio-libs/aiosignal/issues>`_ if you have found a bug
+ or have some suggestions to improve the library.
+
+ .. _GitHub: https://github.com/aio-libs/aiosignal
+ .. _aiohttp documentation: https://docs.aiohttp.org/
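The two-stage ``Signal`` life-cycle the README describes (fill via list operations, ``freeze()``, then ``await sig.send()``) can be exercised with a minimal sketch; this example is illustrative only and is not part of the committed files:

    import asyncio

    from aiosignal import Signal


    async def on_event(data):
        print("callback received:", data)


    async def main():
        sig = Signal(owner="demo")  # owner is only used for debugging output
        sig.append(on_event)        # stage 1: fill with standard list operations
        sig.freeze()                # stage 2: frozen; adding/removing is forbidden
        await sig.send("payload")   # the only remaining operation: invoke callbacks


    asyncio.run(main())

Calling ``send()`` on a non-frozen signal raises ``RuntimeError``, which is what makes the freeze step a safe barrier against late registration.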
env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+ aiosignal-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ aiosignal-1.3.1.dist-info/LICENSE,sha256=b9UkPpLdf5jsacesN3co50kFcJ_1J6W_mNbQJjwE9bY,11332
+ aiosignal-1.3.1.dist-info/METADATA,sha256=c0HRnlYzfXKztZPTFDlPfygizTherhG5WdwXlvco0Ug,4008
+ aiosignal-1.3.1.dist-info/RECORD,,
+ aiosignal-1.3.1.dist-info/WHEEL,sha256=ZL1lC_LiPDNRgDnOl2taCMc83aPEUZgHHv2h-LDgdiM,92
+ aiosignal-1.3.1.dist-info/top_level.txt,sha256=z45aNOKGDdrI1roqZY3BGXQ22kJFPHBmVdwtLYLtXC0,10
+ aiosignal/__init__.py,sha256=zQNfFYRSd84bswvpFv8ZWjEr5DeYwV3LXbMSyo2222s,867
+ aiosignal/__init__.pyi,sha256=xeCddYSS8fZAkz8S4HuKSR2IDe3N7RW_LKcXDPPA1Xk,311
+ aiosignal/__pycache__/__init__.cpython-310.pyc,,
+ aiosignal/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
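Each RECORD row above has the form ``path,sha256=<digest>,<size>``, where the digest is the file's SHA-256 hash encoded as URL-safe base64 with the ``=`` padding stripped, per the wheel spec. A small sketch for recomputing one entry (the path argument is whichever installed file you want to check):

    import base64
    import hashlib


    def record_digest(path):
        # RECORD stores sha256 digests as urlsafe base64 minus '=' padding.
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

As a sanity check, the zero-byte ``aiosignal/py.typed`` hashes to ``sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU``, matching the last RECORD line above.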
env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.38.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ aiosignal
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc ADDED
Binary file (5.34 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc ADDED
Binary file (9.25 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py ADDED
@@ -0,0 +1,405 @@
+ """
+ Tests for BLEU translation evaluation metric
+ """
+
+ import io
+ import unittest
+
+ from nltk.data import find
+ from nltk.translate.bleu_score import (
+     SmoothingFunction,
+     brevity_penalty,
+     closest_ref_length,
+     corpus_bleu,
+     modified_precision,
+     sentence_bleu,
+ )
+
+
+ class TestBLEU(unittest.TestCase):
+     def test_modified_precision(self):
+         """
+         Examples from the original BLEU paper
+         https://www.aclweb.org/anthology/P02-1040.pdf
+         """
+         # Example 1: the "the*" example.
+         # Reference sentences.
+         ref1 = "the cat is on the mat".split()
+         ref2 = "there is a cat on the mat".split()
+         # Hypothesis sentence(s).
+         hyp1 = "the the the the the the the".split()
+
+         references = [ref1, ref2]
+
+         # Testing modified unigram precision.
+         hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1))
+         assert round(hyp1_unigram_precision, 4) == 0.2857
+         # With assertAlmostEqual at 4 place precision.
+         self.assertAlmostEqual(hyp1_unigram_precision, 0.28571428, places=4)
+
+         # Testing modified bigram precision.
+         assert float(modified_precision(references, hyp1, n=2)) == 0.0
+
+         # Example 2: the "of the" example.
+         # Reference sentences
+         ref1 = str(
+             "It is a guide to action that ensures that the military "
+             "will forever heed Party commands"
+         ).split()
+         ref2 = str(
+             "It is the guiding principle which guarantees the military "
+             "forces always being under the command of the Party"
+         ).split()
+         ref3 = str(
+             "It is the practical guide for the army always to heed "
+             "the directions of the party"
+         ).split()
+         # Hypothesis sentence(s).
+         hyp1 = "of the".split()
+
+         references = [ref1, ref2, ref3]
+         # Testing modified unigram precision.
+         assert float(modified_precision(references, hyp1, n=1)) == 1.0
+
+         # Testing modified bigram precision.
+         assert float(modified_precision(references, hyp1, n=2)) == 1.0
+
+         # Example 3: Proper MT outputs.
+         hyp1 = str(
+             "It is a guide to action which ensures that the military "
+             "always obeys the commands of the party"
+         ).split()
+         hyp2 = str(
+             "It is to insure the troops forever hearing the activity "
+             "guidebook that party direct"
+         ).split()
+
+         references = [ref1, ref2, ref3]
+
+         # Unigram precision.
+         hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1))
+         hyp2_unigram_precision = float(modified_precision(references, hyp2, n=1))
+         # Test unigram precision with assertAlmostEqual at 4 place precision.
+         self.assertAlmostEqual(hyp1_unigram_precision, 0.94444444, places=4)
+         self.assertAlmostEqual(hyp2_unigram_precision, 0.57142857, places=4)
+         # Test unigram precision with rounding.
+         assert round(hyp1_unigram_precision, 4) == 0.9444
+         assert round(hyp2_unigram_precision, 4) == 0.5714
+
+         # Bigram precision
+         hyp1_bigram_precision = float(modified_precision(references, hyp1, n=2))
+         hyp2_bigram_precision = float(modified_precision(references, hyp2, n=2))
+         # Test bigram precision with assertAlmostEqual at 4 place precision.
+         self.assertAlmostEqual(hyp1_bigram_precision, 0.58823529, places=4)
+         self.assertAlmostEqual(hyp2_bigram_precision, 0.07692307, places=4)
+         # Test bigram precision with rounding.
+         assert round(hyp1_bigram_precision, 4) == 0.5882
+         assert round(hyp2_bigram_precision, 4) == 0.0769
+
+     def test_brevity_penalty(self):
+         # Test case from brevity_penalty_closest function in mteval-v13a.pl.
+         # Same test cases as in the doctest in nltk.translate.bleu_score.py
+         references = [["a"] * 11, ["a"] * 8]
+         hypothesis = ["a"] * 7
+         hyp_len = len(hypothesis)
+         closest_ref_len = closest_ref_length(references, hyp_len)
+         self.assertAlmostEqual(
+             brevity_penalty(closest_ref_len, hyp_len), 0.8669, places=4
+         )
+
+         references = [["a"] * 11, ["a"] * 8, ["a"] * 6, ["a"] * 7]
+         hypothesis = ["a"] * 7
+         hyp_len = len(hypothesis)
+         closest_ref_len = closest_ref_length(references, hyp_len)
+         assert brevity_penalty(closest_ref_len, hyp_len) == 1.0
+
+     def test_zero_matches(self):
+         # Test case where there's 0 matches
+         references = ["The candidate has no alignment to any of the references".split()]
+         hypothesis = "John loves Mary".split()
+
+         # Test BLEU to nth order of n-grams, where n is len(hypothesis).
+         for n in range(1, len(hypothesis)):
+             weights = (1.0 / n,) * n  # Uniform weights.
+             assert sentence_bleu(references, hypothesis, weights) == 0
+
+     def test_full_matches(self):
+         # Test case where there's 100% matches
+         references = ["John loves Mary".split()]
+         hypothesis = "John loves Mary".split()
+
+         # Test BLEU to nth order of n-grams, where n is len(hypothesis).
+         for n in range(1, len(hypothesis)):
+             weights = (1.0 / n,) * n  # Uniform weights.
+             assert sentence_bleu(references, hypothesis, weights) == 1.0
+
+     def test_partial_matches_hypothesis_longer_than_reference(self):
+         references = ["John loves Mary".split()]
+         hypothesis = "John loves Mary who loves Mike".split()
+         # Since no 4-grams matches were found the result should be zero
+         # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+         self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4)
+         # Checks that the warning has been raised because len(reference) < 4.
+         try:
+             self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
+         except AttributeError:
+             pass  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+
+
+ # @unittest.skip("Skipping fringe cases for BLEU.")
+ class TestBLEUFringeCases(unittest.TestCase):
+     def test_case_where_n_is_bigger_than_hypothesis_length(self):
+         # Test BLEU to nth order of n-grams, where n > len(hypothesis).
+         references = ["John loves Mary ?".split()]
+         hypothesis = "John loves Mary".split()
+         n = len(hypothesis) + 1  #
+         weights = (1.0 / n,) * n  # Uniform weights.
+         # Since no n-grams matches were found the result should be zero
+         # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+         self.assertAlmostEqual(
+             sentence_bleu(references, hypothesis, weights), 0.0, places=4
+         )
+         # Checks that the warning has been raised because len(hypothesis) < 4.
+         try:
+             self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
+         except AttributeError:
+             pass  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+
+         # Test case where n > len(hypothesis) but so is n > len(reference), and
+         # it's a special case where reference == hypothesis.
+         references = ["John loves Mary".split()]
+         hypothesis = "John loves Mary".split()
+         # Since no 4-grams matches were found the result should be zero
+         # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+         self.assertAlmostEqual(
+             sentence_bleu(references, hypothesis, weights), 0.0, places=4
+         )
+
+     def test_empty_hypothesis(self):
+         # Test case where there's hypothesis is empty.
+         references = ["The candidate has no alignment to any of the references".split()]
+         hypothesis = []
+         assert sentence_bleu(references, hypothesis) == 0
+
+     def test_length_one_hypothesis(self):
+         # Test case where there's hypothesis is of length 1 in Smoothing method 4.
+         references = ["The candidate has no alignment to any of the references".split()]
+         hypothesis = ["Foo"]
+         method4 = SmoothingFunction().method4
+         try:
+             sentence_bleu(references, hypothesis, smoothing_function=method4)
+         except ValueError:
+             pass  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+
+     def test_empty_references(self):
+         # Test case where there's reference is empty.
+         references = [[]]
+         hypothesis = "John loves Mary".split()
+         assert sentence_bleu(references, hypothesis) == 0
+
+     def test_empty_references_and_hypothesis(self):
+         # Test case where both references and hypothesis is empty.
+         references = [[]]
+         hypothesis = []
+         assert sentence_bleu(references, hypothesis) == 0
+
+     def test_reference_or_hypothesis_shorter_than_fourgrams(self):
+         # Test case where the length of reference or hypothesis
+         # is shorter than 4.
+         references = ["let it go".split()]
+         hypothesis = "let go it".split()
+         # Checks that the value the hypothesis and reference returns is 0.0
+         # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0
+         self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4)
+         # Checks that the warning has been raised.
+         try:
+             self.assertWarns(UserWarning, sentence_bleu, references, hypothesis)
+         except AttributeError:
+             pass  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+
+
+ class TestBLEUvsMteval13a(unittest.TestCase):
+     def test_corpus_bleu(self):
+         ref_file = find("models/wmt15_eval/ref.ru")
+         hyp_file = find("models/wmt15_eval/google.ru")
+         mteval_output_file = find("models/wmt15_eval/mteval-13a.output")
+
+         # Reads the BLEU scores from the `mteval-13a.output` file.
+         # The order of the list corresponds to the order of the ngrams.
+         with open(mteval_output_file) as mteval_fin:
+             # The numbers are located in the last 2nd line of the file.
+             # The first and 2nd item in the list are the score and system names.
+             mteval_bleu_scores = map(float, mteval_fin.readlines()[-2].split()[1:-1])
+
+         with open(ref_file, encoding="utf8") as ref_fin:
+             with open(hyp_file, encoding="utf8") as hyp_fin:
+                 # Whitespace tokenize the file.
+                 # Note: split() automatically strip().
+                 hypothesis = list(map(lambda x: x.split(), hyp_fin))
+                 # Note that the corpus_bleu input is list of list of references.
+                 references = list(map(lambda x: [x.split()], ref_fin))
+                 # Without smoothing.
+                 for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores):
+                     nltk_bleu = corpus_bleu(
+                         references, hypothesis, weights=(1.0 / i,) * i
+                     )
+                     # Check that the BLEU scores difference is less than 0.005 .
+                     # Note: This is an approximate comparison; as much as
+                     # +/- 0.01 BLEU might be "statistically significant",
+                     # the actual translation quality might not be.
+                     assert abs(mteval_bleu - nltk_bleu) < 0.005
+
+                 # With the same smoothing method used in mteval-v13a.pl
+                 chencherry = SmoothingFunction()
+                 for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores):
+                     nltk_bleu = corpus_bleu(
+                         references,
+                         hypothesis,
+                         weights=(1.0 / i,) * i,
+                         smoothing_function=chencherry.method3,
+                     )
+                     assert abs(mteval_bleu - nltk_bleu) < 0.005
+
+
+ class TestBLEUWithBadSentence(unittest.TestCase):
+     def test_corpus_bleu_with_bad_sentence(self):
+         hyp = "Teo S yb , oe uNb , R , T t , , t Tue Ar saln S , , 5istsi l , 5oe R ulO sae oR R"
+         ref = str(
+             "Their tasks include changing a pump on the faulty stokehold ."
+             "Likewise , two species that are very similar in morphology "
+             "were distinguished using genetics ."
+         )
+         references = [[ref.split()]]
+         hypotheses = [hyp.split()]
+         try:  # Check that the warning is raised since no. of 2-grams < 0.
+             with self.assertWarns(UserWarning):
+                 # Verify that the BLEU output is undesired since no. of 2-grams < 0.
+                 self.assertAlmostEqual(
+                     corpus_bleu(references, hypotheses), 0.0, places=4
+                 )
+         except AttributeError:  # unittest.TestCase.assertWarns is only supported in Python >= 3.2.
+             self.assertAlmostEqual(corpus_bleu(references, hypotheses), 0.0, places=4)
+
+
+ class TestBLEUWithMultipleWeights(unittest.TestCase):
+     def test_corpus_bleu_with_multiple_weights(self):
+         hyp1 = [
+             "It",
+             "is",
+             "a",
+             "guide",
+             "to",
+             "action",
+             "which",
+             "ensures",
+             "that",
+             "the",
+             "military",
+             "always",
+             "obeys",
+             "the",
+             "commands",
+             "of",
+             "the",
+             "party",
+         ]
+         ref1a = [
+             "It",
+             "is",
+             "a",
+             "guide",
+             "to",
+             "action",
+             "that",
+             "ensures",
+             "that",
+             "the",
+             "military",
+             "will",
+             "forever",
+             "heed",
+             "Party",
+             "commands",
+         ]
+         ref1b = [
+             "It",
+             "is",
+             "the",
+             "guiding",
+             "principle",
+             "which",
+             "guarantees",
+             "the",
+             "military",
+             "forces",
+             "always",
+             "being",
+             "under",
+             "the",
+             "command",
+             "of",
+             "the",
+             "Party",
+         ]
+         ref1c = [
+             "It",
+             "is",
+             "the",
+             "practical",
+             "guide",
+             "for",
+             "the",
+             "army",
+             "always",
+             "to",
+             "heed",
+             "the",
+             "directions",
+             "of",
+             "the",
+             "party",
+         ]
+         hyp2 = [
+             "he",
+             "read",
+             "the",
+             "book",
+             "because",
+             "he",
+             "was",
+             "interested",
+             "in",
+             "world",
+             "history",
+         ]
+         ref2a = [
+             "he",
+             "was",
+             "interested",
+             "in",
+             "world",
+             "history",
+             "because",
+             "he",
+             "read",
+             "the",
+             "book",
+         ]
+         weight_1 = (1, 0, 0, 0)
+         weight_2 = (0.25, 0.25, 0.25, 0.25)
+         weight_3 = (0, 0, 0, 0, 1)
+
+         bleu_scores = corpus_bleu(
+             list_of_references=[[ref1a, ref1b, ref1c], [ref2a]],
+             hypotheses=[hyp1, hyp2],
+             weights=[weight_1, weight_2, weight_3],
+         )
+         assert bleu_scores[0] == corpus_bleu(
+             [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_1
+         )
+         assert bleu_scores[1] == corpus_bleu(
+             [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_2
+         )
+         assert bleu_scores[2] == corpus_bleu(
+             [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_3
+         )
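`TestBLEUWithMultipleWeights` above exercises the list-of-weight-tuples API: one tuple yields a single float, a list of tuples yields one score per tuple. A minimal standalone sketch of the same behaviour, with toy sentences invented for illustration:

    from nltk.translate.bleu_score import corpus_bleu

    references = [[["the", "cat", "is", "on", "the", "mat"],
                   ["there", "is", "a", "cat", "on", "the", "mat"]]]
    hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]

    # A single weight tuple returns one float (here, unigram-only BLEU).
    bleu_1 = corpus_bleu(references, hypotheses, weights=(1.0,))
    # A list of weight tuples returns one score per tuple, in order.
    bleu_1_and_2 = corpus_bleu(references, hypotheses, weights=[(1.0,), (0.5, 0.5)])
    assert bleu_1_and_2[0] == bleu_1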
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py ADDED
@@ -0,0 +1,160 @@
+ """
+ Tests for IBM Model 5 training methods
+ """
+
+ import unittest
+ from collections import defaultdict
+
+ from nltk.translate import AlignedSent, IBMModel, IBMModel4, IBMModel5
+ from nltk.translate.ibm_model import AlignmentInfo
+
+
+ class TestIBMModel5(unittest.TestCase):
+     def test_set_uniform_vacancy_probabilities_of_max_displacements(self):
+         # arrange
+         src_classes = {"schinken": 0, "eier": 0, "spam": 1}
+         trg_classes = {"ham": 0, "eggs": 1, "spam": 2}
+         corpus = [
+             AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]),
+             AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]),
+         ]
+         model5 = IBMModel5(corpus, 0, src_classes, trg_classes)
+
+         # act
+         model5.set_uniform_probabilities(corpus)
+
+         # assert
+         # number of vacancy difference values =
+         #     2 * number of words in longest target sentence
+         expected_prob = 1.0 / (2 * 4)
+
+         # examine the boundary values for (dv, max_v, trg_class)
+         self.assertEqual(model5.head_vacancy_table[4][4][0], expected_prob)
+         self.assertEqual(model5.head_vacancy_table[-3][1][2], expected_prob)
+         self.assertEqual(model5.non_head_vacancy_table[4][4][0], expected_prob)
+         self.assertEqual(model5.non_head_vacancy_table[-3][1][2], expected_prob)
+
+     def test_set_uniform_vacancy_probabilities_of_non_domain_values(self):
+         # arrange
+         src_classes = {"schinken": 0, "eier": 0, "spam": 1}
+         trg_classes = {"ham": 0, "eggs": 1, "spam": 2}
+         corpus = [
+             AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]),
+             AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]),
+         ]
+         model5 = IBMModel5(corpus, 0, src_classes, trg_classes)
+
+         # act
+         model5.set_uniform_probabilities(corpus)
+
+         # assert
+         # examine dv and max_v values that are not in the training data domain
+         self.assertEqual(model5.head_vacancy_table[5][4][0], IBMModel.MIN_PROB)
+         self.assertEqual(model5.head_vacancy_table[-4][1][2], IBMModel.MIN_PROB)
+         self.assertEqual(model5.head_vacancy_table[4][0][0], IBMModel.MIN_PROB)
+         self.assertEqual(model5.non_head_vacancy_table[5][4][0], IBMModel.MIN_PROB)
+         self.assertEqual(model5.non_head_vacancy_table[-4][1][2], IBMModel.MIN_PROB)
+
+     def test_prob_t_a_given_s(self):
+         # arrange
+         src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"]
+         trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"]
+         src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4}
+         trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4}
+         corpus = [AlignedSent(trg_sentence, src_sentence)]
+         alignment_info = AlignmentInfo(
+             (0, 1, 4, 0, 2, 5, 5),
+             [None] + src_sentence,
+             ["UNUSED"] + trg_sentence,
+             [[3], [1], [4], [], [2], [5, 6]],
+         )
+
+         head_vacancy_table = defaultdict(
+             lambda: defaultdict(lambda: defaultdict(float))
+         )
+         head_vacancy_table[1 - 0][6][3] = 0.97  # ich -> i
+         head_vacancy_table[3 - 0][5][4] = 0.97  # esse -> eat
+         head_vacancy_table[1 - 2][4][4] = 0.97  # gern -> love
+         head_vacancy_table[2 - 0][2][1] = 0.97  # räucherschinken -> smoked
+
+         non_head_vacancy_table = defaultdict(
+             lambda: defaultdict(lambda: defaultdict(float))
+         )
+         non_head_vacancy_table[1 - 0][1][0] = 0.96  # räucherschinken -> ham
+
+         translation_table = defaultdict(lambda: defaultdict(float))
+         translation_table["i"]["ich"] = 0.98
+         translation_table["love"]["gern"] = 0.98
+         translation_table["to"][None] = 0.98
+         translation_table["eat"]["esse"] = 0.98
+         translation_table["smoked"]["räucherschinken"] = 0.98
+         translation_table["ham"]["räucherschinken"] = 0.98
+
+         fertility_table = defaultdict(lambda: defaultdict(float))
+         fertility_table[1]["ich"] = 0.99
+         fertility_table[1]["esse"] = 0.99
+         fertility_table[0]["ja"] = 0.99
+         fertility_table[1]["gern"] = 0.99
+         fertility_table[2]["räucherschinken"] = 0.999
+         fertility_table[1][None] = 0.99
+
+         probabilities = {
+             "p1": 0.167,
+             "translation_table": translation_table,
+             "fertility_table": fertility_table,
+             "head_vacancy_table": head_vacancy_table,
+             "non_head_vacancy_table": non_head_vacancy_table,
+             "head_distortion_table": None,
+             "non_head_distortion_table": None,
+             "alignment_table": None,
+         }
+
+         model5 = IBMModel5(corpus, 0, src_classes, trg_classes, probabilities)
+
+         # act
+         probability = model5.prob_t_a_given_s(alignment_info)
+
+         # assert
+         null_generation = 5 * pow(0.167, 1) * pow(0.833, 4)
+         fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999
+         lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
+         vacancy = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96
+         expected_probability = (
+             null_generation * fertility * lexical_translation * vacancy
+         )
+         self.assertEqual(round(probability, 4), round(expected_probability, 4))
+
+     def test_prune(self):
+         # arrange
+         alignment_infos = [
+             AlignmentInfo((1, 1), None, None, None),
+             AlignmentInfo((1, 2), None, None, None),
+             AlignmentInfo((2, 1), None, None, None),
+             AlignmentInfo((2, 2), None, None, None),
+             AlignmentInfo((0, 0), None, None, None),
+         ]
+         min_factor = IBMModel5.MIN_SCORE_FACTOR
+         best_score = 0.9
+         scores = {
+             (1, 1): min(min_factor * 1.5, 1) * best_score,  # above threshold
+             (1, 2): best_score,
+             (2, 1): min_factor * best_score,  # at threshold
+             (2, 2): min_factor * best_score * 0.5,  # low score
+             (0, 0): min(min_factor * 1.1, 1) * 1.2,  # above threshold
+         }
+         corpus = [AlignedSent(["a"], ["b"])]
+         original_prob_function = IBMModel4.model4_prob_t_a_given_s
+         # mock static method
+         IBMModel4.model4_prob_t_a_given_s = staticmethod(
+             lambda a, model: scores[a.alignment]
+         )
+         model5 = IBMModel5(corpus, 0, None, None)
+
+         # act
+         pruned_alignments = model5.prune(alignment_infos)
+
+         # assert
+         self.assertEqual(len(pruned_alignments), 3)
+
+         # restore static method
+         IBMModel4.model4_prob_t_a_given_s = original_prob_function
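For orientation, the `AlignedSent`/`IBMModel*` API these tests drive takes target-language tokens first and source-language tokens second, and indexes `translation_table[target_word][source_word]`. A toy end-to-end sketch, using IBM Model 1 instead of Model 5 to keep the setup small, with a corpus invented for illustration:

    from nltk.translate import AlignedSent, IBMModel1

    # Target tokens first, source tokens second, as in the tests above.
    corpus = [
        AlignedSent(["the", "house"], ["das", "Haus"]),
        AlignedSent(["the", "book"], ["das", "Buch"]),
        AlignedSent(["a", "book"], ["ein", "Buch"]),
    ]

    model = IBMModel1(corpus, 5)  # 5 EM iterations
    # After training, this lexical probability should dominate its alternatives.
    print(model.translation_table["book"]["Buch"])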
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py ADDED
@@ -0,0 +1,20 @@
+ import unittest
+
+ from nltk.translate.meteor_score import meteor_score
+
+
+ class TestMETEOR(unittest.TestCase):
+     reference = [["this", "is", "a", "test"], ["this", "is", "test"]]
+     candidate = ["THIS", "Is", "a", "tEST"]
+
+     def test_meteor(self):
+         score = meteor_score(self.reference, self.candidate, preprocess=str.lower)
+         assert score == 0.9921875
+
+     def test_reference_type_check(self):
+         str_reference = [" ".join(ref) for ref in self.reference]
+         self.assertRaises(TypeError, meteor_score, str_reference, self.candidate)
+
+     def test_candidate_type_check(self):
+         str_candidate = " ".join(self.candidate)
+         self.assertRaises(TypeError, meteor_score, self.reference, str_candidate)
env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.45 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc ADDED
Binary file (12.9 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc ADDED
Binary file (7.89 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc ADDED
Binary file (8.43 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc ADDED
Binary file (22.5 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc ADDED
Binary file (10.9 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/translate/bleu_score.py ADDED
@@ -0,0 +1,685 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: BLEU Score
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
5
+ # Contributors: Björn Mattsson, Dmitrijs Milajevs, Liling Tan
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """BLEU score implementation."""
10
+
11
+ import math
12
+ import sys
13
+ import warnings
14
+ from collections import Counter
15
+ from fractions import Fraction
16
+
17
+ from nltk.util import ngrams
18
+
19
+
20
+ def sentence_bleu(
21
+ references,
22
+ hypothesis,
23
+ weights=(0.25, 0.25, 0.25, 0.25),
24
+ smoothing_function=None,
25
+ auto_reweigh=False,
26
+ ):
27
+ """
28
+ Calculate BLEU score (Bilingual Evaluation Understudy) from
29
+ Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
30
+ "BLEU: a method for automatic evaluation of machine translation."
31
+ In Proceedings of ACL. https://www.aclweb.org/anthology/P02-1040.pdf
32
+
33
+ >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
34
+ ... 'ensures', 'that', 'the', 'military', 'always',
35
+ ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
36
+
37
+ >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
38
+ ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
39
+ ... 'that', 'party', 'direct']
40
+
41
+ >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
42
+ ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
43
+ ... 'heed', 'Party', 'commands']
44
+
45
+ >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
46
+ ... 'guarantees', 'the', 'military', 'forces', 'always',
47
+ ... 'being', 'under', 'the', 'command', 'of', 'the',
48
+ ... 'Party']
49
+
50
+ >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
51
+ ... 'army', 'always', 'to', 'heed', 'the', 'directions',
52
+ ... 'of', 'the', 'party']
53
+
54
+ >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
55
+ 0.5045...
56
+
57
+ If there is no ngrams overlap for any order of n-grams, BLEU returns the
58
+ value 0. This is because the precision for the order of n-grams without
59
+ overlap is 0, and the geometric mean in the final BLEU score computation
60
+ multiplies the 0 with the precision of other n-grams. This results in 0
61
+ (independently of the precision of the other n-gram orders). The following
62
+ example has zero 3-gram and 4-gram overlaps:
63
+
64
+ >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
65
+ 0.0
66
+
67
+ To avoid this harsh behaviour when no ngram overlaps are found a smoothing
68
+ function can be used.
69
+
70
+ >>> chencherry = SmoothingFunction()
71
+ >>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
72
+ ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
73
+ 0.0370...
74
+
75
+ The default BLEU calculates a score for up to 4-grams using uniform
76
+ weights (this is called BLEU-4). To evaluate your translations with
77
+ higher/lower order ngrams, use customized weights. E.g. when accounting
78
+ for up to 5-grams with uniform weights (this is called BLEU-5) use:
79
+
80
+ >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
81
+ >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
82
+ 0.3920...
83
+
84
+ Multiple BLEU scores can be computed at once, by supplying a list of weights.
85
+ E.g. for computing BLEU-2, BLEU-3 *and* BLEU-4 in one computation, use:
86
+ >>> weights = [
87
+ ... (1./2., 1./2.),
88
+ ... (1./3., 1./3., 1./3.),
89
+ ... (1./4., 1./4., 1./4., 1./4.)
90
+ ... ]
91
+ >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
92
+ [0.7453..., 0.6240..., 0.5045...]
93
+
94
+ :param references: reference sentences
95
+ :type references: list(list(str))
96
+ :param hypothesis: a hypothesis sentence
97
+ :type hypothesis: list(str)
98
+ :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights)
99
+ :type weights: tuple(float) / list(tuple(float))
100
+ :param smoothing_function:
101
+ :type smoothing_function: SmoothingFunction
102
+ :param auto_reweigh: Option to re-normalize the weights uniformly.
103
+ :type auto_reweigh: bool
104
+ :return: The sentence-level BLEU score. Returns a list if multiple weights were supplied.
105
+ :rtype: float / list(float)
106
+ """
107
+ return corpus_bleu(
108
+ [references], [hypothesis], weights, smoothing_function, auto_reweigh
109
+ )
110
+
111
+
112
+ def corpus_bleu(
113
+ list_of_references,
114
+ hypotheses,
115
+ weights=(0.25, 0.25, 0.25, 0.25),
116
+ smoothing_function=None,
117
+ auto_reweigh=False,
118
+ ):
119
+ """
120
+ Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
121
+ the hypotheses and their respective references.
122
+
123
+ Instead of averaging the sentence level BLEU scores (i.e. macro-average
124
+ precision), the original BLEU metric (Papineni et al. 2002) accounts for
125
+ the micro-average precision (i.e. summing the numerators and denominators
126
+ for each hypothesis-reference(s) pairs before the division).
127
+
128
+ >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
129
+ ... 'ensures', 'that', 'the', 'military', 'always',
130
+ ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
131
+ >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
132
+ ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
133
+ ... 'heed', 'Party', 'commands']
134
+ >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
135
+ ... 'guarantees', 'the', 'military', 'forces', 'always',
136
+ ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
137
+ >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
138
+ ... 'army', 'always', 'to', 'heed', 'the', 'directions',
139
+ ... 'of', 'the', 'party']
140
+
141
+ >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
142
+ ... 'interested', 'in', 'world', 'history']
143
+ >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
144
+ ... 'because', 'he', 'read', 'the', 'book']
145
+
146
+ >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
147
+ >>> hypotheses = [hyp1, hyp2]
148
+ >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
149
+ 0.5920...
150
+
151
+ The example below show that corpus_bleu() is different from averaging
152
+ sentence_bleu() for hypotheses
153
+
154
+ >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
155
+ >>> score2 = sentence_bleu([ref2a], hyp2)
156
+ >>> (score1 + score2) / 2 # doctest: +ELLIPSIS
157
+ 0.6223...
158
+
159
+ Custom weights may be supplied to fine-tune the BLEU score further.
160
+ A tuple of float weights for unigrams, bigrams, trigrams and so on can be given.
161
+ >>> weights = (0.1, 0.3, 0.5, 0.1)
162
+ >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS
163
+ 0.5818...
164
+
165
+ This particular weight gave extra value to trigrams.
166
+ Furthermore, multiple weights can be given, resulting in multiple BLEU scores.
167
+ >>> weights = [
168
+ ... (0.5, 0.5),
169
+ ... (0.333, 0.333, 0.334),
170
+ ... (0.25, 0.25, 0.25, 0.25),
171
+ ... (0.2, 0.2, 0.2, 0.2, 0.2)
172
+ ... ]
173
+ >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS
174
+ [0.8242..., 0.7067..., 0.5920..., 0.4719...]
175
+
176
+ :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
177
+ :type list_of_references: list(list(list(str)))
178
+ :param hypotheses: a list of hypothesis sentences
179
+ :type hypotheses: list(list(str))
180
+ :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights)
181
+ :type weights: tuple(float) / list(tuple(float))
182
+ :param smoothing_function:
183
+ :type smoothing_function: SmoothingFunction
184
+ :param auto_reweigh: Option to re-normalize the weights uniformly.
185
+ :type auto_reweigh: bool
186
+ :return: The corpus-level BLEU score.
187
+ :rtype: float
188
+ """
189
+ # Before proceeding to compute BLEU, perform sanity checks.
190
+
191
+ p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
192
+ p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
193
+ hyp_lengths, ref_lengths = 0, 0
194
+
195
+ assert len(list_of_references) == len(hypotheses), (
196
+ "The number of hypotheses and their reference(s) should be the " "same "
197
+ )
198
+
199
+ try:
200
+ weights[0][0]
201
+ except TypeError:
202
+ weights = [weights]
203
+ max_weight_length = max(len(weight) for weight in weights)
204
+
205
+ # Iterate through each hypothesis and their corresponding references.
206
+ for references, hypothesis in zip(list_of_references, hypotheses):
207
+ # For each order of ngram, calculate the numerator and
208
+ # denominator for the corpus-level modified precision.
209
+ for i in range(1, max_weight_length + 1):
210
+ p_i = modified_precision(references, hypothesis, i)
211
+ p_numerators[i] += p_i.numerator
212
+ p_denominators[i] += p_i.denominator
213
+
214
+ # Calculate the hypothesis length and the closest reference length.
215
+ # Adds them to the corpus-level hypothesis and reference counts.
216
+ hyp_len = len(hypothesis)
217
+ hyp_lengths += hyp_len
218
+ ref_lengths += closest_ref_length(references, hyp_len)
219
+
220
+ # Calculate corpus-level brevity penalty.
221
+ bp = brevity_penalty(ref_lengths, hyp_lengths)
222
+
223
+ # Collects the various precision values for the different ngram orders.
224
+ p_n = [
225
+ Fraction(p_numerators[i], p_denominators[i], _normalize=False)
226
+ for i in range(1, max_weight_length + 1)
227
+ ]
228
+
229
+ # Returns 0 if there are no matching n-grams.
+ # We only need to check p_numerators[1] == 0, since if there are
+ # no unigram matches, there won't be any higher-order ngram matches.
232
+ if p_numerators[1] == 0:
233
+ return 0 if len(weights) == 1 else [0] * len(weights)
234
+
235
+ # If no smoothing function is given, use method0 from the SmoothingFunction class.
236
+ if not smoothing_function:
237
+ smoothing_function = SmoothingFunction().method0
238
+ # Smooth the modified precision values.
239
+ # Note: smoothing_function() may convert values into floats;
240
+ # it tries to retain the Fraction object as much as the
241
+ # smoothing method allows.
242
+ p_n = smoothing_function(
243
+ p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
244
+ )
245
+
246
+ bleu_scores = []
247
+ for weight in weights:
248
+ # Uniformly re-weight based on the hypothesis length if the largest
+ # order of n-grams < 4 and the weights are set to the default.
250
+ if auto_reweigh:
251
+ if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25):
252
+ weight = (1 / hyp_lengths,) * hyp_lengths
253
+
254
+ s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0)
255
+ s = bp * math.exp(math.fsum(s))
256
+ bleu_scores.append(s)
257
+ return bleu_scores[0] if len(weights) == 1 else bleu_scores
258
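+
+ # Illustrative sketch (hypothetical helper, not part of the NLTK API) of the
+ # micro-averaging that corpus_bleu() performs above: numerators and
+ # denominators of the modified precisions are accumulated over all sentence
+ # pairs *before* the geometric mean is taken, rather than averaging
+ # per-sentence BLEU scores.
+ def _micro_averaged_precision_sketch(list_of_references, hypotheses, n=1):
+     """Accumulate clipped n-gram counts over a corpus for one n-gram order."""
+     numerator, denominator = 0, 0
+     for references, hypothesis in zip(list_of_references, hypotheses):
+         p_i = modified_precision(references, hypothesis, n)
+         numerator += p_i.numerator
+         denominator += p_i.denominator
+     return numerator / denominator if denominator else 0.0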
+
259
+
260
+ def modified_precision(references, hypothesis, n):
261
+ """
262
+ Calculate modified ngram precision.
263
+
264
+ The normal precision method can reward wrong translations with
+ high precision; e.g., a translation that repeats a single reference
+ word many times can achieve a very high precision score.
267
+
268
+ This function only returns the Fraction object that contains the numerator
269
+ and denominator necessary to calculate the corpus-level precision.
270
+ To calculate the modified precision for a single pair of hypothesis and
271
+ references, cast the Fraction object into a float.
272
+
273
+ The famous "the the the ... " example shows that you can get BLEU precision
274
+ by duplicating high frequency words.
275
+
276
+ >>> reference1 = 'the cat is on the mat'.split()
277
+ >>> reference2 = 'there is a cat on the mat'.split()
278
+ >>> hypothesis1 = 'the the the the the the the'.split()
279
+ >>> references = [reference1, reference2]
280
+ >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
281
+ 0.2857...
282
+
283
+ In the modified n-gram precision, a reference word will be considered
284
+ exhausted after a matching hypothesis word is identified, e.g.
285
+
286
+ >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
287
+ ... 'ensures', 'that', 'the', 'military', 'will',
288
+ ... 'forever', 'heed', 'Party', 'commands']
289
+ >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
290
+ ... 'guarantees', 'the', 'military', 'forces', 'always',
291
+ ... 'being', 'under', 'the', 'command', 'of', 'the',
292
+ ... 'Party']
293
+ >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
294
+ ... 'army', 'always', 'to', 'heed', 'the', 'directions',
295
+ ... 'of', 'the', 'party']
296
+ >>> hypothesis = 'of the'.split()
297
+ >>> references = [reference1, reference2, reference3]
298
+ >>> float(modified_precision(references, hypothesis, n=1))
299
+ 1.0
300
+ >>> float(modified_precision(references, hypothesis, n=2))
301
+ 1.0
302
+
303
+ An example of a normal machine translation hypothesis:
304
+
305
+ >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
306
+ ... 'ensures', 'that', 'the', 'military', 'always',
307
+ ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
308
+
309
+ >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
310
+ ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
311
+ ... 'that', 'party', 'direct']
312
+
313
+ >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
314
+ ... 'ensures', 'that', 'the', 'military', 'will',
315
+ ... 'forever', 'heed', 'Party', 'commands']
316
+
317
+ >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
318
+ ... 'guarantees', 'the', 'military', 'forces', 'always',
319
+ ... 'being', 'under', 'the', 'command', 'of', 'the',
320
+ ... 'Party']
321
+
322
+ >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
323
+ ... 'army', 'always', 'to', 'heed', 'the', 'directions',
324
+ ... 'of', 'the', 'party']
325
+ >>> references = [reference1, reference2, reference3]
326
+ >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
327
+ 0.9444...
328
+ >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
329
+ 0.5714...
330
+ >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
331
+ 0.5882...
332
+ >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
333
+ 0.07692...
334
+
335
+
336
+ :param references: A list of reference translations.
337
+ :type references: list(list(str))
338
+ :param hypothesis: A hypothesis translation.
339
+ :type hypothesis: list(str)
340
+ :param n: The ngram order.
341
+ :type n: int
342
+ :return: BLEU's modified precision for the nth order ngram.
343
+ :rtype: Fraction
344
+ """
345
+ # Extracts all ngrams in hypothesis
346
+ # Set an empty Counter if hypothesis is empty.
347
+ counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
348
+ # Extract a union of references' counts.
349
+ # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
350
+ max_counts = {}
351
+ for reference in references:
352
+ reference_counts = (
353
+ Counter(ngrams(reference, n)) if len(reference) >= n else Counter()
354
+ )
355
+ for ngram in counts:
356
+ max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
357
+
358
+ # Assigns the intersection between hypothesis and references' counts.
359
+ clipped_counts = {
360
+ ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()
361
+ }
362
+
363
+ numerator = sum(clipped_counts.values())
364
+ # Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
365
+ # Usually this happens when the ngram order is > len(reference).
366
+ denominator = max(1, sum(counts.values()))
367
+
368
+ return Fraction(numerator, denominator, _normalize=False)
369
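+
+ # Illustrative sketch (hypothetical helper, not part of the NLTK API) of the
+ # count clipping behind modified_precision, worked through for the
+ # "the the the ..." doctest above: each hypothesis n-gram count is clipped
+ # to the maximum count observed in any single reference.
+ def _clipped_unigram_precision_sketch():
+     hypothesis = "the the the the the the the".split()
+     references = [
+         "the cat is on the mat".split(),
+         "there is a cat on the mat".split(),
+     ]
+     hyp_counts = Counter(hypothesis)  # {'the': 7}
+     max_ref_count = max(ref.count("the") for ref in references)  # 2
+     clipped = min(hyp_counts["the"], max_ref_count)  # 2
+     return clipped / sum(hyp_counts.values())  # 2 / 7 = 0.2857...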
+
370
+
371
+ def closest_ref_length(references, hyp_len):
372
+ """
373
+ This function finds the reference that is closest in length to the
+ hypothesis. The closest reference length is referred to as the *r*
+ variable in the brevity penalty formula of Papineni et al. (2002).
376
+
377
+ :param references: A list of reference translations.
378
+ :type references: list(list(str))
379
+ :param hyp_len: The length of the hypothesis.
380
+ :type hyp_len: int
381
+ :return: The length of the reference that's closest to the hypothesis.
382
+ :rtype: int
383
+ """
384
+ ref_lens = (len(reference) for reference in references)
385
+ closest_ref_len = min(
386
+ ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
387
+ )
388
+ return closest_ref_len
389
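+
+ # A short sketch (hypothetical helper) of the tie-breaking rule above: the
+ # (distance, length) key sorts first by absolute distance to the hypothesis
+ # length and then by the reference length itself, so the shorter reference
+ # wins when two references are equally close.
+ def _closest_ref_tiebreak_sketch():
+     hyp_len = 12
+     ref_lens = [13, 11]  # both are at distance 1 from the hypothesis length
+     return min(ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len))  # -> 11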
+
390
+
391
+ def brevity_penalty(closest_ref_len, hyp_len):
392
+ """
393
+ Calculate brevity penalty.
394
+
395
+ Since modified n-gram precision alone still rewards overly short
+ hypotheses, a brevity penalty is used to adjust the overall BLEU
+ score according to length.
398
+
399
+ An example from the paper: there are three references with lengths 12,
+ 15 and 17, and a concise hypothesis of length 12. The brevity penalty is 1.
401
+
402
+ >>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
403
+ >>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
404
+ >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
405
+ >>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
406
+ >>> references = [reference1, reference2, reference3]
407
+ >>> hyp_len = len(hypothesis)
408
+ >>> closest_ref_len = closest_ref_length(references, hyp_len)
409
+ >>> brevity_penalty(closest_ref_len, hyp_len)
410
+ 1.0
411
+
412
+ When a hypothesis translation is shorter than the references, a penalty
+ is applied.
414
+
415
+ >>> references = [['a'] * 28, ['a'] * 28]
416
+ >>> hypothesis = ['a'] * 12
417
+ >>> hyp_len = len(hypothesis)
418
+ >>> closest_ref_len = closest_ref_length(references, hyp_len)
419
+ >>> brevity_penalty(closest_ref_len, hyp_len)
420
+ 0.2635971381157267
421
+
422
+ The length of the closest reference is used to compute the penalty. If
+ the length of a hypothesis is 12 and the reference lengths are 13 and 2,
+ the penalty is applied because the hypothesis length (12) is less than
+ the closest reference length (13).
426
+
427
+ >>> references = [['a'] * 13, ['a'] * 2]
428
+ >>> hypothesis = ['a'] * 12
429
+ >>> hyp_len = len(hypothesis)
430
+ >>> closest_ref_len = closest_ref_length(references, hyp_len)
431
+ >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
432
+ 0.9200...
433
+
434
+ The brevity penalty doesn't depend on reference order. More importantly,
435
+ when two reference sentences are at the same distance, the shortest
436
+ reference sentence length is used.
437
+
438
+ >>> references = [['a'] * 13, ['a'] * 11]
439
+ >>> hypothesis = ['a'] * 12
440
+ >>> hyp_len = len(hypothesis)
441
+ >>> closest_ref_len = closest_ref_length(references, hyp_len)
442
+ >>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
443
+ >>> hyp_len = len(hypothesis)
444
+ >>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
445
+ >>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
446
+ >>> bp1 == bp2 == 1
447
+ True
448
+
449
+ A test example from mteval-v13a.pl (starting from the line 705):
450
+
451
+ >>> references = [['a'] * 11, ['a'] * 8]
452
+ >>> hypothesis = ['a'] * 7
453
+ >>> hyp_len = len(hypothesis)
454
+ >>> closest_ref_len = closest_ref_length(references, hyp_len)
455
+ >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
456
+ 0.8668...
457
+
458
+ >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
459
+ >>> hypothesis = ['a'] * 7
460
+ >>> hyp_len = len(hypothesis)
461
+ >>> closest_ref_len = closest_ref_length(references, hyp_len)
462
+ >>> brevity_penalty(closest_ref_len, hyp_len)
463
+ 1.0
464
+
465
+ :param hyp_len: The length of the hypothesis for a single sentence OR the
466
+ sum of all the hypotheses' lengths for a corpus
467
+ :type hyp_len: int
468
+ :param closest_ref_len: The length of the closest reference for a single
+ hypothesis OR the sum of all the closest references for every hypothesis.
470
+ :type closest_ref_len: int
471
+ :return: BLEU's brevity penalty.
472
+ :rtype: float
473
+ """
474
+ if hyp_len > closest_ref_len:
475
+ return 1
476
+ # If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
477
+ elif hyp_len == 0:
478
+ return 0
479
+ else:
480
+ return math.exp(1 - closest_ref_len / hyp_len)
481
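+
+ # A worked numeric check (hypothetical helper) of the brevity penalty
+ # formula BP = exp(1 - r/c) for the case c <= r, matching the doctest above
+ # with closest reference length r = 28 and hypothesis length c = 12.
+ def _brevity_penalty_check_sketch():
+     r, c = 28, 12
+     return math.exp(1 - r / c)  # 0.2635971381157267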
+
482
+
483
+ class SmoothingFunction:
484
+ """
485
+ This is an implementation of the smoothing techniques
486
+ for segment-level BLEU scores that was presented in
487
+ Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
488
+ Smoothing Techniques for Sentence-Level BLEU. In WMT14.
489
+ http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
490
+ """
491
+
492
+ def __init__(self, epsilon=0.1, alpha=5, k=5):
493
+ """
494
+ This will initialize the parameters required for the various smoothing
495
+ techniques, the default values are set to the numbers used in the
496
+ experiments from Chen and Cherry (2014).
497
+
498
+ >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures',
499
+ ... 'that', 'the', 'military', 'always', 'obeys', 'the',
500
+ ... 'commands', 'of', 'the', 'party']
501
+ >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures',
502
+ ... 'that', 'the', 'military', 'will', 'forever', 'heed',
503
+ ... 'Party', 'commands']
504
+
505
+ >>> chencherry = SmoothingFunction()
506
+ >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS
507
+ 0.4118...
508
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS
509
+ 0.4118...
510
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS
511
+ 0.4118...
512
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS
513
+ 0.4452...
514
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS
515
+ 0.4118...
516
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS
517
+ 0.4118...
518
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS
519
+ 0.4905...
520
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS
521
+ 0.4135...
522
+ >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS
523
+ 0.4905...
524
+
525
+ :param epsilon: the epsilon value used in method 1
+ :type epsilon: float
+ :param alpha: the alpha value used in method 6
+ :type alpha: int
+ :param k: the k value used in method 4
+ :type k: int
531
+ """
532
+ self.epsilon = epsilon
533
+ self.alpha = alpha
534
+ self.k = k
535
+
536
+ def method0(self, p_n, *args, **kwargs):
537
+ """
538
+ No smoothing.
539
+ """
540
+ p_n_new = []
541
+ for i, p_i in enumerate(p_n):
542
+ if p_i.numerator != 0:
543
+ p_n_new.append(p_i)
544
+ else:
545
+ _msg = str(
546
+ "\nThe hypothesis contains 0 counts of {}-gram overlaps.\n"
547
+ "Therefore the BLEU score evaluates to 0, independently of\n"
548
+ "how many N-gram overlaps of lower order it contains.\n"
549
+ "Consider using lower n-gram order or use "
550
+ "SmoothingFunction()"
551
+ ).format(i + 1)
552
+ warnings.warn(_msg)
553
+ # When numerator == 0 (whether the denominator is 0 or not), the
+ # precision score should be 0 or undefined. Because BLEU computes
+ # its geometric mean in logarithm space, we return
+ # sys.float_info.min so that math.log(sys.float_info.min) yields
+ # an effectively zero precision score.
558
+ p_n_new.append(sys.float_info.min)
559
+ return p_n_new
560
+
561
+ def method1(self, p_n, *args, **kwargs):
562
+ """
563
+ Smoothing method 1: Add *epsilon* counts to precision with 0 counts.
564
+ """
565
+ return [
566
+ (p_i.numerator + self.epsilon) / p_i.denominator
567
+ if p_i.numerator == 0
568
+ else p_i
569
+ for p_i in p_n
570
+ ]
571
+
572
+ def method2(self, p_n, *args, **kwargs):
573
+ """
574
+ Smoothing method 2: Add 1 to both numerator and denominator from
575
+ Chin-Yew Lin and Franz Josef Och (2004) ORANGE: a Method for
576
+ Evaluating Automatic Evaluation Metrics for Machine Translation.
577
+ In COLING 2004.
578
+ """
579
+ return [
580
+ Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False)
581
+ if i != 0
582
+ else p_n[0]
583
+ for i in range(len(p_n))
584
+ ]
585
+
586
+ def method3(self, p_n, *args, **kwargs):
587
+ """
588
+ Smoothing method 3: NIST geometric sequence smoothing
589
+ The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each
590
+ precision score whose matching n-gram count is null.
591
+ k is 1 for the first 'n' value for which the n-gram match count is null, and increases by 1 for each subsequent null count.
592
+
593
+ For example, if the text contains:
594
+
595
+ - one 2-gram match
596
+ - and (consequently) two 1-gram matches
597
+
598
+ the n-gram count for each individual precision score would be:
599
+
600
+ - n=1 => prec_count = 2 (two unigrams)
601
+ - n=2 => prec_count = 1 (one bigram)
602
+ - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1)
603
+ - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)
604
+ """
605
+ incvnt = 1 # From the mteval-v13a.pl, it's referred to as k.
606
+ for i, p_i in enumerate(p_n):
607
+ if p_i.numerator == 0:
608
+ p_n[i] = 1 / (2**incvnt * p_i.denominator)
609
+ incvnt += 1
610
+ return p_n
611
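+
+ # A hand-worked sketch (hypothetical helper; the n-gram totals are assumed)
+ # of method3's geometric smoothing for the docstring example: zero precisions
+ # are replaced by 1 / (2**k * denominator), with k incremented per null count.
+ def _method3_sketch():
+     matches = [2, 1, 0, 0]  # matched n-gram counts for n = 1..4
+     totals = [5, 4, 3, 2]  # assumed total hypothesis n-grams for n = 1..4
+     smoothed, k = [], 1
+     for m, t in zip(matches, totals):
+         if m == 0:
+             smoothed.append(1 / (2**k * t))
+             k += 1
+         else:
+             smoothed.append(m / t)
+     return smoothed  # [0.4, 0.25, 1/6, 1/8]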
+
612
+ def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
613
+ """
614
+ Smoothing method 4:
615
+ Shorter translations may have inflated precision values due to having
616
+ smaller denominators; therefore, we give them proportionally
617
+ smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry
+ suggest dividing by 1/ln(len(T)), where T is the length of the translation.
619
+ """
620
+ incvnt = 1
621
+ hyp_len = hyp_len if hyp_len else len(hypothesis)
622
+ for i, p_i in enumerate(p_n):
623
+ if p_i.numerator == 0 and hyp_len > 1:
624
+ # incvnt = i + 1 * self.k / math.log(
625
+ # hyp_len
626
+ # ) # Note that this K is different from the K from NIST.
627
+ # p_n[i] = incvnt / p_i.denominator\
628
+ numerator = 1 / (2**incvnt * self.k / math.log(hyp_len))
629
+ p_n[i] = numerator / p_i.denominator
630
+ incvnt += 1
631
+ return p_n
632
+
633
+ def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
634
+ """
635
+ Smoothing method 5:
636
+ The matched counts for similar values of n should be similar. To
+ calculate the n-gram matched count, it averages the n−1, n and n+1 gram
+ matched counts.
639
+ """
640
+ hyp_len = hyp_len if hyp_len else len(hypothesis)
641
+ m = {}
642
+ # Requires a precision value for one additional ngram order.
643
+ p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)]
644
+ m[-1] = p_n[0] + 1
645
+ for i, p_i in enumerate(p_n):
646
+ p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3
647
+ m[i] = p_n[i]
648
+ return p_n
649
+
650
+ def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
651
+ """
652
+ Smoothing method 6:
653
+ Interpolates the maximum likelihood estimate of the precision *p_n* with
654
+ a prior estimate *pi0*. The prior is estimated by assuming that the ratio
655
+ between pn and pn−1 will be the same as that between pn−1 and pn−2; from
656
+ Gao and He (2013) Training MRF-Based Phrase Translation Models using
657
+ Gradient Ascent. In NAACL.
658
+ """
659
+ hyp_len = hyp_len if hyp_len else len(hypothesis)
660
+ # This smoothing only works when p_1 and p_2 are non-zero.
661
+ # Raise an error with an appropriate message when the input is too short
662
+ # to use this smoothing technique.
663
+ assert p_n[2], "This smoothing method requires non-zero precision for bigrams."
664
+ for i, p_i in enumerate(p_n):
665
+ if i in [0, 1]: # Skips the first 2 orders of ngrams.
666
+ continue
667
+ else:
668
+ pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2]
669
+ # No. of ngrams in translation that matches the reference.
670
+ m = p_i.numerator
671
+ # No. of ngrams in translation.
672
+ l = sum(1 for _ in ngrams(hypothesis, i + 1))
673
+ # Calculates the interpolated precision.
674
+ p_n[i] = (m + self.alpha * pi0) / (l + self.alpha)
675
+ return p_n
676
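+
+ # A minimal sketch (hypothetical helper; all numbers are made up) of the
+ # prior used by method6: pi0 assumes p_n / p_(n-1) equals p_(n-1) / p_(n-2),
+ # i.e. pi0 = p_(n-1)**2 / p_(n-2), which is then interpolated with the
+ # observed match count m via the alpha parameter.
+ def _method6_prior_sketch(p1=0.9, p2=0.5, m=1, l=10, alpha=5):
+     pi0 = p2**2 / p1  # prior estimate of the trigram precision
+     return (m + alpha * pi0) / (l + alpha)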
+
677
+ def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
678
+ """
679
+ Smoothing method 7:
680
+ Interpolates methods 4 and 5.
681
+ """
682
+ hyp_len = hyp_len if hyp_len else len(hypothesis)
683
+ p_n = self.method4(p_n, references, hypothesis, hyp_len)
684
+ p_n = self.method5(p_n, references, hypothesis, hyp_len)
685
+ return p_n
env-llmeval/lib/python3.10/site-packages/nltk/translate/chrf_score.py ADDED
@@ -0,0 +1,222 @@
1
+ # Natural Language Toolkit: ChrF score
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Maja Popovic
5
+ # Contributors: Liling Tan, Aleš Tamchyna (Memsource)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """ ChrF score implementation """
10
+ import re
11
+ from collections import Counter, defaultdict
12
+
13
+ from nltk.util import ngrams
14
+
15
+
16
+ def sentence_chrf(
17
+ reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True
18
+ ):
19
+ """
20
+ Calculates the sentence level CHRF (Character n-gram F-score) described in
21
+ - Maja Popovic. 2015. CHRF: Character n-gram F-score for Automatic MT Evaluation.
22
+ In Proceedings of the 10th Workshop on Machine Translation.
23
+ https://www.statmt.org/wmt15/pdf/WMT49.pdf
24
+ - Maja Popovic. 2016. CHRF Deconstructed: β Parameters and n-gram Weights.
25
+ In Proceedings of the 1st Conference on Machine Translation.
26
+ https://www.statmt.org/wmt16/pdf/W16-2341.pdf
27
+
28
+ This implementation of CHRF only supports a single reference at the moment.
29
+
30
+ For details not reported in the paper, consult Maja Popovic's original
31
+ implementation: https://github.com/m-popovic/chrF
32
+
33
+ The code should output results equivalent to running CHRF++ with the
34
+ following options: -nw 0 -b 3
35
+
36
+ An example from the original BLEU paper
37
+ https://www.aclweb.org/anthology/P02-1040.pdf
38
+
39
+ >>> ref1 = str('It is a guide to action that ensures that the military '
40
+ ... 'will forever heed Party commands').split()
41
+ >>> hyp1 = str('It is a guide to action which ensures that the military '
42
+ ... 'always obeys the commands of the party').split()
43
+ >>> hyp2 = str('It is to insure the troops forever hearing the activity '
44
+ ... 'guidebook that party direct').split()
45
+ >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS
46
+ 0.6349...
47
+ >>> sentence_chrf(ref1, hyp2) # doctest: +ELLIPSIS
48
+ 0.3330...
49
+
50
+ The infamous "the the the ... " example
51
+
52
+ >>> ref = 'the cat is on the mat'.split()
53
+ >>> hyp = 'the the the the the the the'.split()
54
+ >>> sentence_chrf(ref, hyp) # doctest: +ELLIPSIS
55
+ 0.1468...
56
+
57
+ An example to show that this function allows users to use strings instead of
58
+ tokens, i.e. list(str) as inputs.
59
+
60
+ >>> ref1 = str('It is a guide to action that ensures that the military '
61
+ ... 'will forever heed Party commands')
62
+ >>> hyp1 = str('It is a guide to action which ensures that the military '
63
+ ... 'always obeys the commands of the party')
64
+ >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS
65
+ 0.6349...
66
+ >>> type(ref1) == type(hyp1) == str
67
+ True
68
+ >>> sentence_chrf(ref1.split(), hyp1.split()) # doctest: +ELLIPSIS
69
+ 0.6349...
70
+
71
+ To skip the unigrams and only use 2- to 3-grams:
72
+
73
+ >>> sentence_chrf(ref1, hyp1, min_len=2, max_len=3) # doctest: +ELLIPSIS
74
+ 0.6617...
75
+
76
+ :param references: reference sentence
77
+ :type references: list(str) / str
78
+ :param hypothesis: a hypothesis sentence
79
+ :type hypothesis: list(str) / str
80
+ :param min_len: The minimum order of n-gram this function should extract.
81
+ :type min_len: int
82
+ :param max_len: The maximum order of n-gram this function should extract.
83
+ :type max_len: int
84
+ :param beta: the parameter to assign more importance to recall over precision
85
+ :type beta: float
86
+ :param ignore_whitespace: ignore whitespace characters in scoring
87
+ :type ignore_whitespace: bool
88
+ :return: the sentence level CHRF score.
89
+ :rtype: float
90
+ """
91
+ return corpus_chrf(
92
+ [reference],
93
+ [hypothesis],
94
+ min_len,
95
+ max_len,
96
+ beta=beta,
97
+ ignore_whitespace=ignore_whitespace,
98
+ )
99
+
100
+
101
+ def _preprocess(sent, ignore_whitespace):
102
+ if type(sent) != str:
103
+ # turn list of tokens into a string
104
+ sent = " ".join(sent)
105
+
106
+ if ignore_whitespace:
107
+ sent = re.sub(r"\s+", "", sent)
108
+ return sent
109
+
110
+
111
+ def chrf_precision_recall_fscore_support(
112
+ reference, hypothesis, n, beta=3.0, epsilon=1e-16
113
+ ):
114
+ """
115
+ This function computes the precision, recall and fscore from the ngram
116
+ overlaps. It returns the `support` which is the true positive score.
117
+
118
+ By underspecifying the input type, the function will be agnostic as to how
119
+ it computes the ngrams and simply take the whichever element in the list;
120
+ it could be either token or character.
121
+
122
+ :param reference: The reference sentence.
123
+ :type reference: list
124
+ :param hypothesis: The hypothesis sentence.
125
+ :type hypothesis: list
126
+ :param n: Extract up to the n-th order ngrams
127
+ :type n: int
128
+ :param beta: The parameter to assign more importance to recall over precision.
129
+ :type beta: float
130
+ :param epsilon: The fallback value if the hypothesis or reference is empty.
131
+ :type epsilon: float
132
+ :return: Returns the precision, recall and f-score and support (true positive).
133
+ :rtype: tuple(float)
134
+ """
135
+ ref_ngrams = Counter(ngrams(reference, n))
136
+ hyp_ngrams = Counter(ngrams(hypothesis, n))
137
+
138
+ # calculate the number of ngram matches
139
+ overlap_ngrams = ref_ngrams & hyp_ngrams
140
+ tp = sum(overlap_ngrams.values()) # True positives.
141
+ tpfp = sum(hyp_ngrams.values()) # True positives + False positives.
142
+ tpfn = sum(ref_ngrams.values()) # True positives + False negatives.
143
+
144
+ try:
145
+ prec = tp / tpfp # precision
146
+ rec = tp / tpfn # recall
147
+ factor = beta**2
148
+ fscore = (1 + factor) * (prec * rec) / (factor * prec + rec)
149
+ except ZeroDivisionError:
150
+ prec = rec = fscore = epsilon
151
+ return prec, rec, fscore, tp
152
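+
+ # A worked sketch (hypothetical helper; the counts are made up) of the
+ # F_beta computation above: with beta = 3, recall is weighted nine times
+ # more heavily than precision in
+ # fscore = (1 + beta**2) * P * R / (beta**2 * P + R).
+ def _chrf_fbeta_sketch(tp=6, tpfp=10, tpfn=8, beta=3.0):
+     prec, rec = tp / tpfp, tp / tpfn  # precision 0.6, recall 0.75
+     factor = beta**2
+     return (1 + factor) * (prec * rec) / (factor * prec + rec)  # ~0.7317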
+
153
+
154
+ def corpus_chrf(
155
+ references, hypotheses, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True
156
+ ):
157
+ """
158
+ Calculates the corpus level CHRF (Character n-gram F-score), it is the
159
+ macro-averaged value of the sentence/segment level CHRF score.
160
+
161
+ This implementation of CHRF only supports a single reference at the moment.
162
+
163
+ >>> ref1 = str('It is a guide to action that ensures that the military '
164
+ ... 'will forever heed Party commands').split()
165
+ >>> ref2 = str('It is the guiding principle which guarantees the military '
166
+ ... 'forces always being under the command of the Party').split()
167
+ >>>
168
+ >>> hyp1 = str('It is a guide to action which ensures that the military '
169
+ ... 'always obeys the commands of the party').split()
170
+ >>> hyp2 = str('It is to insure the troops forever hearing the activity '
171
+ ... 'guidebook that party direct')
172
+ >>> corpus_chrf([ref1, ref2, ref1, ref2], [hyp1, hyp2, hyp2, hyp1]) # doctest: +ELLIPSIS
173
+ 0.3910...
174
+
175
+ :param references: a corpus of list of reference sentences, w.r.t. hypotheses
176
+ :type references: list(list(str))
177
+ :param hypotheses: a list of hypothesis sentences
178
+ :type hypotheses: list(list(str))
179
+ :param min_len: The minimum order of n-gram this function should extract.
180
+ :type min_len: int
181
+ :param max_len: The maximum order of n-gram this function should extract.
182
+ :type max_len: int
183
+ :param beta: the parameter to assign more importance to recall over precision
184
+ :type beta: float
185
+ :param ignore_whitespace: ignore whitespace characters in scoring
186
+ :type ignore_whitespace: bool
187
+ :return: the sentence level CHRF score.
188
+ :rtype: float
189
+ """
190
+
191
+ assert len(references) == len(
192
+ hypotheses
193
+ ), "The number of hypotheses and their references should be the same"
194
+ num_sents = len(hypotheses)
195
+
196
+ # Keep f-scores for each n-gram order separate
197
+ ngram_fscores = defaultdict(lambda: list())
198
+
199
+ # Iterate through each hypothesis and their corresponding references.
200
+ for reference, hypothesis in zip(references, hypotheses):
201
+
202
+ # preprocess both reference and hypothesis
203
+ reference = _preprocess(reference, ignore_whitespace)
204
+ hypothesis = _preprocess(hypothesis, ignore_whitespace)
205
+
206
+ # Calculate f-scores for each sentence and for each n-gram order
207
+ # separately.
208
+ for n in range(min_len, max_len + 1):
209
+ # Compute the precision, recall, fscore and support.
210
+ prec, rec, fscore, tp = chrf_precision_recall_fscore_support(
211
+ reference, hypothesis, n, beta=beta
212
+ )
213
+ ngram_fscores[n].append(fscore)
214
+
215
+ # how many n-gram sizes
216
+ num_ngram_sizes = len(ngram_fscores)
217
+
218
+ # sum of f-scores over all sentences for each n-gram order
219
+ total_scores = [sum(fscores) for n, fscores in ngram_fscores.items()]
220
+
221
+ # macro-average over n-gram orders and over all sentences
222
+ return (sum(total_scores) / num_ngram_sizes) / num_sents
env-llmeval/lib/python3.10/site-packages/nltk/translate/gale_church.py ADDED
@@ -0,0 +1,263 @@
1
+ # Natural Language Toolkit: Gale-Church Aligner
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Torsten Marek <[email protected]>
5
+ # Contributor: Cassidy Laidlaw, Liling Tan
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+
11
+ A port of the Gale-Church Aligner.
12
+
13
+ Gale & Church (1993), A Program for Aligning Sentences in Bilingual Corpora.
14
+ https://aclweb.org/anthology/J93-1004.pdf
15
+
16
+ """
17
+
18
+ import math
19
+
20
+ try:
21
+ from norm import logsf as norm_logsf
22
+ from scipy.stats import norm
23
+ except ImportError:
24
+
25
+ def erfcc(x):
26
+ """Complementary error function."""
27
+ z = abs(x)
28
+ t = 1 / (1 + 0.5 * z)
29
+ r = t * math.exp(
30
+ -z * z
31
+ - 1.26551223
32
+ + t
33
+ * (
34
+ 1.00002368
35
+ + t
36
+ * (
37
+ 0.37409196
38
+ + t
39
+ * (
40
+ 0.09678418
41
+ + t
42
+ * (
43
+ -0.18628806
44
+ + t
45
+ * (
46
+ 0.27886807
47
+ + t
48
+ * (
49
+ -1.13520398
50
+ + t
51
+ * (1.48851587 + t * (-0.82215223 + t * 0.17087277))
52
+ )
53
+ )
54
+ )
55
+ )
56
+ )
57
+ )
58
+ )
59
+ if x >= 0.0:
60
+ return r
61
+ else:
62
+ return 2.0 - r
63
+
64
+ def norm_cdf(x):
65
+ """Return the area under the normal distribution from M{-∞..x}."""
66
+ return 1 - 0.5 * erfcc(x / math.sqrt(2))
67
+
68
+ def norm_logsf(x):
69
+ try:
70
+ return math.log(1 - norm_cdf(x))
71
+ except ValueError:
72
+ return float("-inf")
73
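+
+ # A quick sanity check (hypothetical helper): norm_logsf should agree with
+ # the standard normal distribution, e.g. norm_logsf(0) == log(0.5), since
+ # half of the probability mass lies above the mean.
+ def _norm_logsf_sanity_sketch():
+     return abs(norm_logsf(0.0) - math.log(0.5)) < 1e-6  # True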
+
74
+
75
+ LOG2 = math.log(2)
76
+
77
+
78
+ class LanguageIndependent:
79
+ # These are the language-independent probabilities and parameters
80
+ # given in Gale & Church
81
+
82
+ # for the computation, l_1 is always the language with less characters
83
+ PRIORS = {
84
+ (1, 0): 0.0099,
85
+ (0, 1): 0.0099,
86
+ (1, 1): 0.89,
87
+ (2, 1): 0.089,
88
+ (1, 2): 0.089,
89
+ (2, 2): 0.011,
90
+ }
91
+
92
+ AVERAGE_CHARACTERS = 1
93
+ VARIANCE_CHARACTERS = 6.8
94
+
95
+
96
+ def trace(backlinks, source_sents_lens, target_sents_lens):
97
+ """
98
+ Traverse the alignment cost from the tracebacks and retrieves
99
+ appropriate sentence pairs.
100
+
101
+ :param backlinks: A dictionary where the key is the alignment points and value is the cost (referencing the LanguageIndependent.PRIORS)
102
+ :type backlinks: dict
103
+ :param source_sents_lens: A list of target sentences' lengths
104
+ :type source_sents_lens: list(int)
105
+ :param target_sents_lens: A list of target sentences' lengths
106
+ :type target_sents_lens: list(int)
107
+ """
108
+ links = []
109
+ position = (len(source_sents_lens), len(target_sents_lens))
110
+ while position != (0, 0) and all(p >= 0 for p in position):
111
+ try:
112
+ s, t = backlinks[position]
113
+ except TypeError:
114
+ position = (position[0] - 1, position[1] - 1)
115
+ continue
116
+ for i in range(s):
117
+ for j in range(t):
118
+ links.append((position[0] - i - 1, position[1] - j - 1))
119
+ position = (position[0] - s, position[1] - t)
120
+
121
+ return links[::-1]
122
+
123
+
124
+ def align_log_prob(i, j, source_sents, target_sents, alignment, params):
125
+ """Returns the log probability of the two sentences C{source_sents[i]}, C{target_sents[j]}
126
+ being aligned with a specific C{alignment}.
127
+
128
+ @param i: The offset of the source sentence.
129
+ @param j: The offset of the target sentence.
130
+ @param source_sents: The list of source sentence lengths.
131
+ @param target_sents: The list of target sentence lengths.
132
+ @param alignment: The alignment type, a tuple of two integers.
133
+ @param params: The sentence alignment parameters.
134
+
135
+ @returns: The log probability of a specific alignment between the two sentences, given the parameters.
136
+ """
137
+ l_s = sum(source_sents[i - offset - 1] for offset in range(alignment[0]))
138
+ l_t = sum(target_sents[j - offset - 1] for offset in range(alignment[1]))
139
+ try:
140
+ # actually, the paper says l_s * params.VARIANCE_CHARACTERS, this is based on the C
141
+ # reference implementation. With l_s in the denominator, insertions are impossible.
142
+ m = (l_s + l_t / params.AVERAGE_CHARACTERS) / 2
143
+ delta = (l_s * params.AVERAGE_CHARACTERS - l_t) / math.sqrt(
144
+ m * params.VARIANCE_CHARACTERS
145
+ )
146
+ except ZeroDivisionError:
147
+ return float("-inf")
148
+
149
+ return -(LOG2 + norm_logsf(abs(delta)) + math.log(params.PRIORS[alignment]))
150
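+
+ # A worked sketch (hypothetical helper) of the length-based cost above: for
+ # a 1-1 alignment of two sentences of equal character length, delta is 0,
+ # so the cost reduces to -(LOG2 + norm_logsf(0) + log PRIORS[(1, 1)]).
+ def _align_cost_sketch(l_s=20, l_t=20, params=LanguageIndependent):
+     m = (l_s + l_t / params.AVERAGE_CHARACTERS) / 2
+     delta = (l_s * params.AVERAGE_CHARACTERS - l_t) / math.sqrt(
+         m * params.VARIANCE_CHARACTERS
+     )  # 0.0 for these lengths
+     return -(LOG2 + norm_logsf(abs(delta)) + math.log(params.PRIORS[(1, 1)]))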
+
151
+
152
+ def align_blocks(source_sents_lens, target_sents_lens, params=LanguageIndependent):
153
+ """Return the sentence alignment of two text blocks (usually paragraphs).
154
+
155
+ >>> align_blocks([5,5,5], [7,7,7])
156
+ [(0, 0), (1, 1), (2, 2)]
157
+ >>> align_blocks([10,5,5], [12,20])
158
+ [(0, 0), (1, 1), (2, 1)]
159
+ >>> align_blocks([12,20], [10,5,5])
160
+ [(0, 0), (1, 1), (1, 2)]
161
+ >>> align_blocks([10,2,10,10,2,10], [12,3,20,3,12])
162
+ [(0, 0), (1, 1), (2, 2), (3, 2), (4, 3), (5, 4)]
163
+
164
+ @param source_sents_lens: The list of source sentence lengths.
165
+ @param target_sents_lens: The list of target sentence lengths.
166
+ @param params: the sentence alignment parameters.
167
+ @return: The sentence alignments, a list of index pairs.
168
+ """
169
+
170
+ alignment_types = list(params.PRIORS.keys())
171
+
172
+ # there are always three rows in the history (with the last of them being filled)
173
+ D = [[]]
174
+
175
+ backlinks = {}
176
+
177
+ for i in range(len(source_sents_lens) + 1):
178
+ for j in range(len(target_sents_lens) + 1):
179
+ min_dist = float("inf")
180
+ min_align = None
181
+ for a in alignment_types:
182
+ prev_i = -1 - a[0]
183
+ prev_j = j - a[1]
184
+ if prev_i < -len(D) or prev_j < 0:
185
+ continue
186
+ p = D[prev_i][prev_j] + align_log_prob(
187
+ i, j, source_sents_lens, target_sents_lens, a, params
188
+ )
189
+ if p < min_dist:
190
+ min_dist = p
191
+ min_align = a
192
+
193
+ if min_dist == float("inf"):
194
+ min_dist = 0
195
+
196
+ backlinks[(i, j)] = min_align
197
+ D[-1].append(min_dist)
198
+
199
+ if len(D) > 2:
200
+ D.pop(0)
201
+ D.append([])
202
+
203
+ return trace(backlinks, source_sents_lens, target_sents_lens)
204
+
205
+
206
+ def align_texts(source_blocks, target_blocks, params=LanguageIndependent):
207
+ """Creates the sentence alignment of two texts.
208
+
209
+ Texts can consist of several blocks. Block boundaries cannot be crossed by sentence
210
+ alignment links.
211
+
212
+ Each block consists of a list that contains the lengths (in characters) of the sentences
213
+ in this block.
214
+
215
+ @param source_blocks: The list of blocks in the source text.
216
+ @param target_blocks: The list of blocks in the target text.
217
+ @param params: the sentence alignment parameters.
218
+
219
+ @returns: A list of sentence alignment lists
220
+ """
221
+ if len(source_blocks) != len(target_blocks):
222
+ raise ValueError(
223
+ "Source and target texts do not have the same number of blocks."
224
+ )
225
+
226
+ return [
227
+ align_blocks(source_block, target_block, params)
228
+ for source_block, target_block in zip(source_blocks, target_blocks)
229
+ ]
230
+
231
+
232
+ # File I/O functions; may belong in a corpus reader
233
+
234
+
235
+ def split_at(it, split_value):
236
+ """Splits an iterator C{it} at values of C{split_value}.
237
+
238
+ Each instance of C{split_value} is swallowed. The iterator produces
239
+ subiterators which need to be consumed fully before the next subiterator
240
+ can be used.
241
+ """
242
+
243
+ def _chunk_iterator(first):
244
+ v = first
245
+ while v != split_value:
246
+ yield v
247
+ v = it.next()
248
+
249
+ while True:
250
+ yield _chunk_iterator(it.next())
251
+
252
+
253
+ def parse_token_stream(stream, soft_delimiter, hard_delimiter):
254
+ """Parses a stream of tokens and splits it into sentences (using C{soft_delimiter} tokens)
255
+ and blocks (using C{hard_delimiter} tokens) for use with the L{align_texts} function.
256
+ """
257
+ return [
258
+ [
259
+ sum(len(token) for token in sentence_it)
260
+ for sentence_it in split_at(block_it, soft_delimiter)
261
+ ]
262
+ for block_it in split_at(stream, hard_delimiter)
263
+ ]
env-llmeval/lib/python3.10/site-packages/nltk/translate/gdfa.py ADDED
@@ -0,0 +1,138 @@
1
+ # Natural Language Toolkit: GDFA word alignment symmetrization
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Liling Tan
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from collections import defaultdict
9
+
10
+
11
+ def grow_diag_final_and(srclen, trglen, e2f, f2e):
12
+ """
13
+ This function symmetrizes the source-to-target and target-to-source word
+ alignment outputs using the grow-diag-final-and (GDFA) algorithm (Koehn, 2005).
15
+
16
+ Step 1: Find the intersection of the bidirectional alignment.
17
+
18
+ Step 2: Search for additional neighbor alignment points to be added, given
19
+ these criteria: (i) neighbor alignments points are not in the
20
+ intersection and (ii) neighbor alignments are in the union.
21
+
22
+ Step 3: Add all other alignment points that are not in the intersection, not in
23
+ the neighboring alignments that met the criteria but in the original
24
+ forward/backward alignment outputs.
25
+
26
+ >>> forw = ('0-0 2-1 9-2 21-3 10-4 7-5 11-6 9-7 12-8 1-9 3-10 '
27
+ ... '4-11 17-12 17-13 25-14 13-15 24-16 11-17 28-18')
28
+ >>> back = ('0-0 1-9 2-9 3-10 4-11 5-12 6-6 7-5 8-6 9-7 10-4 '
29
+ ... '11-6 12-8 13-12 15-12 17-13 18-13 19-12 20-13 '
30
+ ... '21-3 22-12 23-14 24-17 25-15 26-17 27-18 28-18')
31
+ >>> srctext = ("この よう な ハロー 白色 わい 星 の L 関数 "
32
+ ... "は L と 共 に 不連続 に 増加 する こと が "
33
+ ... "期待 さ れる こと を 示し た 。")
34
+ >>> trgtext = ("Therefore , we expect that the luminosity function "
35
+ ... "of such halo white dwarfs increases discontinuously "
36
+ ... "with the luminosity .")
37
+ >>> srclen = len(srctext.split())
38
+ >>> trglen = len(trgtext.split())
39
+ >>>
40
+ >>> gdfa = grow_diag_final_and(srclen, trglen, forw, back)
41
+ >>> gdfa == sorted(set([(28, 18), (6, 6), (24, 17), (2, 1), (15, 12), (13, 12),
42
+ ... (2, 9), (3, 10), (26, 17), (25, 15), (8, 6), (9, 7), (20,
43
+ ... 13), (18, 13), (0, 0), (10, 4), (13, 15), (23, 14), (7, 5),
44
+ ... (25, 14), (1, 9), (17, 13), (4, 11), (11, 17), (9, 2), (22,
45
+ ... 12), (27, 18), (24, 16), (21, 3), (19, 12), (17, 12), (5,
46
+ ... 12), (11, 6), (12, 8)]))
47
+ True
48
+
49
+ References:
50
+ Koehn, P., A. Axelrod, A. Birch, C. Callison, M. Osborne, and D. Talbot.
51
+ 2005. Edinburgh System Description for the 2005 IWSLT Speech
52
+ Translation Evaluation. In MT Eval Workshop.
53
+
54
+ :type srclen: int
55
+ :param srclen: the number of tokens in the source language
56
+ :type trglen: int
57
+ :param trglen: the number of tokens in the target language
58
+ :type e2f: str
59
+ :param e2f: the forward word alignment outputs from source-to-target
60
+ language (in pharaoh output format)
61
+ :type f2e: str
62
+ :param f2e: the backward word alignment outputs from target-to-source
63
+ language (in pharaoh output format)
64
+ :rtype: set(tuple(int))
65
+ :return: the symmetrized alignment points from the GDFA algorithm
66
+ """
67
+
68
+ # Converts pharaoh text format into list of tuples.
69
+ e2f = [tuple(map(int, a.split("-"))) for a in e2f.split()]
70
+ f2e = [tuple(map(int, a.split("-"))) for a in f2e.split()]
71
+
72
+ neighbors = [(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]
73
+ alignment = set(e2f).intersection(set(f2e)) # Find the intersection.
74
+ union = set(e2f).union(set(f2e))
75
+
76
+ # *aligned* is used to check if neighbors are aligned in grow_diag()
77
+ aligned = defaultdict(set)
78
+ for i, j in alignment:
79
+ aligned["e"].add(i)
80
+ aligned["f"].add(j)
81
+
82
+ def grow_diag():
83
+ """
84
+ Searches for neighboring points and adds them to the intersected
+ alignment points if the criteria are met.
86
+ """
87
+ prev_len = len(alignment) - 1
88
+ # iterate until no new points added
89
+ while prev_len < len(alignment):
90
+ no_new_points = True
91
+ # for english word e = 0 ... en
92
+ for e in range(srclen):
93
+ # for foreign word f = 0 ... fn
94
+ for f in range(trglen):
95
+ # if ( e aligned with f)
96
+ if (e, f) in alignment:
97
+ # for each neighboring point (e-new, f-new)
98
+ for neighbor in neighbors:
99
+ neighbor = tuple(i + j for i, j in zip((e, f), neighbor))
100
+ e_new, f_new = neighbor
101
+ # if ( ( e-new not aligned and f-new not aligned)
102
+ # and (e-new, f-new in union(e2f, f2e) )
103
+ if (
104
+ e_new not in aligned and f_new not in aligned
105
+ ) and neighbor in union:
106
+ alignment.add(neighbor)
107
+ aligned["e"].add(e_new)
108
+ aligned["f"].add(f_new)
109
+ prev_len += 1
110
+ no_new_points = False
111
+ # iterate until no new points added
112
+ if no_new_points:
113
+ break
114
+
115
+ def final_and(a):
116
+ """
117
+ Adds remaining points that are not in the intersection, not in the
118
+ neighboring alignments but in the original *e2f* and *f2e* alignments
119
+ """
120
+ # for english word e = 0 ... en
121
+ for e_new in range(srclen):
122
+ # for foreign word f = 0 ... fn
123
+ for f_new in range(trglen):
124
+ # if ( ( e-new not aligned and f-new not aligned)
125
+ # and (e-new, f-new in union(e2f, f2e) )
126
+ if (
127
+ e_new not in aligned
128
+ and f_new not in aligned
129
+ and (e_new, f_new) in union
130
+ ):
131
+ alignment.add((e_new, f_new))
132
+ aligned["e"].add(e_new)
133
+ aligned["f"].add(f_new)
134
+
135
+ grow_diag()
136
+ final_and(e2f)
137
+ final_and(f2e)
138
+ return sorted(alignment)
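+
+ # A minimal usage sketch (hypothetical helper): symmetrizing a toy 2x2
+ # alignment given in pharaoh format ("src-trg" index pairs).
+ def _gdfa_toy_sketch():
+     forward = "0-0 1-1"  # source-to-target alignment
+     backward = "0-0 0-1 1-1"  # target-to-source alignment
+     return grow_diag_final_and(2, 2, forward, backward)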
env-llmeval/lib/python3.10/site-packages/nltk/translate/gleu_score.py ADDED
@@ -0,0 +1,190 @@
1
+ # Natural Language Toolkit: GLEU Score
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors:
5
+ # Contributors: Mike Schuster, Michael Wayne Goodman, Liling Tan
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """ GLEU score implementation. """
10
+
11
+ from collections import Counter
12
+
13
+ from nltk.util import everygrams, ngrams
14
+
15
+
16
+ def sentence_gleu(references, hypothesis, min_len=1, max_len=4):
17
+ """
18
+ Calculates the sentence level GLEU (Google-BLEU) score described in
19
+
20
+ Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi,
21
+ Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey,
22
+ Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser,
23
+ Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens,
24
+ George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith,
25
+ Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes,
26
+ Jeffrey Dean. (2016) Google’s Neural Machine Translation System:
27
+ Bridging the Gap between Human and Machine Translation.
28
+ eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf
29
+ Retrieved on 27 Oct 2016.
30
+
31
+ From Wu et al. (2016):
32
+ "The BLEU score has some undesirable properties when used for single
33
+ sentences, as it was designed to be a corpus measure. We therefore
34
+ use a slightly different score for our RL experiments which we call
35
+ the 'GLEU score'. For the GLEU score, we record all sub-sequences of
36
+ 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
37
+ compute a recall, which is the ratio of the number of matching n-grams
38
+ to the number of total n-grams in the target (ground truth) sequence,
39
+ and a precision, which is the ratio of the number of matching n-grams
40
+ to the number of total n-grams in the generated output sequence. Then
41
+ GLEU score is simply the minimum of recall and precision. This GLEU
42
+ score's range is always between 0 (no matches) and 1 (all match) and
43
+ it is symmetrical when switching output and target. According to
44
+ our experiments, GLEU score correlates quite well with the BLEU
45
+ metric on a corpus level but does not have its drawbacks for our per
46
+ sentence reward objective."
47
+
48
+ Note: The initial implementation only allowed a single reference, but now
49
+ a list of references is required (which is consistent with
50
+ bleu_score.sentence_bleu()).
51
+
52
+ The infamous "the the the ... " example
53
+
54
+ >>> ref = 'the cat is on the mat'.split()
55
+ >>> hyp = 'the the the the the the the'.split()
56
+ >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS
57
+ 0.0909...
58
+
59
+ An example to evaluate normal machine translation outputs
60
+
61
+ >>> ref1 = str('It is a guide to action that ensures that the military '
62
+ ... 'will forever heed Party commands').split()
63
+ >>> hyp1 = str('It is a guide to action which ensures that the military '
64
+ ... 'always obeys the commands of the party').split()
65
+ >>> hyp2 = str('It is to insure the troops forever hearing the activity '
66
+ ... 'guidebook that party direct').split()
67
+ >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS
68
+ 0.4393...
69
+ >>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS
70
+ 0.1206...
71
+
72
+ :param references: a list of reference sentences
73
+ :type references: list(list(str))
74
+ :param hypothesis: a hypothesis sentence
75
+ :type hypothesis: list(str)
76
+ :param min_len: The minimum order of n-gram this function should extract.
77
+ :type min_len: int
78
+ :param max_len: The maximum order of n-gram this function should extract.
79
+ :type max_len: int
80
+ :return: the sentence level GLEU score.
81
+ :rtype: float
82
+ """
83
+ return corpus_gleu([references], [hypothesis], min_len=min_len, max_len=max_len)
84
+
85
+
86
+ def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4):
87
+ """
88
+ Calculate a single corpus-level GLEU score (aka. system-level GLEU) for all
89
+ the hypotheses and their respective references.
90
+
91
+ Instead of averaging the sentence level GLEU scores (i.e. macro-average
92
+ precision), Wu et al. (2016) sum up the matching tokens and the max of
93
+ hypothesis and reference tokens for each sentence, then compute using the
94
+ aggregate values.
95
+
96
+ From Mike Schuster (via email):
97
+ "For the corpus, we just add up the two statistics n_match and
98
+ n_all = max(n_all_output, n_all_target) for all sentences, then
99
+ calculate gleu_score = n_match / n_all, so it is not just a mean of
100
+ the sentence gleu scores (in our case, longer sentences count more,
101
+ which I think makes sense as they are more difficult to translate)."
102
+
103
+ >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
104
+ ... 'ensures', 'that', 'the', 'military', 'always',
105
+ ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
106
+ >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
107
+ ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
108
+ ... 'heed', 'Party', 'commands']
109
+ >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
110
+ ... 'guarantees', 'the', 'military', 'forces', 'always',
111
+ ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
112
+ >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
113
+ ... 'army', 'always', 'to', 'heed', 'the', 'directions',
114
+ ... 'of', 'the', 'party']
115
+
116
+ >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
117
+ ... 'interested', 'in', 'world', 'history']
118
+ >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
119
+ ... 'because', 'he', 'read', 'the', 'book']
120
+
121
+ >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
122
+ >>> hypotheses = [hyp1, hyp2]
123
+ >>> corpus_gleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
124
+ 0.5673...
125
+
126
+ The example below shows that corpus_gleu() differs from averaging
+ sentence_gleu() over the hypotheses:
128
+
129
+ >>> score1 = sentence_gleu([ref1a], hyp1)
130
+ >>> score2 = sentence_gleu([ref2a], hyp2)
131
+ >>> (score1 + score2) / 2 # doctest: +ELLIPSIS
132
+ 0.6144...
133
+
134
+ :param list_of_references: a list of reference sentences, w.r.t. hypotheses
135
+ :type list_of_references: list(list(list(str)))
136
+ :param hypotheses: a list of hypothesis sentences
137
+ :type hypotheses: list(list(str))
138
+ :param min_len: The minimum order of n-gram this function should extract.
139
+ :type min_len: int
140
+ :param max_len: The maximum order of n-gram this function should extract.
141
+ :type max_len: int
142
+ :return: The corpus-level GLEU score.
143
+ :rtype: float
144
+ """
145
+ # sanity check
146
+ assert len(list_of_references) == len(
147
+ hypotheses
148
+ ), "The number of hypotheses and their reference(s) should be the same"
149
+
150
+ # sum matches and max-token-lengths over all sentences
151
+ corpus_n_match = 0
152
+ corpus_n_all = 0
153
+
154
+ for references, hypothesis in zip(list_of_references, hypotheses):
155
+ hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len))
156
+ tpfp = sum(hyp_ngrams.values()) # True positives + False positives.
157
+
158
+ hyp_counts = []
159
+ for reference in references:
160
+ ref_ngrams = Counter(everygrams(reference, min_len, max_len))
161
+ tpfn = sum(ref_ngrams.values()) # True positives + False negatives.
162
+
163
+ overlap_ngrams = ref_ngrams & hyp_ngrams
164
+ tp = sum(overlap_ngrams.values()) # True positives.
165
+
166
+ # While GLEU is defined as the minimum of precision and
167
+ # recall, we can reduce the number of division operations by one by
168
+ # instead finding the maximum of the denominators for the precision
169
+ # and recall formulae, since the numerators are the same:
170
+ # precision = tp / tpfp
171
+ # recall = tp / tpfn
172
+ # gleu_score = min(precision, recall) == tp / max(tpfp, tpfn)
173
+ n_all = max(tpfp, tpfn)
174
+
175
+ if n_all > 0:
176
+ hyp_counts.append((tp, n_all))
177
+
178
+ # use the reference yielding the highest score
179
+ if hyp_counts:
180
+ n_match, n_all = max(hyp_counts, key=lambda hc: hc[0] / hc[1])
181
+ corpus_n_match += n_match
182
+ corpus_n_all += n_all
183
+
184
+ # corner case: empty corpus or empty references---don't divide by zero!
185
+ if corpus_n_all == 0:
186
+ gleu_score = 0.0
187
+ else:
188
+ gleu_score = corpus_n_match / corpus_n_all
189
+
190
+ return gleu_score
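+
+ # A hand-worked sketch (hypothetical helper) of the GLEU statistic
+ # tp / max(tpfp, tpfn) for the "the the the ..." doctest above, restricted
+ # to unigrams only: two clipped 'the' matches out of max(7, 6) n-grams.
+ def _gleu_unigram_sketch():
+     ref = "the cat is on the mat".split()
+     hyp = "the the the the the the the".split()
+     overlap = Counter(ref) & Counter(hyp)  # clipped matches: {'the': 2}
+     tp = sum(overlap.values())  # 2
+     return tp / max(len(hyp), len(ref))  # 2 / 7 = 0.2857...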
env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm2.py ADDED
@@ -0,0 +1,319 @@
1
+ # Natural Language Toolkit: IBM Model 2
2
+ #
3
+ # Copyright (C) 2001-2013 NLTK Project
4
+ # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Lexical translation model that considers word order.
10
+
11
+ IBM Model 2 improves on Model 1 by accounting for word order.
12
+ An alignment probability is introduced, a(i | j,l,m), which predicts
13
+ a source word position, given its aligned target word's position.
14
+
15
+ The EM algorithm used in Model 2 is:
16
+
17
+ :E step: In the training data, collect counts, weighted by prior
18
+ probabilities.
19
+
20
+ - (a) count how many times a source language word is translated
21
+ into a target language word
22
+ - (b) count how many times a particular position in the source
23
+ sentence is aligned to a particular position in the target
24
+ sentence
25
+
26
+ :M step: Estimate new probabilities based on the counts from the E step
27
+
28
+ Notations
29
+ ---------
30
+
31
+ :i: Position in the source sentence
32
+ Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
33
+ :j: Position in the target sentence
34
+ Valid values are 1, 2, ..., length of target sentence
35
+ :l: Number of words in the source sentence, excluding NULL
36
+ :m: Number of words in the target sentence
37
+ :s: A word in the source language
38
+ :t: A word in the target language
39
+
40
+ References
41
+ ----------
42
+
43
+ Philipp Koehn. 2010. Statistical Machine Translation.
44
+ Cambridge University Press, New York.
45
+
46
+ Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
47
+ Robert L. Mercer. 1993. The Mathematics of Statistical Machine
48
+ Translation: Parameter Estimation. Computational Linguistics, 19 (2),
49
+ 263-311.
50
+ """
51
+
52
+ import warnings
53
+ from collections import defaultdict
54
+
55
+ from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel1
56
+ from nltk.translate.ibm_model import Counts
57
+
58
+
59
+ class IBMModel2(IBMModel):
60
+ """
61
+ Lexical translation model that considers word order
62
+
63
+ >>> bitext = []
64
+ >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
65
+ >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big']))
66
+ >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
67
+ >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
68
+ >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
69
+ >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
70
+
71
+ >>> ibm2 = IBMModel2(bitext, 5)
72
+
73
+ >>> print(round(ibm2.translation_table['buch']['book'], 3))
74
+ 1.0
75
+ >>> print(round(ibm2.translation_table['das']['book'], 3))
76
+ 0.0
77
+ >>> print(round(ibm2.translation_table['buch'][None], 3))
78
+ 0.0
79
+ >>> print(round(ibm2.translation_table['ja'][None], 3))
80
+ 0.0
81
+
82
+ >>> print(round(ibm2.alignment_table[1][1][2][2], 3))
83
+ 0.939
84
+ >>> print(round(ibm2.alignment_table[1][2][2][2], 3))
85
+ 0.0
86
+ >>> print(round(ibm2.alignment_table[2][2][4][5], 3))
87
+ 1.0
88
+
89
+ >>> test_sentence = bitext[2]
90
+ >>> test_sentence.words
91
+ ['das', 'buch', 'ist', 'ja', 'klein']
92
+ >>> test_sentence.mots
93
+ ['the', 'book', 'is', 'small']
94
+ >>> test_sentence.alignment
95
+ Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)])
96
+
97
+ """
98
+
99
+ def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None):
100
+ """
101
+ Train on ``sentence_aligned_corpus`` and create a lexical
102
+ translation model and an alignment model.
103
+
104
+ Translation direction is from ``AlignedSent.mots`` to
105
+ ``AlignedSent.words``.
106
+
107
+ :param sentence_aligned_corpus: Sentence-aligned parallel corpus
108
+ :type sentence_aligned_corpus: list(AlignedSent)
109
+
110
+ :param iterations: Number of iterations to run training algorithm
111
+ :type iterations: int
112
+
113
+ :param probability_tables: Optional. Use this to pass in custom
114
+ probability values. If not specified, probabilities will be
115
+ set to a uniform distribution, or some other sensible value.
116
+ If specified, all the following entries must be present:
117
+ ``translation_table``, ``alignment_table``.
118
+ See ``IBMModel`` for the type and purpose of these tables.
119
+ :type probability_tables: dict[str]: object
120
+ """
121
+ super().__init__(sentence_aligned_corpus)
122
+
123
+ if probability_tables is None:
124
+ # Get translation probabilities from IBM Model 1
125
+ # Run more iterations of training for Model 1, since it is
126
+ # faster than Model 2
127
+ ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations)
128
+ self.translation_table = ibm1.translation_table
129
+ self.set_uniform_probabilities(sentence_aligned_corpus)
130
+ else:
131
+ # Set user-defined probabilities
132
+ self.translation_table = probability_tables["translation_table"]
133
+ self.alignment_table = probability_tables["alignment_table"]
134
+
135
+ for n in range(0, iterations):
136
+ self.train(sentence_aligned_corpus)
137
+
138
+ self.align_all(sentence_aligned_corpus)
139
+
140
+ def set_uniform_probabilities(self, sentence_aligned_corpus):
141
+ # a(i | j,l,m) = 1 / (l+1) for all i, j, l, m
142
+ l_m_combinations = set()
143
+ for aligned_sentence in sentence_aligned_corpus:
144
+ l = len(aligned_sentence.mots)
145
+ m = len(aligned_sentence.words)
146
+ if (l, m) not in l_m_combinations:
147
+ l_m_combinations.add((l, m))
148
+ initial_prob = 1 / (l + 1)
149
+ if initial_prob < IBMModel.MIN_PROB:
150
+ warnings.warn(
151
+ "A source sentence is too long ("
152
+ + str(l)
153
+ + " words). Results may be less accurate."
154
+ )
155
+
156
+ for i in range(0, l + 1):
157
+ for j in range(1, m + 1):
158
+ self.alignment_table[i][j][l][m] = initial_prob
159
+
160
+ def train(self, parallel_corpus):
161
+ counts = Model2Counts()
162
+ for aligned_sentence in parallel_corpus:
163
+ src_sentence = [None] + aligned_sentence.mots
164
+ trg_sentence = ["UNUSED"] + aligned_sentence.words # 1-indexed
165
+ l = len(aligned_sentence.mots)
166
+ m = len(aligned_sentence.words)
167
+
168
+ # E step (a): Compute normalization factors to weigh counts
169
+ total_count = self.prob_all_alignments(src_sentence, trg_sentence)
170
+
171
+ # E step (b): Collect counts
172
+ for j in range(1, m + 1):
173
+ t = trg_sentence[j]
174
+ for i in range(0, l + 1):
175
+ s = src_sentence[i]
176
+ count = self.prob_alignment_point(i, j, src_sentence, trg_sentence)
177
+ normalized_count = count / total_count[t]
178
+
179
+ counts.update_lexical_translation(normalized_count, s, t)
180
+ counts.update_alignment(normalized_count, i, j, l, m)
181
+
182
+ # M step: Update probabilities with maximum likelihood estimates
183
+ self.maximize_lexical_translation_probabilities(counts)
184
+ self.maximize_alignment_probabilities(counts)
185
+
186
+ def maximize_alignment_probabilities(self, counts):
187
+ MIN_PROB = IBMModel.MIN_PROB
188
+ for i, j_s in counts.alignment.items():
189
+ for j, src_sentence_lengths in j_s.items():
190
+ for l, trg_sentence_lengths in src_sentence_lengths.items():
191
+ for m in trg_sentence_lengths:
192
+ estimate = (
193
+ counts.alignment[i][j][l][m]
194
+ / counts.alignment_for_any_i[j][l][m]
195
+ )
196
+ self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB)
197
+
198
+ def prob_all_alignments(self, src_sentence, trg_sentence):
199
+ """
200
+ Computes the probability of all possible word alignments,
201
+ expressed as a marginal distribution over target words t
202
+
203
+ Each entry in the return value represents the contribution to
204
+ the total alignment probability by the target word t.
205
+
206
+ To obtain probability(alignment | src_sentence, trg_sentence),
207
+ simply sum the entries in the return value.
208
+
209
+ :return: Probability of t for all s in ``src_sentence``
210
+ :rtype: dict(str): float
211
+ """
212
+ alignment_prob_for_t = defaultdict(lambda: 0.0)
213
+ for j in range(1, len(trg_sentence)):
214
+ t = trg_sentence[j]
215
+ for i in range(0, len(src_sentence)):
216
+ alignment_prob_for_t[t] += self.prob_alignment_point(
217
+ i, j, src_sentence, trg_sentence
218
+ )
219
+ return alignment_prob_for_t
220
+
221
+ def prob_alignment_point(self, i, j, src_sentence, trg_sentence):
222
+ """
223
+ Probability that position j in ``trg_sentence`` is aligned to
224
+ position i in the ``src_sentence``
225
+ """
226
+ l = len(src_sentence) - 1
227
+ m = len(trg_sentence) - 1
228
+ s = src_sentence[i]
229
+ t = trg_sentence[j]
230
+ return self.translation_table[t][s] * self.alignment_table[i][j][l][m]
231
+
232
+ def prob_t_a_given_s(self, alignment_info):
233
+ """
234
+ Probability of target sentence and an alignment given the
235
+ source sentence
236
+ """
237
+ prob = 1.0
238
+ l = len(alignment_info.src_sentence) - 1
239
+ m = len(alignment_info.trg_sentence) - 1
240
+
241
+ for j, i in enumerate(alignment_info.alignment):
242
+ if j == 0:
243
+ continue # skip the dummy zeroeth element
244
+ trg_word = alignment_info.trg_sentence[j]
245
+ src_word = alignment_info.src_sentence[i]
246
+ prob *= (
247
+ self.translation_table[trg_word][src_word]
248
+ * self.alignment_table[i][j][l][m]
249
+ )
250
+
251
+ return max(prob, IBMModel.MIN_PROB)
252
+
253
+ def align_all(self, parallel_corpus):
254
+ for sentence_pair in parallel_corpus:
255
+ self.align(sentence_pair)
256
+
257
+ def align(self, sentence_pair):
258
+ """
259
+ Determines the best word alignment for one sentence pair from
260
+ the corpus that the model was trained on.
261
+
262
+ The best alignment will be set in ``sentence_pair`` when the
263
+ method returns. In contrast with the internal implementation of
264
+ IBM models, the word indices in the ``Alignment`` are zero-
265
+ indexed, not one-indexed.
266
+
267
+ :param sentence_pair: A sentence in the source language and its
268
+ counterpart sentence in the target language
269
+ :type sentence_pair: AlignedSent
270
+ """
271
+ best_alignment = []
272
+
273
+ l = len(sentence_pair.mots)
274
+ m = len(sentence_pair.words)
275
+
276
+ for j, trg_word in enumerate(sentence_pair.words):
277
+ # Initialize trg_word to align with the NULL token
278
+ best_prob = (
279
+ self.translation_table[trg_word][None]
280
+ * self.alignment_table[0][j + 1][l][m]
281
+ )
282
+ best_prob = max(best_prob, IBMModel.MIN_PROB)
283
+ best_alignment_point = None
284
+ for i, src_word in enumerate(sentence_pair.mots):
285
+ align_prob = (
286
+ self.translation_table[trg_word][src_word]
287
+ * self.alignment_table[i + 1][j + 1][l][m]
288
+ )
289
+ if align_prob >= best_prob:
290
+ best_prob = align_prob
291
+ best_alignment_point = i
292
+
293
+ best_alignment.append((j, best_alignment_point))
294
+
295
+ sentence_pair.alignment = Alignment(best_alignment)
296
+
297
+
298
+ class Model2Counts(Counts):
299
+ """
300
+ Data object to store counts of various parameters during training.
301
+ Includes counts for alignment.
302
+ """
303
+
304
+ def __init__(self):
305
+ super().__init__()
306
+ self.alignment = defaultdict(
307
+ lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
308
+ )
309
+ self.alignment_for_any_i = defaultdict(
310
+ lambda: defaultdict(lambda: defaultdict(lambda: 0.0))
311
+ )
312
+
313
+ def update_lexical_translation(self, count, s, t):
314
+ self.t_given_s[t][s] += count
315
+ self.any_t_given_s[s] += count
316
+
317
+ def update_alignment(self, count, i, j, l, m):
318
+ self.alignment[i][j][l][m] += count
319
+ self.alignment_for_any_i[j][l][m] += count
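
As a complement to the doctest in the class docstring above, here is a minimal, self-contained usage sketch. It assumes only the ``AlignedSent`` and ``IBMModel2`` classes shown in this file; the three-pair toy bitext is hypothetical.

```python
# Train IBM Model 2 on a toy bitext, then inspect the lexical table
# P(t | s) and the alignment the model writes back into the corpus.
from nltk.translate import AlignedSent, IBMModel2

bitext = [
    AlignedSent(['das', 'haus'], ['the', 'house']),
    AlignedSent(['das', 'buch'], ['the', 'book']),
    AlignedSent(['ein', 'buch'], ['a', 'book']),
]

ibm2 = IBMModel2(bitext, 5)  # 5 EM iterations; align_all() runs at the end

print(round(ibm2.translation_table['buch']['book'], 3))
# Alignment pairs are zero-indexed (words position, mots position).
print(bitext[1].alignment)
```
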
env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm5.py ADDED
@@ -0,0 +1,663 @@
1
+ # Natural Language Toolkit: IBM Model 5
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Tah Wei Hoon <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Translation model that keeps track of vacant positions in the target
10
+ sentence to decide where to place translated words.
11
+
12
+ Translation can be viewed as a process where each word in the source
13
+ sentence is stepped through sequentially, generating translated words
14
+ for each source word. The target sentence can be viewed as being made
15
+ up of ``m`` empty slots initially, which gradually fill up as generated
16
+ words are placed in them.
17
+
18
+ Models 3 and 4 use distortion probabilities to decide how to place
19
+ translated words. For simplicity, these models ignore the history of
20
+ which slots have already been occupied with translated words.
21
+ Consider the placement of the last translated word: there is only one
22
+ empty slot left in the target sentence, so the distortion probability
23
+ should be 1.0 for that position and 0.0 everywhere else. However, the
24
+ distortion probabilities for Models 3 and 4 are set up such that all
25
+ positions are under consideration.
26
+
27
+ IBM Model 5 fixes this deficiency by accounting for occupied slots
28
+ during translation. It introduces the vacancy function v(j), the number
29
+ of vacancies up to, and including, position j in the target sentence.
30
+
31
+ Terminology
32
+ -----------
33
+
34
+ :Maximum vacancy:
35
+ The number of valid slots that a word can be placed in.
36
+ This is not necessarily the same as the number of vacant slots.
37
+ For example, if a tablet contains more than one word, the head word
38
+ cannot be placed at the last vacant slot because there will be no
39
+ space for the other words in the tablet. The number of valid slots
40
+ has to take into account the length of the tablet.
41
+ Non-head words cannot be placed before the head word, so vacancies
42
+ to the left of the head word are ignored.
43
+ :Vacancy difference:
44
+ For a head word: (v(j) - v(center of previous cept))
45
+ Can be positive or negative.
46
+ For a non-head word: (v(j) - v(position of previously placed word))
47
+ Always positive, because successive words in a tablet are assumed to
48
+ appear to the right of the previous word.
49
+
50
+ Positioning of target words falls under three cases:
51
+
52
+ 1. Words generated by NULL are distributed uniformly
53
+ 2. For a head word t, its position is modeled by the probability
54
+ v_head(dv | max_v,word_class_t(t))
55
+ 3. For a non-head word t, its position is modeled by the probability
56
+ v_non_head(dv | max_v,word_class_t(t))
57
+
58
+ dv and max_v are defined differently for head and non-head words.
59
+
60
+ The EM algorithm used in Model 5 is:
61
+
62
+ :E step: In the training data, collect counts, weighted by prior
63
+ probabilities.
64
+
65
+ - (a) count how many times a source language word is translated
66
+ into a target language word
67
+ - (b) for a particular word class and maximum vacancy, count how
68
+ many times a head word and the previous cept's center have
69
+ a particular difference in number of vacancies
70
+ - (c) for a particular word class and maximum vacancy, count how
71
+ many times a non-head word and the previous target word
72
+ have a particular difference in number of vacancies
73
+ - (d) count how many times a source word is aligned to phi number
74
+ of target words
75
+ - (e) count how many times NULL is aligned to a target word
76
+
77
+ :M step: Estimate new probabilities based on the counts from the E step
78
+
79
+ Like Model 4, there are too many possible alignments to consider. Thus,
80
+ a hill climbing approach is used to sample good candidates. In addition,
81
+ pruning is used to weed out unlikely alignments based on Model 4 scores.
82
+
83
+ Notations
84
+ ---------
85
+
86
+ :i: Position in the source sentence
87
+ Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
88
+ :j: Position in the target sentence
89
+ Valid values are 1, 2, ..., length of target sentence
90
+ :l: Number of words in the source sentence, excluding NULL
91
+ :m: Number of words in the target sentence
92
+ :s: A word in the source language
93
+ :t: A word in the target language
94
+ :phi: Fertility, the number of target words produced by a source word
95
+ :p1: Probability that a target word produced by a source word is
96
+ accompanied by another target word that is aligned to NULL
97
+ :p0: 1 - p1
98
+ :max_v: Maximum vacancy
99
+ :dv: Vacancy difference, Δv
100
+
101
+ The definition of v_head here differs from GIZA++, section 4.7 of
102
+ [Brown et al., 1993], and [Koehn, 2010]. In the latter cases, v_head is
103
+ v_head(v(j) | v(center of previous cept),max_v,word_class(t)).
104
+
105
+ Here, we follow appendix B of [Brown et al., 1993] and combine v(j) with
106
+ v(center of previous cept) to obtain dv:
107
+ v_head(v(j) - v(center of previous cept) | max_v,word_class(t)).
108
+
109
+ References
110
+ ----------
111
+
112
+ Philipp Koehn. 2010. Statistical Machine Translation.
113
+ Cambridge University Press, New York.
114
+
115
+ Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
116
+ Robert L. Mercer. 1993. The Mathematics of Statistical Machine
117
+ Translation: Parameter Estimation. Computational Linguistics, 19 (2),
118
+ 263-311.
119
+ """
120
+
121
+ import warnings
122
+ from collections import defaultdict
123
+ from math import factorial
124
+
125
+ from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel4
126
+ from nltk.translate.ibm_model import Counts, longest_target_sentence_length
127
+
128
+
129
+ class IBMModel5(IBMModel):
130
+ """
131
+ Translation model that keeps track of vacant positions in the target
132
+ sentence to decide where to place translated words
133
+
134
+ >>> bitext = []
135
+ >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
136
+ >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big']))
137
+ >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
138
+ >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small']))
139
+ >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
140
+ >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
141
+ >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
142
+ >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book']))
143
+ >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize']))
144
+ >>> src_classes = {'the': 0, 'a': 0, 'small': 1, 'big': 1, 'house': 2, 'book': 2, 'is': 3, 'was': 3, 'i': 4, 'summarize': 5 }
145
+ >>> trg_classes = {'das': 0, 'ein': 0, 'haus': 1, 'buch': 1, 'klein': 2, 'groß': 2, 'ist': 3, 'war': 3, 'ja': 4, 'ich': 5, 'fasse': 6, 'zusammen': 6 }
146
+
147
+ >>> ibm5 = IBMModel5(bitext, 5, src_classes, trg_classes)
148
+
149
+ >>> print(round(ibm5.head_vacancy_table[1][1][1], 3))
150
+ 1.0
151
+ >>> print(round(ibm5.head_vacancy_table[2][1][1], 3))
152
+ 0.0
153
+ >>> print(round(ibm5.non_head_vacancy_table[3][3][6], 3))
154
+ 1.0
155
+
156
+ >>> print(round(ibm5.fertility_table[2]['summarize'], 3))
157
+ 1.0
158
+ >>> print(round(ibm5.fertility_table[1]['book'], 3))
159
+ 1.0
160
+
161
+ >>> print(round(ibm5.p1, 3))
162
+ 0.033
163
+
164
+ >>> test_sentence = bitext[2]
165
+ >>> test_sentence.words
166
+ ['das', 'buch', 'ist', 'ja', 'klein']
167
+ >>> test_sentence.mots
168
+ ['the', 'book', 'is', 'small']
169
+ >>> test_sentence.alignment
170
+ Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)])
171
+
172
+ """
173
+
174
+ MIN_SCORE_FACTOR = 0.2
175
+ """
176
+ Alignments with scores below this factor are pruned during sampling
177
+ """
178
+
179
+ def __init__(
180
+ self,
181
+ sentence_aligned_corpus,
182
+ iterations,
183
+ source_word_classes,
184
+ target_word_classes,
185
+ probability_tables=None,
186
+ ):
187
+ """
188
+ Train on ``sentence_aligned_corpus`` and create a lexical
189
+ translation model, vacancy models, a fertility model, and a
190
+ model for generating NULL-aligned words.
191
+
192
+ Translation direction is from ``AlignedSent.mots`` to
193
+ ``AlignedSent.words``.
194
+
195
+ :param sentence_aligned_corpus: Sentence-aligned parallel corpus
196
+ :type sentence_aligned_corpus: list(AlignedSent)
197
+
198
+ :param iterations: Number of iterations to run training algorithm
199
+ :type iterations: int
200
+
201
+ :param source_word_classes: Lookup table that maps a source word
202
+ to its word class, the latter represented by an integer id
203
+ :type source_word_classes: dict[str]: int
204
+
205
+ :param target_word_classes: Lookup table that maps a target word
206
+ to its word class, the latter represented by an integer id
207
+ :type target_word_classes: dict[str]: int
208
+
209
+ :param probability_tables: Optional. Use this to pass in custom
210
+ probability values. If not specified, probabilities will be
211
+ set to a uniform distribution, or some other sensible value.
212
+ If specified, all the following entries must be present:
213
+ ``translation_table``, ``alignment_table``,
214
+ ``fertility_table``, ``p1``, ``head_distortion_table``,
215
+ ``non_head_distortion_table``, ``head_vacancy_table``,
216
+ ``non_head_vacancy_table``. See ``IBMModel``, ``IBMModel4``,
217
+ and ``IBMModel5`` for the type and purpose of these tables.
218
+ :type probability_tables: dict[str]: object
219
+ """
220
+ super().__init__(sentence_aligned_corpus)
221
+ self.reset_probabilities()
222
+ self.src_classes = source_word_classes
223
+ self.trg_classes = target_word_classes
224
+
225
+ if probability_tables is None:
226
+ # Get probabilities from IBM model 4
227
+ ibm4 = IBMModel4(
228
+ sentence_aligned_corpus,
229
+ iterations,
230
+ source_word_classes,
231
+ target_word_classes,
232
+ )
233
+ self.translation_table = ibm4.translation_table
234
+ self.alignment_table = ibm4.alignment_table
235
+ self.fertility_table = ibm4.fertility_table
236
+ self.p1 = ibm4.p1
237
+ self.head_distortion_table = ibm4.head_distortion_table
238
+ self.non_head_distortion_table = ibm4.non_head_distortion_table
239
+ self.set_uniform_probabilities(sentence_aligned_corpus)
240
+ else:
241
+ # Set user-defined probabilities
242
+ self.translation_table = probability_tables["translation_table"]
243
+ self.alignment_table = probability_tables["alignment_table"]
244
+ self.fertility_table = probability_tables["fertility_table"]
245
+ self.p1 = probability_tables["p1"]
246
+ self.head_distortion_table = probability_tables["head_distortion_table"]
247
+ self.non_head_distortion_table = probability_tables[
248
+ "non_head_distortion_table"
249
+ ]
250
+ self.head_vacancy_table = probability_tables["head_vacancy_table"]
251
+ self.non_head_vacancy_table = probability_tables["non_head_vacancy_table"]
252
+
253
+ for n in range(0, iterations):
254
+ self.train(sentence_aligned_corpus)
255
+
256
+ def reset_probabilities(self):
257
+ super().reset_probabilities()
258
+ self.head_vacancy_table = defaultdict(
259
+ lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB))
260
+ )
261
+ """
262
+ dict[int][int][int]: float. Probability(vacancy difference |
263
+ number of remaining valid positions,target word class).
264
+ Values accessed as ``head_vacancy_table[dv][v_max][trg_class]``.
265
+ """
266
+
267
+ self.non_head_vacancy_table = defaultdict(
268
+ lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB))
269
+ )
270
+ """
271
+ dict[int][int][int]: float. Probability(vacancy difference |
272
+ number of remaining valid positions,target word class).
273
+ Values accessed as ``non_head_vacancy_table[dv][v_max][trg_class]``.
274
+ """
275
+
276
+ def set_uniform_probabilities(self, sentence_aligned_corpus):
277
+ """
278
+ Set vacancy probabilities uniformly to
279
+ 1 / cardinality of vacancy difference values
280
+ """
281
+ max_m = longest_target_sentence_length(sentence_aligned_corpus)
282
+
283
+ # The maximum vacancy difference occurs when a word is placed in
284
+ # the last available position m of the target sentence and the
285
+ # previous word position has no vacancies.
286
+ # The minimum is 1-max_v, when a word is placed in the first
287
+ # available position and the previous word is placed beyond the
288
+ # last available position.
289
+ # Thus, the number of possible vacancy difference values is
290
+ # (max_v) - (1-max_v) + 1 = 2 * max_v.
291
+ if max_m > 0 and (1 / (2 * max_m)) < IBMModel.MIN_PROB:
292
+ warnings.warn(
293
+ "A target sentence is too long ("
294
+ + str(max_m)
295
+ + " words). Results may be less accurate."
296
+ )
297
+
298
+ for max_v in range(1, max_m + 1):
299
+ for dv in range(1, max_m + 1):
300
+ initial_prob = 1 / (2 * max_v)  # bound as a default arg below
301
+ # Binding via a default argument avoids the late-binding bug of a
+ # bare ``lambda: initial_prob``, which would make every table
+ # return the value from the final loop iteration.
+ self.head_vacancy_table[dv][max_v] = defaultdict(lambda p=initial_prob: p)
302
+ self.head_vacancy_table[-(dv - 1)][max_v] = defaultdict(
303
+ lambda p=initial_prob: p
304
+ )
305
+ self.non_head_vacancy_table[dv][max_v] = defaultdict(
306
+ lambda p=initial_prob: p
307
+ )
308
+ self.non_head_vacancy_table[-(dv - 1)][max_v] = defaultdict(
309
+ lambda p=initial_prob: p
310
+ )
311
+
312
+ def train(self, parallel_corpus):
313
+ counts = Model5Counts()
314
+ for aligned_sentence in parallel_corpus:
315
+ l = len(aligned_sentence.mots)
316
+ m = len(aligned_sentence.words)
317
+
318
+ # Sample the alignment space
319
+ sampled_alignments, best_alignment = self.sample(aligned_sentence)
320
+ # Record the most probable alignment
321
+ aligned_sentence.alignment = Alignment(
322
+ best_alignment.zero_indexed_alignment()
323
+ )
324
+
325
+ # E step (a): Compute normalization factors to weigh counts
326
+ total_count = self.prob_of_alignments(sampled_alignments)
327
+
328
+ # E step (b): Collect counts
329
+ for alignment_info in sampled_alignments:
330
+ count = self.prob_t_a_given_s(alignment_info)
331
+ normalized_count = count / total_count
332
+
333
+ for j in range(1, m + 1):
334
+ counts.update_lexical_translation(
335
+ normalized_count, alignment_info, j
336
+ )
337
+
338
+ slots = Slots(m)
339
+ for i in range(1, l + 1):
340
+ counts.update_vacancy(
341
+ normalized_count, alignment_info, i, self.trg_classes, slots
342
+ )
343
+
344
+ counts.update_null_generation(normalized_count, alignment_info)
345
+ counts.update_fertility(normalized_count, alignment_info)
346
+
347
+ # M step: Update probabilities with maximum likelihood estimates
348
+ # If any probability is less than MIN_PROB, clamp it to MIN_PROB
349
+ existing_alignment_table = self.alignment_table
350
+ self.reset_probabilities()
351
+ self.alignment_table = existing_alignment_table # don't retrain
352
+
353
+ self.maximize_lexical_translation_probabilities(counts)
354
+ self.maximize_vacancy_probabilities(counts)
355
+ self.maximize_fertility_probabilities(counts)
356
+ self.maximize_null_generation_probabilities(counts)
357
+
358
+ def sample(self, sentence_pair):
359
+ """
360
+ Sample the most probable alignments from the entire alignment
361
+ space according to Model 4
362
+
363
+ Note that Model 4 scoring is used instead of Model 5 because the
364
+ latter is too expensive to compute.
365
+
366
+ First, determine the best alignment according to IBM Model 2.
367
+ With this initial alignment, use hill climbing to determine the
368
+ best alignment according to IBM Model 4. Add this
369
+ alignment and its neighbors to the sample set. Repeat this
370
+ process with other initial alignments obtained by pegging an
371
+ alignment point. Finally, prune alignments that have
372
+ substantially lower Model 4 scores than the best alignment.
373
+
374
+ :param sentence_pair: Source and target language sentence pair
375
+ to generate a sample of alignments from
376
+ :type sentence_pair: AlignedSent
377
+
378
+ :return: A set of best alignments represented by their ``AlignmentInfo``
379
+ and the best alignment of the set for convenience
380
+ :rtype: set(AlignmentInfo), AlignmentInfo
381
+ """
382
+ sampled_alignments, best_alignment = super().sample(sentence_pair)
383
+ return self.prune(sampled_alignments), best_alignment
384
+
385
+ def prune(self, alignment_infos):
386
+ """
387
+ Removes alignments from ``alignment_infos`` that have
388
+ substantially lower Model 4 scores than the best alignment
389
+
390
+ :return: Pruned alignments
391
+ :rtype: set(AlignmentInfo)
392
+ """
393
+ alignments = []
394
+ best_score = 0
395
+
396
+ for alignment_info in alignment_infos:
397
+ score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self)
398
+ best_score = max(score, best_score)
399
+ alignments.append((alignment_info, score))
400
+
401
+ threshold = IBMModel5.MIN_SCORE_FACTOR * best_score
402
+ alignments = [a[0] for a in alignments if a[1] > threshold]
403
+ return set(alignments)
404
+
405
+ def hillclimb(self, alignment_info, j_pegged=None):
406
+ """
407
+ Starting from the alignment in ``alignment_info``, look at
408
+ neighboring alignments iteratively for the best one, according
409
+ to Model 4
410
+
411
+ Note that Model 4 scoring is used instead of Model 5 because the
412
+ latter is too expensive to compute.
413
+
414
+ There is no guarantee that the best alignment in the alignment
415
+ space will be found, because the algorithm might be stuck in a
416
+ local maximum.
417
+
418
+ :param j_pegged: If specified, the search will be constrained to
419
+ alignments where ``j_pegged`` remains unchanged
420
+ :type j_pegged: int
421
+
422
+ :return: The best alignment found from hill climbing
423
+ :rtype: AlignmentInfo
424
+ """
425
+ alignment = alignment_info # alias with shorter name
426
+ max_probability = IBMModel4.model4_prob_t_a_given_s(alignment, self)
427
+
428
+ while True:
429
+ old_alignment = alignment
430
+ for neighbor_alignment in self.neighboring(alignment, j_pegged):
431
+ neighbor_probability = IBMModel4.model4_prob_t_a_given_s(
432
+ neighbor_alignment, self
433
+ )
434
+
435
+ if neighbor_probability > max_probability:
436
+ alignment = neighbor_alignment
437
+ max_probability = neighbor_probability
438
+
439
+ if alignment == old_alignment:
440
+ # Until there are no better alignments
441
+ break
442
+
443
+ alignment.score = max_probability
444
+ return alignment
445
+
446
+ def prob_t_a_given_s(self, alignment_info):
447
+ """
448
+ Probability of target sentence and an alignment given the
449
+ source sentence
450
+ """
451
+ probability = 1.0
452
+ MIN_PROB = IBMModel.MIN_PROB
453
+ slots = Slots(len(alignment_info.trg_sentence) - 1)
454
+
455
+ def null_generation_term():
456
+ # Binomial distribution: B(m - null_fertility, p1)
457
+ value = 1.0
458
+ p1 = self.p1
459
+ p0 = 1 - p1
460
+ null_fertility = alignment_info.fertility_of_i(0)
461
+ m = len(alignment_info.trg_sentence) - 1
462
+ value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility)
463
+ if value < MIN_PROB:
464
+ return MIN_PROB
465
+
466
+ # Combination: (m - null_fertility) choose null_fertility
467
+ for i in range(1, null_fertility + 1):
468
+ value *= (m - null_fertility - i + 1) / i
469
+ return value
470
+
471
+ def fertility_term():
472
+ value = 1.0
473
+ src_sentence = alignment_info.src_sentence
474
+ for i in range(1, len(src_sentence)):
475
+ fertility = alignment_info.fertility_of_i(i)
476
+ value *= (
477
+ factorial(fertility)
478
+ * self.fertility_table[fertility][src_sentence[i]]
479
+ )
480
+ if value < MIN_PROB:
481
+ return MIN_PROB
482
+ return value
483
+
484
+ def lexical_translation_term(j):
485
+ t = alignment_info.trg_sentence[j]
486
+ i = alignment_info.alignment[j]
487
+ s = alignment_info.src_sentence[i]
488
+ return self.translation_table[t][s]
489
+
490
+ def vacancy_term(i):
491
+ value = 1.0
492
+ tablet = alignment_info.cepts[i]
493
+ tablet_length = len(tablet)
494
+ total_vacancies = slots.vacancies_at(len(slots))
495
+
496
+ # case 1: NULL-aligned words
497
+ if tablet_length == 0:
498
+ return value
499
+
500
+ # case 2: head word
501
+ j = tablet[0]
502
+ previous_cept = alignment_info.previous_cept(j)
503
+ previous_center = alignment_info.center_of_cept(previous_cept)
504
+ dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center)
505
+ max_v = total_vacancies - tablet_length + 1
506
+ trg_class = self.trg_classes[alignment_info.trg_sentence[j]]
507
+ value *= self.head_vacancy_table[dv][max_v][trg_class]
508
+ slots.occupy(j) # mark position as occupied
509
+ total_vacancies -= 1
510
+ if value < MIN_PROB:
511
+ return MIN_PROB
512
+
513
+ # case 3: non-head words
514
+ for k in range(1, tablet_length):
515
+ previous_position = tablet[k - 1]
516
+ previous_vacancies = slots.vacancies_at(previous_position)
517
+ j = tablet[k]
518
+ dv = slots.vacancies_at(j) - previous_vacancies
519
+ max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies
520
+ trg_class = self.trg_classes[alignment_info.trg_sentence[j]]
521
+ value *= self.non_head_vacancy_table[dv][max_v][trg_class]
522
+ slots.occupy(j) # mark position as occupied
523
+ total_vacancies -= 1
524
+ if value < MIN_PROB:
525
+ return MIN_PROB
526
+
527
+ return value
528
+
529
+ # end nested functions
530
+
531
+ # Abort computation whenever probability falls below MIN_PROB at
532
+ # any point, since MIN_PROB can be considered as zero
533
+ probability *= null_generation_term()
534
+ if probability < MIN_PROB:
535
+ return MIN_PROB
536
+
537
+ probability *= fertility_term()
538
+ if probability < MIN_PROB:
539
+ return MIN_PROB
540
+
541
+ for j in range(1, len(alignment_info.trg_sentence)):
542
+ probability *= lexical_translation_term(j)
543
+ if probability < MIN_PROB:
544
+ return MIN_PROB
545
+
546
+ for i in range(1, len(alignment_info.src_sentence)):
547
+ probability *= vacancy_term(i)
548
+ if probability < MIN_PROB:
549
+ return MIN_PROB
550
+
551
+ return probability
552
+
553
+ def maximize_vacancy_probabilities(self, counts):
554
+ MIN_PROB = IBMModel.MIN_PROB
555
+ head_vacancy_table = self.head_vacancy_table
556
+ for dv, max_vs in counts.head_vacancy.items():
557
+ for max_v, trg_classes in max_vs.items():
558
+ for t_cls in trg_classes:
559
+ estimate = (
560
+ counts.head_vacancy[dv][max_v][t_cls]
561
+ / counts.head_vacancy_for_any_dv[max_v][t_cls]
562
+ )
563
+ head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB)
564
+
565
+ non_head_vacancy_table = self.non_head_vacancy_table
566
+ for dv, max_vs in counts.non_head_vacancy.items():
567
+ for max_v, trg_classes in max_vs.items():
568
+ for t_cls in trg_classes:
569
+ estimate = (
570
+ counts.non_head_vacancy[dv][max_v][t_cls]
571
+ / counts.non_head_vacancy_for_any_dv[max_v][t_cls]
572
+ )
573
+ non_head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB)
574
+
575
+
576
+ class Model5Counts(Counts):
577
+ """
578
+ Data object to store counts of various parameters during training.
579
+ Includes counts for vacancies.
580
+ """
581
+
582
+ def __init__(self):
583
+ super().__init__()
584
+ self.head_vacancy = defaultdict(
585
+ lambda: defaultdict(lambda: defaultdict(lambda: 0.0))
586
+ )
587
+ self.head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0))
588
+ self.non_head_vacancy = defaultdict(
589
+ lambda: defaultdict(lambda: defaultdict(lambda: 0.0))
590
+ )
591
+ self.non_head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0))
592
+
593
+ def update_vacancy(self, count, alignment_info, i, trg_classes, slots):
594
+ """
595
+ :param count: Value to add to the vacancy counts
596
+ :param alignment_info: Alignment under consideration
597
+ :param i: Source word position under consideration
598
+ :param trg_classes: Target word classes
599
+ :param slots: Vacancy states of the slots in the target sentence.
600
+ Output parameter that will be modified as new words are placed
601
+ in the target sentence.
602
+ """
603
+ tablet = alignment_info.cepts[i]
604
+ tablet_length = len(tablet)
605
+ total_vacancies = slots.vacancies_at(len(slots))
606
+
607
+ # case 1: NULL aligned words
608
+ if tablet_length == 0:
609
+ return # ignore zero fertility words
610
+
611
+ # case 2: head word
612
+ j = tablet[0]
613
+ previous_cept = alignment_info.previous_cept(j)
614
+ previous_center = alignment_info.center_of_cept(previous_cept)
615
+ dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center)
616
+ max_v = total_vacancies - tablet_length + 1
617
+ trg_class = trg_classes[alignment_info.trg_sentence[j]]
618
+ self.head_vacancy[dv][max_v][trg_class] += count
619
+ self.head_vacancy_for_any_dv[max_v][trg_class] += count
620
+ slots.occupy(j) # mark position as occupied
621
+ total_vacancies -= 1
622
+
623
+ # case 3: non-head words
624
+ for k in range(1, tablet_length):
625
+ previous_position = tablet[k - 1]
626
+ previous_vacancies = slots.vacancies_at(previous_position)
627
+ j = tablet[k]
628
+ dv = slots.vacancies_at(j) - previous_vacancies
629
+ max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies
630
+ trg_class = trg_classes[alignment_info.trg_sentence[j]]
631
+ self.non_head_vacancy[dv][max_v][trg_class] += count
632
+ self.non_head_vacancy_for_any_dv[max_v][trg_class] += count
633
+ slots.occupy(j) # mark position as occupied
634
+ total_vacancies -= 1
635
+
636
+
637
+ class Slots:
638
+ """
639
+ Represents positions in a target sentence. Used to keep track of
640
+ which slot (position) is occupied.
641
+ """
642
+
643
+ def __init__(self, target_sentence_length):
644
+ self._slots = [False] * (target_sentence_length + 1) # 1-indexed
645
+
646
+ def occupy(self, position):
647
+ """
648
+ :return: Mark slot at ``position`` as occupied
649
+ """
650
+ self._slots[position] = True
651
+
652
+ def vacancies_at(self, position):
653
+ """
654
+ :return: Number of vacant slots up to, and including, ``position``
655
+ """
656
+ vacancies = 0
657
+ for k in range(1, position + 1):
658
+ if not self._slots[k]:
659
+ vacancies += 1
660
+ return vacancies
661
+
662
+ def __len__(self):
663
+ return len(self._slots) - 1 # exclude dummy zeroeth element
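
The vacancy function v(j) described in the module docstring can be traced concretely with the ``Slots`` helper defined above. A small sketch; positions are 1-indexed, matching the implementation.

```python
from nltk.translate.ibm5 import Slots

slots = Slots(5)               # a 5-slot target sentence, all vacant
print(slots.vacancies_at(5))   # 5: v(5) counts every vacant slot

slots.occupy(2)                # place a translated word at position 2
slots.occupy(4)                # and another at position 4
print(slots.vacancies_at(3))   # 2: positions 1 and 3 are still vacant
print(slots.vacancies_at(len(slots)))  # 3: total remaining vacancies
```
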
env-llmeval/lib/python3.10/site-packages/nltk/translate/metrics.py ADDED
@@ -0,0 +1,41 @@
1
+ # Natural Language Toolkit: Translation metrics
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Will Zhang <[email protected]>
5
+ # Guan Gui <[email protected]>
6
+ # Steven Bird <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+
11
+ def alignment_error_rate(reference, hypothesis, possible=None):
12
+ """
13
+ Return the Alignment Error Rate (AER) of an alignment
14
+ with respect to a "gold standard" reference alignment.
15
+ Return an error rate between 0.0 (perfect alignment) and 1.0 (no
16
+ alignment).
17
+
18
+ >>> from nltk.translate import Alignment
19
+ >>> ref = Alignment([(0, 0), (1, 1), (2, 2)])
20
+ >>> test = Alignment([(0, 0), (1, 2), (2, 1)])
21
+ >>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS
22
+ 0.6666666666666667
23
+
24
+ :type reference: Alignment
25
+ :param reference: A gold standard alignment (sure alignments)
26
+ :type hypothesis: Alignment
27
+ :param hypothesis: A hypothesis alignment (aka. candidate alignments)
28
+ :type possible: Alignment or None
29
+ :param possible: A gold standard reference of possible alignments
30
+ (defaults to *reference* if None)
31
+ :rtype: float or None
32
+ """
33
+
34
+ if possible is None:
35
+ possible = reference
36
+ else:
37
+ assert reference.issubset(possible) # sanity check
38
+
39
+ return 1.0 - (len(hypothesis & reference) + len(hypothesis & possible)) / float(
40
+ len(hypothesis) + len(reference)
41
+ )
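
Working the doctest above through by hand: ``possible`` defaults to the sure reference, so the formula reduces to AER = 1 - (|A∩S| + |A∩P|) / (|A| + |S|). A quick sanity check of that arithmetic:

```python
from nltk.translate import Alignment
from nltk.translate.metrics import alignment_error_rate

ref = Alignment([(0, 0), (1, 1), (2, 2)])
hyp = Alignment([(0, 0), (1, 2), (2, 1)])

# Only (0, 0) is shared, so AER = 1 - (1 + 1) / (3 + 3) = 2/3.
assert abs(alignment_error_rate(ref, hyp) - 2 / 3) < 1e-12
```
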
env-llmeval/lib/python3.10/site-packages/nltk/translate/nist_score.py ADDED
@@ -0,0 +1,195 @@
1
+ # Natural Language Toolkit: NIST Score
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors:
5
+ # Contributors:
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """NIST score implementation."""
10
+
11
+ import fractions
12
+ import math
13
+ from collections import Counter
14
+
15
+ from nltk.util import ngrams
16
+
17
+
18
+ def sentence_nist(references, hypothesis, n=5):
19
+ """
20
+ Calculate NIST score from
21
+ George Doddington. 2002. "Automatic evaluation of machine translation quality
22
+ using n-gram co-occurrence statistics." Proceedings of HLT.
23
+ Morgan Kaufmann Publishers Inc. https://dl.acm.org/citation.cfm?id=1289189.1289273
24
+
25
+ DARPA commissioned NIST to develop an MT evaluation facility based on the BLEU
26
+ score. The official script used by NIST to compute BLEU and NIST score is
27
+ mteval-14.pl. The main differences are:
28
+
29
+ - BLEU uses geometric mean of the ngram overlaps, NIST uses arithmetic mean.
30
+ - NIST has a different brevity penalty
31
+ - NIST score from mteval-14.pl has a self-contained tokenizer
32
+
33
+ Note: The mteval-14.pl script includes a smoothing function for the BLEU score that is NOT
34
+ used in the NIST score computation.
35
+
36
+ >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
37
+ ... 'ensures', 'that', 'the', 'military', 'always',
38
+ ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
39
+
40
+ >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
41
+ ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
42
+ ... 'that', 'party', 'direct']
43
+
44
+ >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
45
+ ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
46
+ ... 'heed', 'Party', 'commands']
47
+
48
+ >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
49
+ ... 'guarantees', 'the', 'military', 'forces', 'always',
50
+ ... 'being', 'under', 'the', 'command', 'of', 'the',
51
+ ... 'Party']
52
+
53
+ >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
54
+ ... 'army', 'always', 'to', 'heed', 'the', 'directions',
55
+ ... 'of', 'the', 'party']
56
+
57
+ >>> sentence_nist([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
58
+ 3.3709...
59
+
60
+ >>> sentence_nist([reference1, reference2, reference3], hypothesis2) # doctest: +ELLIPSIS
61
+ 1.4619...
62
+
63
+ :param references: reference sentences
64
+ :type references: list(list(str))
65
+ :param hypothesis: a hypothesis sentence
66
+ :type hypothesis: list(str)
67
+ :param n: highest n-gram order
68
+ :type n: int
69
+ """
70
+ return corpus_nist([references], [hypothesis], n)
71
+
72
+
73
+ def corpus_nist(list_of_references, hypotheses, n=5):
74
+ """
75
+ Calculate a single corpus-level NIST score (aka. system-level NIST) for all
76
+ the hypotheses and their respective references.
77
+
78
+ :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
80
+ :type list_of_references: list(list(list(str)))
80
+ :param hypotheses: a list of hypothesis sentences
81
+ :type hypotheses: list(list(str))
82
+ :param n: highest n-gram order
83
+ :type n: int
84
+ """
85
+ # Before proceeding to compute NIST, perform sanity checks.
86
+ assert len(list_of_references) == len(
87
+ hypotheses
88
+ ), "The number of hypotheses and their reference(s) should be the same"
89
+
90
+ # Collect the ngram counts from the reference sentences.
91
+ ngram_freq = Counter()
92
+ total_reference_words = 0
93
+ for (
94
+ references
95
+ ) in list_of_references: # For each source sent, there's a list of reference sents.
96
+ for reference in references:
97
+ # For each order of ngram, count the ngram occurrences.
98
+ for i in range(1, n + 1):
99
+ ngram_freq.update(ngrams(reference, i))
100
+ total_reference_words += len(reference)
101
+
102
+ # Compute the information weights based on the reference sentences.
103
+ # Eqn 2 in Doddington (2002):
104
+ # Info(w_1 ... w_n) = log_2 [ (# of occurrences of w_1 ... w_n-1) / (# of occurrences of w_1 ... w_n) ]
105
+ information_weights = {}
106
+ for _ngram in ngram_freq: # w_1 ... w_n
107
+ _mgram = _ngram[:-1] # w_1 ... w_n-1
108
+ # From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v13a.pl#L546
109
+ # it's computed as such:
110
+ # denominator = ngram_freq[_mgram] if _mgram and _mgram in ngram_freq else total_reference_words
111
+ # information_weights[_ngram] = -1 * math.log(ngram_freq[_ngram]/denominator) / math.log(2)
112
+ #
113
+ # Mathematically, it's equivalent to our implementation:
114
+ if _mgram and _mgram in ngram_freq:
115
+ numerator = ngram_freq[_mgram]
116
+ else:
117
+ numerator = total_reference_words
118
+ information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2)
119
+
120
+ # Micro-average.
121
+ nist_precision_numerator_per_ngram = Counter()
122
+ nist_precision_denominator_per_ngram = Counter()
123
+ l_ref, l_sys = 0, 0
124
+ # For each order of ngram.
125
+ for i in range(1, n + 1):
126
+ # Iterate through each hypothesis and their corresponding references.
127
+ for references, hypothesis in zip(list_of_references, hypotheses):
128
+ hyp_len = len(hypothesis)
129
+
130
+ # Find reference with the best NIST score.
131
+ nist_score_per_ref = []
132
+ for reference in references:
133
+ _ref_len = len(reference)
134
+ # Counter of ngrams in hypothesis.
135
+ hyp_ngrams = (
136
+ Counter(ngrams(hypothesis, i))
137
+ if len(hypothesis) >= i
138
+ else Counter()
139
+ )
140
+ ref_ngrams = (
141
+ Counter(ngrams(reference, i)) if len(reference) >= i else Counter()
142
+ )
143
+ ngram_overlaps = hyp_ngrams & ref_ngrams
144
+ # Precision part of the score in Eqn 3
145
+ _numerator = sum(
146
+ information_weights[_ngram] * count
147
+ for _ngram, count in ngram_overlaps.items()
148
+ )
149
+ _denominator = sum(hyp_ngrams.values())
150
+ _precision = 0 if _denominator == 0 else _numerator / _denominator
151
+ nist_score_per_ref.append(
152
+ (_precision, _numerator, _denominator, _ref_len)
153
+ )
154
+ # Best reference.
155
+ precision, numerator, denominator, ref_len = max(nist_score_per_ref)
156
+ nist_precision_numerator_per_ngram[i] += numerator
157
+ nist_precision_denominator_per_ngram[i] += denominator
158
+ l_ref += ref_len
159
+ l_sys += hyp_len
160
+
161
+ # Final NIST micro-average mean aggregation.
162
+ nist_precision = 0
163
+ for i in nist_precision_numerator_per_ngram:
164
+ precision = (
165
+ nist_precision_numerator_per_ngram[i]
166
+ / nist_precision_denominator_per_ngram[i]
167
+ )
168
+ nist_precision += precision
169
+ # Eqn 3 in Doddington(2002)
170
+ return nist_precision * nist_length_penalty(l_ref, l_sys)
171
+
172
+
173
+ def nist_length_penalty(ref_len, hyp_len):
174
+ """
175
+ Calculates the NIST length penalty, from Eq. 3 in Doddington (2002)
176
+
177
+ penalty = exp( beta * log( min( len(hyp)/len(ref) , 1.0 )))
178
+
179
+ where,
180
+
181
+ `beta` is chosen to make the brevity penalty factor = 0.5 when the
182
+ no. of words in the system output (hyp) is 2/3 of the average
183
+ no. of words in the reference translation (ref)
184
+
185
+ The NIST penalty differs from BLEU's in that it minimizes the impact
186
+ that small variations in translation length have on the score.
187
+ See Fig. 4 in Doddington (2002)
188
+ """
189
+ ratio = hyp_len / ref_len
190
+ if 0 < ratio < 1:
191
+ ratio_x, score_x = 1.5, 0.5
192
+ beta = math.log(score_x) / math.log(ratio_x) ** 2
193
+ return math.exp(beta * math.log(ratio) ** 2)
194
+ else: # ratio <= 0 or ratio >= 1
195
+ return max(min(ratio, 1.0), 0.0)
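
The calibration of ``beta`` above is easy to verify numerically: the penalty is exactly 0.5 when the hypothesis is 2/3 of the reference length, and it saturates at 1.0 once the hypothesis is at least as long as the reference.

```python
from nltk.translate.nist_score import nist_length_penalty

print(nist_length_penalty(3, 2))    # 0.5: hyp is 2/3 of ref, by calibration
print(nist_length_penalty(10, 10))  # 1.0: ratio == 1 is not penalised
print(nist_length_penalty(10, 15))  # 1.0: longer output is capped, not rewarded
```
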
env-llmeval/lib/python3.10/site-packages/nltk/translate/phrase_based.py ADDED
@@ -0,0 +1,193 @@
1
+ # Natural Language Toolkit: Phrase Extraction Algorithm
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Liling Tan, Fredrik Hedman, Petra Barancikova
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+
9
+ def extract(
10
+ f_start,
11
+ f_end,
12
+ e_start,
13
+ e_end,
14
+ alignment,
15
+ f_aligned,
16
+ srctext,
17
+ trgtext,
18
+ srclen,
19
+ trglen,
20
+ max_phrase_length,
21
+ ):
22
+ """
23
+ This function checks for alignment point consistency and extracts
24
+ phrases using the chunk of consistent phrases.
25
+
26
+ A phrase pair (e, f ) is consistent with an alignment A if and only if:
27
+
28
+ (i) No English words in the phrase pair are aligned to words outside it.
29
+
30
+ ∀ e_i ∈ ē: (e_i, f_j) ∈ A ⇒ f_j ∈ f̄
31
+
32
+ (ii) No Foreign words in the phrase pair are aligned to words outside it.
33
+
34
+ ∀ f_j ∈ f̄: (e_i, f_j) ∈ A ⇒ e_i ∈ ē
35
+
36
+ (iii) The phrase pair contains at least one alignment point.
37
+
38
+ ∃ e_i ∈ ē, f_j ∈ f̄ s.t. (e_i, f_j) ∈ A
39
+
40
+ :type f_start: int
41
+ :param f_start: Starting index of the possible foreign language phrases
42
+ :type f_end: int
43
+ :param f_end: End index of the possible foreign language phrases
44
+ :type e_start: int
45
+ :param e_start: Starting index of the possible source language phrases
46
+ :type e_end: int
47
+ :param e_end: End index of the possible source language phrases
48
+ :type srctext: list
49
+ :param srctext: The source language tokens, a list of strings.
50
+ :type trgtext: list
51
+ :param trgtext: The target language tokens, a list of strings.
52
+ :type srclen: int
53
+ :param srclen: The number of tokens in the source language tokens.
54
+ :type trglen: int
55
+ :param trglen: The number of tokens in the target language tokens.
56
+ """
57
+
58
+ if f_end < 0: # 0-based indexing.
59
+ return {}
60
+ # Check if alignment points are consistent.
61
+ for e, f in alignment:
62
+ if (f_start <= f <= f_end) and (e < e_start or e > e_end):
63
+ return {}
64
+
65
+ # Add phrase pairs (incl. additional unaligned f)
66
+ phrases = set()
67
+ fs = f_start
68
+ while True:
69
+ fe = min(f_end, f_start + max_phrase_length - 1)
70
+ while True:
71
+ # add phrase pair ([e_start, e_end], [fs, fe]) to set E
72
+ # Need to +1 in range to include the end-point.
73
+ src_phrase = " ".join(srctext[e_start : e_end + 1])
74
+ trg_phrase = " ".join(trgtext[fs : fe + 1])
75
+ # Include more data for later ordering.
76
+ phrases.add(((e_start, e_end + 1), (fs, fe + 1), src_phrase, trg_phrase))
77
+ fe += 1
78
+ if fe in f_aligned or fe >= trglen:
79
+ break
80
+ fs -= 1
81
+ if fs in f_aligned or fs < 0:
82
+ break
83
+ return phrases
84
+
85
+
86
+ def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0):
87
+ """
88
+ Phrase extraction algorithm extracts all consistent phrase pairs from
89
+ a word-aligned sentence pair.
90
+
91
+ The idea is to loop over all possible source language (e) phrases and find
92
+ the minimal foreign phrase (f) that matches each of them. Matching is done
93
+ by identifying all alignment points for the source phrase and finding the
94
+ shortest foreign phrase that includes all the foreign counterparts for the
95
+ source words.
96
+
97
+ In short, a phrase alignment has to
98
+ (a) contain all alignment points for all covered words
99
+ (b) contain at least one alignment point
100
+
101
+ >>> srctext = "michael assumes that he will stay in the house"
102
+ >>> trgtext = "michael geht davon aus , dass er im haus bleibt"
103
+ >>> alignment = [(0,0), (1,1), (1,2), (1,3), (2,5), (3,6), (4,9),
104
+ ... (5,9), (6,7), (7,7), (8,8)]
105
+ >>> phrases = phrase_extraction(srctext, trgtext, alignment)
106
+ >>> for i in sorted(phrases):
107
+ ... print(i)
108
+ ...
109
+ ((0, 1), (0, 1), 'michael', 'michael')
110
+ ((0, 2), (0, 4), 'michael assumes', 'michael geht davon aus')
111
+ ((0, 2), (0, 5), 'michael assumes', 'michael geht davon aus ,')
112
+ ((0, 3), (0, 6), 'michael assumes that', 'michael geht davon aus , dass')
113
+ ((0, 4), (0, 7), 'michael assumes that he', 'michael geht davon aus , dass er')
114
+ ((0, 9), (0, 10), 'michael assumes that he will stay in the house', 'michael geht davon aus , dass er im haus bleibt')
115
+ ((1, 2), (1, 4), 'assumes', 'geht davon aus')
116
+ ((1, 2), (1, 5), 'assumes', 'geht davon aus ,')
117
+ ((1, 3), (1, 6), 'assumes that', 'geht davon aus , dass')
118
+ ((1, 4), (1, 7), 'assumes that he', 'geht davon aus , dass er')
119
+ ((1, 9), (1, 10), 'assumes that he will stay in the house', 'geht davon aus , dass er im haus bleibt')
120
+ ((2, 3), (4, 6), 'that', ', dass')
121
+ ((2, 3), (5, 6), 'that', 'dass')
122
+ ((2, 4), (4, 7), 'that he', ', dass er')
123
+ ((2, 4), (5, 7), 'that he', 'dass er')
124
+ ((2, 9), (4, 10), 'that he will stay in the house', ', dass er im haus bleibt')
125
+ ((2, 9), (5, 10), 'that he will stay in the house', 'dass er im haus bleibt')
126
+ ((3, 4), (6, 7), 'he', 'er')
127
+ ((3, 9), (6, 10), 'he will stay in the house', 'er im haus bleibt')
128
+ ((4, 6), (9, 10), 'will stay', 'bleibt')
129
+ ((4, 9), (7, 10), 'will stay in the house', 'im haus bleibt')
130
+ ((6, 8), (7, 8), 'in the', 'im')
131
+ ((6, 9), (7, 9), 'in the house', 'im haus')
132
+ ((8, 9), (8, 9), 'house', 'haus')
133
+
134
+ :type srctext: str
135
+ :param srctext: The sentence string from the source language.
136
+ :type trgtext: str
137
+ :param trgtext: The sentence string from the target language.
138
+ :type alignment: list(tuple)
139
+ :param alignment: The word alignment outputs as list of tuples, where
140
+ the first elements of tuples are the source words' indices and
141
+ second elements are the target words' indices. This is also the output
142
+ format of nltk.translate.ibm1
143
+ :rtype: list(tuple)
144
+ :return: A list of tuples, where each element is a phrase and each
145
+ phrase is a tuple made up of (i) its source location, (ii) its target
146
+ location, (iii) the source phrase and (iv) the target phrase. The phrase
147
+ list of tuples represents all the possible phrases extracted from the
148
+ word alignments.
149
+ :type max_phrase_length: int
150
+ :param max_phrase_length: maximal phrase length, if 0 or not specified
151
+ it is set to the length of the longer sentence (srctext or trgtext).
152
+ """
153
+
154
+ srctext = srctext.split() # e
155
+ trgtext = trgtext.split() # f
156
+ srclen = len(srctext) # len(e)
157
+ trglen = len(trgtext) # len(f)
158
+ # Keep an index of which target (f) words are aligned.
159
+ f_aligned = [j for _, j in alignment]
160
+ max_phrase_length = max_phrase_length or max(srclen, trglen)
161
+
162
+ # set of phrase pairs BP
163
+ bp = set()
164
+
165
+ for e_start in range(srclen):
166
+ max_idx = min(srclen, e_start + max_phrase_length)
167
+ for e_end in range(e_start, max_idx):
168
+ # // find the minimally matching foreign phrase
169
+ # (f start , f end ) = ( length(f), 0 )
170
+ # f_start ∈ [0, len(f) - 1]; f_end ∈ [0, len(f) - 1]
171
+ f_start, f_end = trglen - 1, -1 # 0-based indexing
172
+
173
+ for e, f in alignment:
174
+ if e_start <= e <= e_end:
175
+ f_start = min(f, f_start)
176
+ f_end = max(f, f_end)
177
+ # add extract (f start , f end , e start , e end ) to set BP
178
+ phrases = extract(
179
+ f_start,
180
+ f_end,
181
+ e_start,
182
+ e_end,
183
+ alignment,
184
+ f_aligned,
185
+ srctext,
186
+ trgtext,
187
+ srclen,
188
+ trglen,
189
+ max_phrase_length,
190
+ )
191
+ if phrases:
192
+ bp.update(phrases)
193
+ return bp
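
Beyond the long doctest above, the smallest non-trivial input makes the consistency conditions easy to trace by hand. A minimal sketch; the two-word sentence pair is hypothetical.

```python
from nltk.translate.phrase_based import phrase_extraction

# Two words with a one-to-one alignment yield exactly three consistent pairs.
phrases = phrase_extraction("small house", "kleines haus", [(0, 0), (1, 1)])
for phrase in sorted(phrases):
    print(phrase)
# ((0, 1), (0, 1), 'small', 'kleines')
# ((0, 2), (0, 2), 'small house', 'kleines haus')
# ((1, 2), (1, 2), 'house', 'haus')
```
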
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/METADATA ADDED
@@ -0,0 +1,115 @@
1
+ Metadata-Version: 2.1
2
+ Name: pyarrow-hotfix
3
+ Version: 0.6
4
+ Project-URL: Documentation, https://github.com/pitrou/pyarrow-hotfix#readme
5
+ Project-URL: Issues, https://github.com/pitrou/pyarrow-hotfix/issues
6
+ Project-URL: Source, https://github.com/pitrou/pyarrow-hotfix
7
+ Author-email: Antoine Pitrou <[email protected]>
8
+ License: Apache License, Version 2.0
9
+ License-File: LICENSE.txt
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3.5
15
+ Classifier: Programming Language :: Python :: 3.6
16
+ Classifier: Programming Language :: Python :: 3.7
17
+ Classifier: Programming Language :: Python :: 3.8
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Requires-Python: >=3.5
23
+ Description-Content-Type: text/x-rst
24
+
25
+ PyArrow Hotfix
26
+ ==============
27
+
28
+ .. image:: https://img.shields.io/pypi/v/pyarrow-hotfix.svg
29
+ :alt: pyarrow_hotfix package on PyPI
30
+ :target: https://pypi.org/project/pyarrow-hotfix
31
+
32
+ .. image:: https://img.shields.io/pypi/pyversions/pyarrow-hotfix.svg
33
+ :alt: pyarrow_hotfix supported Python versions
34
+ :target: https://pypi.org/project/pyarrow-hotfix
35
+
36
+ .. image:: https://github.com/pitrou/pyarrow-hotfix/actions/workflows/tests.yml/badge.svg
37
+ :alt: latest unit test results
38
+ :target: https://github.com/pitrou/pyarrow-hotfix/actions/workflows/tests.yml
39
+
40
+
41
+ Description
42
+ -----------
43
+
44
+ This is a hotfix for the PyArrow security vulnerability
45
+ `CVE-2023-47248 <https://www.cve.org/CVERecord?id=CVE-2023-47248>`__.
46
+
47
+ We generally recommend upgrading to PyArrow 14.0.1 or later, but if you
48
+ cannot upgrade, this package disables the vulnerability on older versions.
49
+
50
+ Installation
51
+ ------------
52
+
53
+ Use ``pip`` to install:
54
+
55
+ .. code-block:: console
56
+
57
+ pip install pyarrow_hotfix
58
+
59
+ .. note::
60
+ Both ``pyarrow-hotfix`` and ``pyarrow_hotfix`` are accepted and point to
61
+ the same package.
62
+
63
+ Usage
64
+ -----
65
+
66
+ ``pyarrow_hotfix`` must be imported in your application or library code for
67
+ it to take effect:
68
+
69
+ .. code-block:: python
70
+
71
+ import pyarrow_hotfix
72
+
73
+ Supported versions
74
+ ------------------
75
+
76
+ ``pyarrow_hotfix`` supports all Python versions starting from Python 3.5,
77
+ and all PyArrow versions starting from 0.14.0.
78
+
79
+ Dependencies
80
+ ------------
81
+
82
+ ``pyarrow_hotfix`` is a pure Python package that does not have any explicit
83
+ dependencies, and assumes you have installed ``pyarrow`` through other means
84
+ (such as ``pip`` or ``conda``).
85
+
86
+ Example
87
+ -------
88
+
89
+ .. code-block:: pycon
90
+
91
+ >>> import pyarrow as pa
92
+ >>> import pyarrow_hotfix
93
+ >>>
94
+ >>> pa.ipc.open_file('data.arrow')
95
+ Traceback (most recent call last):
96
+ [ ... ]
97
+ RuntimeError: forbidden deserialization of 'arrow.py_extension_type': storage_type = null, serialized = b"\x80\x03cbuiltins\neval\nq\x00X\x15\x00\x00\x00print('hello world!')q\x01\x85q\x02Rq\x03.", pickle disassembly:
98
+ 0: \x80 PROTO 3
99
+ 2: c GLOBAL 'builtins eval'
100
+ 17: q BINPUT 0
101
+ 19: X BINUNICODE "print('hello world!')"
102
+ 45: q BINPUT 1
103
+ 47: \x85 TUPLE1
104
+ 48: q BINPUT 2
105
+ 50: R REDUCE
106
+ 51: q BINPUT 3
107
+ 53: . STOP
108
+ highest protocol among opcodes = 2
109
+
110
+
111
+ License
112
+ -------
113
+
114
+ Like ``pyarrow``, ``pyarrow_hotfix`` is distributed under the terms of the
115
+ `Apache License, version 2.0 <https://www.apache.org/licenses/LICENSE-2.0>`_.
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
1
+ pyarrow_hotfix-0.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ pyarrow_hotfix-0.6.dist-info/METADATA,sha256=ZfkKYA2ae-Tzt8eLmiUZm4AyiIqYgfqKdw-BCk1jfao,3553
3
+ pyarrow_hotfix-0.6.dist-info/RECORD,,
4
+ pyarrow_hotfix-0.6.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87
5
+ pyarrow_hotfix-0.6.dist-info/licenses/LICENSE.txt,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
6
+ pyarrow_hotfix/__about__.py,sha256=vCzHOqAMvexTYGj1TtWWLK-FaFwXvvLLmvfVCpMqh54,136
7
+ pyarrow_hotfix/__init__.py,sha256=7hf1tpfbJuFixx_fMYMcRfhsKZ9Yo3XTNmlyyoasBCw,3527
8
+ pyarrow_hotfix/__pycache__/__about__.cpython-310.pyc,,
9
+ pyarrow_hotfix/__pycache__/__init__.cpython-310.pyc,,
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.18.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/licenses/LICENSE.txt ADDED
@@ -0,0 +1,202 @@
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
env-llmeval/lib/python3.10/site-packages/setuptools/__init__.py ADDED
@@ -0,0 +1,242 @@
1
+ """Extensions to the 'distutils' for large or complex distributions"""
2
+
3
+ from fnmatch import fnmatchcase
4
+ import functools
5
+ import os
6
+ import re
7
+
8
+ import _distutils_hack.override # noqa: F401
9
+
10
+ import distutils.core
11
+ from distutils.errors import DistutilsOptionError
12
+ from distutils.util import convert_path
13
+
14
+ from ._deprecation_warning import SetuptoolsDeprecationWarning
15
+
16
+ import setuptools.version
17
+ from setuptools.extension import Extension
18
+ from setuptools.dist import Distribution
19
+ from setuptools.depends import Require
20
+ from . import monkey
21
+
22
+
23
+ __all__ = [
24
+ 'setup',
25
+ 'Distribution',
26
+ 'Command',
27
+ 'Extension',
28
+ 'Require',
29
+ 'SetuptoolsDeprecationWarning',
30
+ 'find_packages',
31
+ 'find_namespace_packages',
32
+ ]
33
+
34
+ __version__ = setuptools.version.__version__
35
+
36
+ bootstrap_install_from = None
37
+
38
+
39
+ class PackageFinder:
40
+ """
41
+ Generate a list of all Python packages found within a directory
42
+ """
43
+
44
+ @classmethod
45
+ def find(cls, where='.', exclude=(), include=('*',)):
46
+ """Return a list all Python packages found within directory 'where'
47
+
48
+ 'where' is the root directory which will be searched for packages. It
49
+ should be supplied as a "cross-platform" (i.e. URL-style) path; it will
50
+ be converted to the appropriate local path syntax.
51
+
52
+ 'exclude' is a sequence of package names to exclude; '*' can be used
53
+ as a wildcard in the names, such that 'foo.*' will exclude all
54
+ subpackages of 'foo' (but not 'foo' itself).
55
+
56
+ 'include' is a sequence of package names to include. If it's
57
+ specified, only the named packages will be included. If it's not
58
+ specified, all found packages will be included. 'include' can contain
59
+ shell style wildcard patterns just like 'exclude'.
60
+ """
61
+
62
+ return list(
63
+ cls._find_packages_iter(
64
+ convert_path(where),
65
+ cls._build_filter('ez_setup', '*__pycache__', *exclude),
66
+ cls._build_filter(*include),
67
+ )
68
+ )
69
+
70
+ @classmethod
71
+ def _find_packages_iter(cls, where, exclude, include):
72
+ """
73
+ All the packages found in 'where' that pass the 'include' filter, but
74
+ not the 'exclude' filter.
75
+ """
76
+ for root, dirs, files in os.walk(where, followlinks=True):
77
+ # Copy dirs so we can iterate over the copy, then empty dirs.
78
+ all_dirs = dirs[:]
79
+ dirs[:] = []
80
+
81
+ for dir in all_dirs:
82
+ full_path = os.path.join(root, dir)
83
+ rel_path = os.path.relpath(full_path, where)
84
+ package = rel_path.replace(os.path.sep, '.')
85
+
86
+ # Skip directory trees that are not valid packages
87
+ if '.' in dir or not cls._looks_like_package(full_path):
88
+ continue
89
+
90
+ # Should this package be included?
91
+ if include(package) and not exclude(package):
92
+ yield package
93
+
94
+ # Keep searching subdirectories, as there may be more packages
95
+ # down there, even if the parent was excluded.
96
+ dirs.append(dir)
97
+
98
+ @staticmethod
99
+ def _looks_like_package(path):
100
+ """Does a directory look like a package?"""
101
+ return os.path.isfile(os.path.join(path, '__init__.py'))
102
+
103
+ @staticmethod
104
+ def _build_filter(*patterns):
105
+ """
106
+ Given a list of patterns, return a callable that will be true only if
107
+ the input matches at least one of the patterns.
108
+ """
109
+ return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
110
+
111
+
112
+ class PEP420PackageFinder(PackageFinder):
113
+ @staticmethod
114
+ def _looks_like_package(path):
115
+ return True
116
+
117
+
118
+ find_packages = PackageFinder.find
119
+ find_namespace_packages = PEP420PackageFinder.find
120
+
121
+
122
+ def _install_setup_requires(attrs):
123
+ # Note: do not use `setuptools.Distribution` directly, as
124
+ # our PEP 517 backend patches `distutils.core.Distribution`.
125
+ class MinimalDistribution(distutils.core.Distribution):
126
+ """
127
+ A minimal version of a distribution for supporting the
128
+ fetch_build_eggs interface.
129
+ """
130
+
131
+ def __init__(self, attrs):
132
+ _incl = 'dependency_links', 'setup_requires'
133
+ filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
134
+ distutils.core.Distribution.__init__(self, filtered)
135
+
136
+ def finalize_options(self):
137
+ """
138
+ Disable finalize_options to avoid building the working set.
139
+ Ref #2158.
140
+ """
141
+
142
+ dist = MinimalDistribution(attrs)
143
+
144
+ # Honor setup.cfg's options.
145
+ dist.parse_config_files(ignore_option_errors=True)
146
+ if dist.setup_requires:
147
+ dist.fetch_build_eggs(dist.setup_requires)
148
+
149
+
150
+ def setup(**attrs):
151
+ # Make sure we have any requirements needed to interpret 'attrs'.
152
+ _install_setup_requires(attrs)
153
+ return distutils.core.setup(**attrs)
154
+
155
+
156
+ setup.__doc__ = distutils.core.setup.__doc__
157
+
158
+
159
+ _Command = monkey.get_unpatched(distutils.core.Command)
160
+
161
+
162
+ class Command(_Command):
163
+ __doc__ = _Command.__doc__
164
+
165
+ command_consumes_arguments = False
166
+
167
+ def __init__(self, dist, **kw):
168
+ """
169
+ Construct the command for dist, updating
170
+ vars(self) with any keyword parameters.
171
+ """
172
+ _Command.__init__(self, dist)
173
+ vars(self).update(kw)
174
+
175
+ def _ensure_stringlike(self, option, what, default=None):
176
+ val = getattr(self, option)
177
+ if val is None:
178
+ setattr(self, option, default)
179
+ return default
180
+ elif not isinstance(val, str):
181
+ raise DistutilsOptionError(
182
+ "'%s' must be a %s (got `%s`)" % (option, what, val)
183
+ )
184
+ return val
185
+
186
+ def ensure_string_list(self, option):
187
+ r"""Ensure that 'option' is a list of strings. If 'option' is
188
+ currently a string, we split it either on /,\s*/ or /\s+/, so
189
+ "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
190
+ ["foo", "bar", "baz"].
191
+ """
192
+ val = getattr(self, option)
193
+ if val is None:
194
+ return
195
+ elif isinstance(val, str):
196
+ setattr(self, option, re.split(r',\s*|\s+', val))
197
+ else:
198
+ if isinstance(val, list):
199
+ ok = all(isinstance(v, str) for v in val)
200
+ else:
201
+ ok = False
202
+ if not ok:
203
+ raise DistutilsOptionError(
204
+ "'%s' must be a list of strings (got %r)" % (option, val)
205
+ )
206
+
207
+ def reinitialize_command(self, command, reinit_subcommands=0, **kw):
208
+ cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
209
+ vars(cmd).update(kw)
210
+ return cmd
211
+
212
+
213
+ def _find_all_simple(path):
214
+ """
215
+ Find all files under 'path'
216
+ """
217
+ results = (
218
+ os.path.join(base, file)
219
+ for base, dirs, files in os.walk(path, followlinks=True)
220
+ for file in files
221
+ )
222
+ return filter(os.path.isfile, results)
223
+
224
+
225
+ def findall(dir=os.curdir):
226
+ """
227
+ Find all files under 'dir' and return the list of full filenames.
228
+ Unless dir is '.', return full filenames with dir prepended.
229
+ """
230
+ files = _find_all_simple(dir)
231
+ if dir == os.curdir:
232
+ make_rel = functools.partial(os.path.relpath, start=dir)
233
+ files = map(make_rel, files)
234
+ return list(files)
235
+
236
+
237
+ class sic(str):
238
+ """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
239
+
240
+
241
+ # Apply monkey patches
242
+ monkey.patch_all()
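For orientation, a short sketch of how the `find_packages`/`find_namespace_packages` helpers defined above are typically called from a `setup.py`; the project name and `src/` layout are hypothetical:

```python
# Sketch: discover packages under a hypothetical src/ layout.
from setuptools import setup, find_packages

setup(
    name="example-project",     # hypothetical project name
    version="0.1.0",
    package_dir={"": "src"},
    # PackageFinder.find walks src/, applying the include/exclude filters
    # built by _build_filter; ez_setup and __pycache__ are always excluded.
    packages=find_packages(where="src", exclude=["tests", "tests.*"]),
)
```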
env-llmeval/lib/python3.10/site-packages/setuptools/_deprecation_warning.py ADDED
@@ -0,0 +1,7 @@
1
+ class SetuptoolsDeprecationWarning(Warning):
2
+ """
3
+ Base class for warning deprecations in ``setuptools``
4
+
5
+ This class is not derived from ``DeprecationWarning``, and as such is
6
+ visible by default.
7
+ """
env-llmeval/lib/python3.10/site-packages/setuptools/_distutils/version.py ADDED
@@ -0,0 +1,363 @@
1
+ #
2
+ # distutils/version.py
3
+ #
4
+ # Implements multiple version numbering conventions for the
5
+ # Python Module Distribution Utilities.
6
+ #
7
+ # $Id$
8
+ #
9
+
10
+ """Provides classes to represent module version numbers (one class for
11
+ each style of version numbering). There are currently two such classes
12
+ implemented: StrictVersion and LooseVersion.
13
+
14
+ Every version number class implements the following interface:
15
+ * the 'parse' method takes a string and parses it to some internal
16
+ representation; if the string is an invalid version number,
17
+ 'parse' raises a ValueError exception
18
+ * the class constructor takes an optional string argument which,
19
+ if supplied, is passed to 'parse'
20
+ * __str__ reconstructs the string that was passed to 'parse' (or
21
+ an equivalent string -- ie. one that will generate an equivalent
22
+ version number instance)
23
+ * __repr__ generates Python code to recreate the version number instance
24
+ * _cmp compares the current instance with either another instance
25
+ of the same class or a string (which will be parsed to an instance
26
+ of the same class, thus must follow the same rules)
27
+ """
28
+
29
+ import re
30
+ import warnings
31
+ import contextlib
32
+
33
+
34
+ @contextlib.contextmanager
35
+ def suppress_known_deprecation():
36
+ with warnings.catch_warnings(record=True) as ctx:
37
+ warnings.filterwarnings(
38
+ action='default',
39
+ category=DeprecationWarning,
40
+ message="distutils Version classes are deprecated.",
41
+ )
42
+ yield ctx
43
+
44
+
45
+ class Version:
46
+ """Abstract base class for version numbering classes. Just provides
47
+ constructor (__init__) and reproducer (__repr__), because those
48
+ seem to be the same for all version numbering classes; and route
49
+ rich comparisons to _cmp.
50
+ """
51
+
52
+ def __init__ (self, vstring=None):
53
+ warnings.warn(
54
+ "distutils Version classes are deprecated. "
55
+ "Use packaging.version instead.",
56
+ DeprecationWarning,
57
+ stacklevel=2,
58
+ )
59
+ if vstring:
60
+ self.parse(vstring)
61
+
62
+ def __repr__ (self):
63
+ return "%s ('%s')" % (self.__class__.__name__, str(self))
64
+
65
+ def __eq__(self, other):
66
+ c = self._cmp(other)
67
+ if c is NotImplemented:
68
+ return c
69
+ return c == 0
70
+
71
+ def __lt__(self, other):
72
+ c = self._cmp(other)
73
+ if c is NotImplemented:
74
+ return c
75
+ return c < 0
76
+
77
+ def __le__(self, other):
78
+ c = self._cmp(other)
79
+ if c is NotImplemented:
80
+ return c
81
+ return c <= 0
82
+
83
+ def __gt__(self, other):
84
+ c = self._cmp(other)
85
+ if c is NotImplemented:
86
+ return c
87
+ return c > 0
88
+
89
+ def __ge__(self, other):
90
+ c = self._cmp(other)
91
+ if c is NotImplemented:
92
+ return c
93
+ return c >= 0
94
+
95
+
96
+ # Interface for version-number classes -- must be implemented
97
+ # by the following classes (the concrete ones -- Version should
98
+ # be treated as an abstract class).
99
+ # __init__ (string) - create and take same action as 'parse'
100
+ # (string parameter is optional)
101
+ # parse (string) - convert a string representation to whatever
102
+ # internal representation is appropriate for
103
+ # this style of version numbering
104
+ # __str__ (self) - convert back to a string; should be very similar
105
+ # (if not identical to) the string supplied to parse
106
+ # __repr__ (self) - generate Python code to recreate
107
+ # the instance
108
+ # _cmp (self, other) - compare two version numbers ('other' may
109
+ # be an unparsed version string, or another
110
+ # instance of your version class)
111
+
112
+
113
+ class StrictVersion (Version):
114
+
115
+ """Version numbering for anal retentives and software idealists.
116
+ Implements the standard interface for version number classes as
117
+ described above. A version number consists of two or three
118
+ dot-separated numeric components, with an optional "pre-release" tag
119
+ on the end. The pre-release tag consists of the letter 'a' or 'b'
120
+ followed by a number. If the numeric components of two version
121
+ numbers are equal, then one with a pre-release tag will always
122
+ be deemed earlier (lesser) than one without.
123
+
124
+ The following are valid version numbers (shown in the order that
125
+ would be obtained by sorting according to the supplied cmp function):
126
+
127
+ 0.4 0.4.0 (these two are equivalent)
128
+ 0.4.1
129
+ 0.5a1
130
+ 0.5b3
131
+ 0.5
132
+ 0.9.6
133
+ 1.0
134
+ 1.0.4a3
135
+ 1.0.4b1
136
+ 1.0.4
137
+
138
+ The following are examples of invalid version numbers:
139
+
140
+ 1
141
+ 2.7.2.2
142
+ 1.3.a4
143
+ 1.3pl1
144
+ 1.3c4
145
+
146
+ The rationale for this version numbering system will be explained
147
+ in the distutils documentation.
148
+ """
149
+
150
+ version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
151
+ re.VERBOSE | re.ASCII)
152
+
153
+
154
+ def parse (self, vstring):
155
+ match = self.version_re.match(vstring)
156
+ if not match:
157
+ raise ValueError("invalid version number '%s'" % vstring)
158
+
159
+ (major, minor, patch, prerelease, prerelease_num) = \
160
+ match.group(1, 2, 4, 5, 6)
161
+
162
+ if patch:
163
+ self.version = tuple(map(int, [major, minor, patch]))
164
+ else:
165
+ self.version = tuple(map(int, [major, minor])) + (0,)
166
+
167
+ if prerelease:
168
+ self.prerelease = (prerelease[0], int(prerelease_num))
169
+ else:
170
+ self.prerelease = None
171
+
172
+
173
+ def __str__ (self):
174
+
175
+ if self.version[2] == 0:
176
+ vstring = '.'.join(map(str, self.version[0:2]))
177
+ else:
178
+ vstring = '.'.join(map(str, self.version))
179
+
180
+ if self.prerelease:
181
+ vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
182
+
183
+ return vstring
184
+
185
+
186
+ def _cmp (self, other):
187
+ if isinstance(other, str):
188
+ with suppress_known_deprecation():
189
+ other = StrictVersion(other)
190
+ elif not isinstance(other, StrictVersion):
191
+ return NotImplemented
192
+
193
+ if self.version != other.version:
194
+ # numeric versions don't match
195
+ # prerelease stuff doesn't matter
196
+ if self.version < other.version:
197
+ return -1
198
+ else:
199
+ return 1
200
+
201
+ # have to compare prerelease
202
+ # case 1: neither has prerelease; they're equal
203
+ # case 2: self has prerelease, other doesn't; other is greater
204
+ # case 3: self doesn't have prerelease, other does: self is greater
205
+ # case 4: both have prerelease: must compare them!
206
+
207
+ if (not self.prerelease and not other.prerelease):
208
+ return 0
209
+ elif (self.prerelease and not other.prerelease):
210
+ return -1
211
+ elif (not self.prerelease and other.prerelease):
212
+ return 1
213
+ elif (self.prerelease and other.prerelease):
214
+ if self.prerelease == other.prerelease:
215
+ return 0
216
+ elif self.prerelease < other.prerelease:
217
+ return -1
218
+ else:
219
+ return 1
220
+ else:
221
+ assert False, "never get here"
222
+
223
+ # end class StrictVersion
224
+
225
+
226
+ # The rules according to Greg Stein:
227
+ # 1) a version number has 1 or more numbers separated by a period or by
228
+ # sequences of letters. If only periods, then these are compared
229
+ # left-to-right to determine an ordering.
230
+ # 2) sequences of letters are part of the tuple for comparison and are
231
+ # compared lexicographically
232
+ # 3) recognize the numeric components may have leading zeroes
233
+ #
234
+ # The LooseVersion class below implements these rules: a version number
235
+ # string is split up into a tuple of integer and string components, and
236
+ # comparison is a simple tuple comparison. This means that version
237
+ # numbers behave in a predictable and obvious way, but a way that might
238
+ # not necessarily be how people *want* version numbers to behave. There
239
+ # wouldn't be a problem if people could stick to purely numeric version
240
+ # numbers: just split on period and compare the numbers as tuples.
241
+ # However, people insist on putting letters into their version numbers;
242
+ # the most common purpose seems to be:
243
+ # - indicating a "pre-release" version
244
+ # ('alpha', 'beta', 'a', 'b', 'pre', 'p')
245
+ # - indicating a post-release patch ('p', 'pl', 'patch')
246
+ # but of course this can't cover all version number schemes, and there's
247
+ # no way to know what a programmer means without asking him.
248
+ #
249
+ # The problem is what to do with letters (and other non-numeric
250
+ # characters) in a version number. The current implementation does the
251
+ # obvious and predictable thing: keep them as strings and compare
252
+ # lexically within a tuple comparison. This has the desired effect if
253
+ # an appended letter sequence implies something "post-release":
254
+ # eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
255
+ #
256
+ # However, if letters in a version number imply a pre-release version,
257
+ # the "obvious" thing isn't correct. Eg. you would expect that
258
+ # "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
259
+ # implemented here, this just isn't so.
260
+ #
261
+ # Two possible solutions come to mind. The first is to tie the
262
+ # comparison algorithm to a particular set of semantic rules, as has
263
+ # been done in the StrictVersion class above. This works great as long
264
+ # as everyone can go along with bondage and discipline. Hopefully a
265
+ # (large) subset of Python module programmers will agree that the
266
+ # particular flavour of bondage and discipline provided by StrictVersion
267
+ # provides enough benefit to be worth using, and will submit their
268
+ # version numbering scheme to its domination. The free-thinking
269
+ # anarchists in the lot will never give in, though, and something needs
270
+ # to be done to accommodate them.
271
+ #
272
+ # Perhaps a "moderately strict" version class could be implemented that
273
+ # lets almost anything slide (syntactically), and makes some heuristic
274
+ # assumptions about non-digits in version number strings. This could
275
+ # sink into special-case-hell, though; if I was as talented and
276
+ # idiosyncratic as Larry Wall, I'd go ahead and implement a class that
277
+ # somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
278
+ # just as happy dealing with things like "2g6" and "1.13++". I don't
279
+ # think I'm smart enough to do it right though.
280
+ #
281
+ # In any case, I've coded the test suite for this module (see
282
+ # ../test/test_version.py) specifically to fail on things like comparing
283
+ # "1.2a2" and "1.2". That's not because the *code* is doing anything
284
+ # wrong, it's because the simple, obvious design doesn't match my
285
+ # complicated, hairy expectations for real-world version numbers. It
286
+ # would be a snap to fix the test suite to say, "Yep, LooseVersion does
287
+ # the Right Thing" (ie. the code matches the conception). But I'd rather
288
+ # have a conception that matches common notions about version numbers.
289
+
290
+ class LooseVersion (Version):
291
+
292
+ """Version numbering for anarchists and software realists.
293
+ Implements the standard interface for version number classes as
294
+ described above. A version number consists of a series of numbers,
295
+ separated by either periods or strings of letters. When comparing
296
+ version numbers, the numeric components will be compared
297
+ numerically, and the alphabetic components lexically. The following
298
+ are all valid version numbers, in no particular order:
299
+
300
+ 1.5.1
301
+ 1.5.2b2
302
+ 161
303
+ 3.10a
304
+ 8.02
305
+ 3.4j
306
+ 1996.07.12
307
+ 3.2.pl0
308
+ 3.1.1.6
309
+ 2g6
310
+ 11g
311
+ 0.960923
312
+ 2.2beta29
313
+ 1.13++
314
+ 5.5.kw
315
+ 2.0b1pl0
316
+
317
+ In fact, there is no such thing as an invalid version number under
318
+ this scheme; the rules for comparison are simple and predictable,
319
+ but may not always give the results you want (for some definition
320
+ of "want").
321
+ """
322
+
323
+ component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
324
+
325
+ def parse (self, vstring):
326
+ # I've given up on thinking I can reconstruct the version string
327
+ # from the parsed tuple -- so I just store the string here for
328
+ # use by __str__
329
+ self.vstring = vstring
330
+ components = [x for x in self.component_re.split(vstring)
331
+ if x and x != '.']
332
+ for i, obj in enumerate(components):
333
+ try:
334
+ components[i] = int(obj)
335
+ except ValueError:
336
+ pass
337
+
338
+ self.version = components
339
+
340
+
341
+ def __str__ (self):
342
+ return self.vstring
343
+
344
+
345
+ def __repr__ (self):
346
+ return "LooseVersion ('%s')" % str(self)
347
+
348
+
349
+ def _cmp (self, other):
350
+ if isinstance(other, str):
351
+ other = LooseVersion(other)
352
+ elif not isinstance(other, LooseVersion):
353
+ return NotImplemented
354
+
355
+ if self.version == other.version:
356
+ return 0
357
+ if self.version < other.version:
358
+ return -1
359
+ if self.version > other.version:
360
+ return 1
361
+
362
+
363
+ # end class LooseVersion
env-llmeval/lib/python3.10/site-packages/setuptools/archive_util.py ADDED
@@ -0,0 +1,205 @@
1
+ """Utilities for extracting common archive formats"""
2
+
3
+ import zipfile
4
+ import tarfile
5
+ import os
6
+ import shutil
7
+ import posixpath
8
+ import contextlib
9
+ from distutils.errors import DistutilsError
10
+
11
+ from pkg_resources import ensure_directory
12
+
13
+ __all__ = [
14
+ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
15
+ "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
16
+ ]
17
+
18
+
19
+ class UnrecognizedFormat(DistutilsError):
20
+ """Couldn't recognize the archive type"""
21
+
22
+
23
+ def default_filter(src, dst):
24
+ """The default progress/filter callback; returns True for all files"""
25
+ return dst
26
+
27
+
28
+ def unpack_archive(
29
+ filename, extract_dir, progress_filter=default_filter,
30
+ drivers=None):
31
+ """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
32
+
33
+ `progress_filter` is a function taking two arguments: a source path
34
+ internal to the archive ('/'-separated), and a filesystem path where it
35
+ will be extracted. The callback must return the desired extract path
36
+ (which may be the same as the one passed in), or else ``None`` to skip
37
+ that file or directory. The callback can thus be used to report on the
38
+ progress of the extraction, as well as to filter the items extracted or
39
+ alter their extraction paths.
40
+
41
+ `drivers`, if supplied, must be a non-empty sequence of functions with the
42
+ same signature as this function (minus the `drivers` argument), that raise
43
+ ``UnrecognizedFormat`` if they do not support extracting the designated
44
+ archive type. The `drivers` are tried in sequence until one is found that
45
+ does not raise an error, or until all are exhausted (in which case
46
+ ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
47
+ drivers, the module's ``extraction_drivers`` constant will be used, which
48
+ means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
49
+ order.
50
+ """
51
+ for driver in drivers or extraction_drivers:
52
+ try:
53
+ driver(filename, extract_dir, progress_filter)
54
+ except UnrecognizedFormat:
55
+ continue
56
+ else:
57
+ return
58
+ else:
59
+ raise UnrecognizedFormat(
60
+ "Not a recognized archive type: %s" % filename
61
+ )
62
+
63
+
64
+ def unpack_directory(filename, extract_dir, progress_filter=default_filter):
65
+ """"Unpack" a directory, using the same interface as for archives
66
+
67
+ Raises ``UnrecognizedFormat`` if `filename` is not a directory
68
+ """
69
+ if not os.path.isdir(filename):
70
+ raise UnrecognizedFormat("%s is not a directory" % filename)
71
+
72
+ paths = {
73
+ filename: ('', extract_dir),
74
+ }
75
+ for base, dirs, files in os.walk(filename):
76
+ src, dst = paths[base]
77
+ for d in dirs:
78
+ paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
79
+ for f in files:
80
+ target = os.path.join(dst, f)
81
+ target = progress_filter(src + f, target)
82
+ if not target:
83
+ # skip non-files
84
+ continue
85
+ ensure_directory(target)
86
+ f = os.path.join(base, f)
87
+ shutil.copyfile(f, target)
88
+ shutil.copystat(f, target)
89
+
90
+
91
+ def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
92
+ """Unpack zip `filename` to `extract_dir`
93
+
94
+ Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
95
+ by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
96
+ of the `progress_filter` argument.
97
+ """
98
+
99
+ if not zipfile.is_zipfile(filename):
100
+ raise UnrecognizedFormat("%s is not a zip file" % (filename,))
101
+
102
+ with zipfile.ZipFile(filename) as z:
103
+ for info in z.infolist():
104
+ name = info.filename
105
+
106
+ # don't extract absolute paths or ones with .. in them
107
+ if name.startswith('/') or '..' in name.split('/'):
108
+ continue
109
+
110
+ target = os.path.join(extract_dir, *name.split('/'))
111
+ target = progress_filter(name, target)
112
+ if not target:
113
+ continue
114
+ if name.endswith('/'):
115
+ # directory
116
+ ensure_directory(target)
117
+ else:
118
+ # file
119
+ ensure_directory(target)
120
+ data = z.read(info.filename)
121
+ with open(target, 'wb') as f:
122
+ f.write(data)
123
+ unix_attributes = info.external_attr >> 16
124
+ if unix_attributes:
125
+ os.chmod(target, unix_attributes)
126
+
127
+
128
+ def _resolve_tar_file_or_dir(tar_obj, tar_member_obj):
129
+ """Resolve any links and extract link targets as normal files."""
130
+ while tar_member_obj is not None and (
131
+ tar_member_obj.islnk() or tar_member_obj.issym()):
132
+ linkpath = tar_member_obj.linkname
133
+ if tar_member_obj.issym():
134
+ base = posixpath.dirname(tar_member_obj.name)
135
+ linkpath = posixpath.join(base, linkpath)
136
+ linkpath = posixpath.normpath(linkpath)
137
+ tar_member_obj = tar_obj._getmember(linkpath)
138
+
139
+ is_file_or_dir = (
140
+ tar_member_obj is not None and
141
+ (tar_member_obj.isfile() or tar_member_obj.isdir())
142
+ )
143
+ if is_file_or_dir:
144
+ return tar_member_obj
145
+
146
+ raise LookupError('Got unknown file type')
147
+
148
+
149
+ def _iter_open_tar(tar_obj, extract_dir, progress_filter):
150
+ """Emit member-destination pairs from a tar archive."""
151
+ # don't do any chowning!
152
+ tar_obj.chown = lambda *args: None
153
+
154
+ with contextlib.closing(tar_obj):
155
+ for member in tar_obj:
156
+ name = member.name
157
+ # don't extract absolute paths or ones with .. in them
158
+ if name.startswith('/') or '..' in name.split('/'):
159
+ continue
160
+
161
+ prelim_dst = os.path.join(extract_dir, *name.split('/'))
162
+
163
+ try:
164
+ member = _resolve_tar_file_or_dir(tar_obj, member)
165
+ except LookupError:
166
+ continue
167
+
168
+ final_dst = progress_filter(name, prelim_dst)
169
+ if not final_dst:
170
+ continue
171
+
172
+ if final_dst.endswith(os.sep):
173
+ final_dst = final_dst[:-1]
174
+
175
+ yield member, final_dst
176
+
177
+
178
+ def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
179
+ """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
180
+
181
+ Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
182
+ by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
183
+ of the `progress_filter` argument.
184
+ """
185
+ try:
186
+ tarobj = tarfile.open(filename)
187
+ except tarfile.TarError as e:
188
+ raise UnrecognizedFormat(
189
+ "%s is not a compressed or uncompressed tar file" % (filename,)
190
+ ) from e
191
+
192
+ for member, final_dst in _iter_open_tar(
193
+ tarobj, extract_dir, progress_filter,
194
+ ):
195
+ try:
196
+ # XXX Ugh
197
+ tarobj._extract_member(member, final_dst)
198
+ except tarfile.ExtractError:
199
+ # chown/chmod/mkfifo/mknode/makedev failed
200
+ pass
201
+
202
+ return True
203
+
204
+
205
+ extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
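A sketch of driving `unpack_archive` with a custom `progress_filter`; the archive path is hypothetical. Returning `None` from the callback skips an entry, and any truthy return value is used as the extraction path:

```python
from setuptools.archive_util import unpack_archive

def progress(src, dst):
    # src is the '/'-separated path inside the archive; dst is the
    # filesystem destination proposed by the driver.
    if src.endswith(".pyc"):
        return None               # skip compiled files
    print("extracting", src)
    return dst                    # extract everything else unchanged

# Hypothetical archive. Directories, zip files, and tarballs all work,
# since extraction_drivers is tried in order until one succeeds.
unpack_archive("example-1.0.tar.gz", "build/example", progress_filter=progress)
```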
env-llmeval/lib/python3.10/site-packages/setuptools/build_meta.py ADDED
@@ -0,0 +1,290 @@
1
+ """A PEP 517 interface to setuptools
2
+
3
+ Previously, when a user or a command line tool (let's call it a "frontend")
4
+ needed to make a request of setuptools to take a certain action, for
5
+ example, generating a list of installation requirements, the frontend would
6
+ call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
7
+
8
+ PEP 517 defines a different method of interfacing with setuptools. Rather
9
+ than calling "setup.py" directly, the frontend should:
10
+
11
+ 1. Set the current directory to the directory with a setup.py file
12
+ 2. Import this module into a safe python interpreter (one in which
13
+ setuptools can potentially set global variables or crash hard).
14
+ 3. Call one of the functions defined in PEP 517.
15
+
16
+ What each function does is defined in PEP 517. However, here is a "casual"
17
+ definition of the functions (this definition should not be relied on for
18
+ bug reports or API stability):
19
+
20
+ - `build_wheel`: build a wheel in the folder and return the basename
21
+ - `get_requires_for_build_wheel`: get the `setup_requires` to build
22
+ - `prepare_metadata_for_build_wheel`: get the `install_requires`
23
+ - `build_sdist`: build an sdist in the folder and return the basename
24
+ - `get_requires_for_build_sdist`: get the `setup_requires` to build
25
+
26
+ Again, this is not a formal definition! Just a "taste" of the module.
27
+ """
28
+
29
+ import io
30
+ import os
31
+ import sys
32
+ import tokenize
33
+ import shutil
34
+ import contextlib
35
+ import tempfile
36
+ import warnings
37
+
38
+ import setuptools
39
+ import distutils
40
+
41
+ from pkg_resources import parse_requirements
42
+
43
+ __all__ = ['get_requires_for_build_sdist',
44
+ 'get_requires_for_build_wheel',
45
+ 'prepare_metadata_for_build_wheel',
46
+ 'build_wheel',
47
+ 'build_sdist',
48
+ '__legacy__',
49
+ 'SetupRequirementsError']
50
+
51
+
52
+ class SetupRequirementsError(BaseException):
53
+ def __init__(self, specifiers):
54
+ self.specifiers = specifiers
55
+
56
+
57
+ class Distribution(setuptools.dist.Distribution):
58
+ def fetch_build_eggs(self, specifiers):
59
+ specifier_list = list(map(str, parse_requirements(specifiers)))
60
+
61
+ raise SetupRequirementsError(specifier_list)
62
+
63
+ @classmethod
64
+ @contextlib.contextmanager
65
+ def patch(cls):
66
+ """
67
+ Replace
68
+ distutils.dist.Distribution with this class
69
+ for the duration of this context.
70
+ """
71
+ orig = distutils.core.Distribution
72
+ distutils.core.Distribution = cls
73
+ try:
74
+ yield
75
+ finally:
76
+ distutils.core.Distribution = orig
77
+
78
+
79
+ @contextlib.contextmanager
80
+ def no_install_setup_requires():
81
+ """Temporarily disable installing setup_requires
82
+
83
+ Under PEP 517, the backend reports build dependencies to the frontend,
84
+ and the frontend is responsible for ensuring they're installed.
85
+ So setuptools (acting as a backend) should not try to install them.
86
+ """
87
+ orig = setuptools._install_setup_requires
88
+ setuptools._install_setup_requires = lambda attrs: None
89
+ try:
90
+ yield
91
+ finally:
92
+ setuptools._install_setup_requires = orig
93
+
94
+
95
+ def _get_immediate_subdirectories(a_dir):
96
+ return [name for name in os.listdir(a_dir)
97
+ if os.path.isdir(os.path.join(a_dir, name))]
98
+
99
+
100
+ def _file_with_extension(directory, extension):
101
+ matching = (
102
+ f for f in os.listdir(directory)
103
+ if f.endswith(extension)
104
+ )
105
+ try:
106
+ file, = matching
107
+ except ValueError:
108
+ raise ValueError(
109
+ 'No distribution was found. Ensure that `setup.py` '
110
+ 'is not empty and that it calls `setup()`.')
111
+ return file
112
+
113
+
114
+ def _open_setup_script(setup_script):
115
+ if not os.path.exists(setup_script):
116
+ # Supply a default setup.py
117
+ return io.StringIO(u"from setuptools import setup; setup()")
118
+
119
+ return getattr(tokenize, 'open', open)(setup_script)
120
+
121
+
122
+ @contextlib.contextmanager
123
+ def suppress_known_deprecation():
124
+ with warnings.catch_warnings():
125
+ warnings.filterwarnings('ignore', 'setup.py install is deprecated')
126
+ yield
127
+
128
+
129
+ class _BuildMetaBackend(object):
130
+
131
+ def _fix_config(self, config_settings):
132
+ config_settings = config_settings or {}
133
+ config_settings.setdefault('--global-option', [])
134
+ return config_settings
135
+
136
+ def _get_build_requires(self, config_settings, requirements):
137
+ config_settings = self._fix_config(config_settings)
138
+
139
+ sys.argv = sys.argv[:1] + ['egg_info'] + \
140
+ config_settings["--global-option"]
141
+ try:
142
+ with Distribution.patch():
143
+ self.run_setup()
144
+ except SetupRequirementsError as e:
145
+ requirements += e.specifiers
146
+
147
+ return requirements
148
+
149
+ def run_setup(self, setup_script='setup.py'):
150
+ # Note that we can reuse our build directory between calls
151
+ # Correctness comes first, then optimization later
152
+ __file__ = setup_script
153
+ __name__ = '__main__'
154
+
155
+ with _open_setup_script(__file__) as f:
156
+ code = f.read().replace(r'\r\n', r'\n')
157
+
158
+ exec(compile(code, __file__, 'exec'), locals())
159
+
160
+ def get_requires_for_build_wheel(self, config_settings=None):
161
+ config_settings = self._fix_config(config_settings)
162
+ return self._get_build_requires(
163
+ config_settings, requirements=['wheel'])
164
+
165
+ def get_requires_for_build_sdist(self, config_settings=None):
166
+ config_settings = self._fix_config(config_settings)
167
+ return self._get_build_requires(config_settings, requirements=[])
168
+
169
+ def prepare_metadata_for_build_wheel(self, metadata_directory,
170
+ config_settings=None):
171
+ sys.argv = sys.argv[:1] + [
172
+ 'dist_info', '--egg-base', metadata_directory]
173
+ with no_install_setup_requires():
174
+ self.run_setup()
175
+
176
+ dist_info_directory = metadata_directory
177
+ while True:
178
+ dist_infos = [f for f in os.listdir(dist_info_directory)
179
+ if f.endswith('.dist-info')]
180
+
181
+ if (
182
+ len(dist_infos) == 0 and
183
+ len(_get_immediate_subdirectories(dist_info_directory)) == 1
184
+ ):
185
+
186
+ dist_info_directory = os.path.join(
187
+ dist_info_directory, os.listdir(dist_info_directory)[0])
188
+ continue
189
+
190
+ assert len(dist_infos) == 1
191
+ break
192
+
193
+ # PEP 517 requires that the .dist-info directory be placed in the
194
+ # metadata_directory. To comply, we MUST copy the directory to the root
195
+ if dist_info_directory != metadata_directory:
196
+ shutil.move(
197
+ os.path.join(dist_info_directory, dist_infos[0]),
198
+ metadata_directory)
199
+ shutil.rmtree(dist_info_directory, ignore_errors=True)
200
+
201
+ return dist_infos[0]
202
+
203
+ def _build_with_temp_dir(self, setup_command, result_extension,
204
+ result_directory, config_settings):
205
+ config_settings = self._fix_config(config_settings)
206
+ result_directory = os.path.abspath(result_directory)
207
+
208
+ # Build in a temporary directory, then copy to the target.
209
+ os.makedirs(result_directory, exist_ok=True)
210
+ with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
211
+ sys.argv = (sys.argv[:1] + setup_command +
212
+ ['--dist-dir', tmp_dist_dir] +
213
+ config_settings["--global-option"])
214
+ with no_install_setup_requires():
215
+ self.run_setup()
216
+
217
+ result_basename = _file_with_extension(
218
+ tmp_dist_dir, result_extension)
219
+ result_path = os.path.join(result_directory, result_basename)
220
+ if os.path.exists(result_path):
221
+ # os.rename will fail overwriting on non-Unix.
222
+ os.remove(result_path)
223
+ os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
224
+
225
+ return result_basename
226
+
227
+ def build_wheel(self, wheel_directory, config_settings=None,
228
+ metadata_directory=None):
229
+ with suppress_known_deprecation():
230
+ return self._build_with_temp_dir(['bdist_wheel'], '.whl',
231
+ wheel_directory, config_settings)
232
+
233
+ def build_sdist(self, sdist_directory, config_settings=None):
234
+ return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
235
+ '.tar.gz', sdist_directory,
236
+ config_settings)
237
+
238
+
239
+ class _BuildMetaLegacyBackend(_BuildMetaBackend):
240
+ """Compatibility backend for setuptools
241
+
242
+ This is a version of setuptools.build_meta that endeavors
243
+ to maintain backwards
244
+ compatibility with pre-PEP 517 modes of invocation. It
245
+ exists as a temporary
246
+ bridge between the old packaging mechanism and the new
247
+ packaging mechanism,
248
+ and will eventually be removed.
249
+ """
250
+ def run_setup(self, setup_script='setup.py'):
251
+ # In order to maintain compatibility with scripts assuming that
252
+ # the setup.py script is in a directory on the PYTHONPATH, inject
253
+ # '' into sys.path. (pypa/setuptools#1642)
254
+ sys_path = list(sys.path) # Save the original path
255
+
256
+ script_dir = os.path.dirname(os.path.abspath(setup_script))
257
+ if script_dir not in sys.path:
258
+ sys.path.insert(0, script_dir)
259
+
260
+ # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to
261
+ # get the directory of the source code. They expect it to refer to the
262
+ # setup.py script.
263
+ sys_argv_0 = sys.argv[0]
264
+ sys.argv[0] = setup_script
265
+
266
+ try:
267
+ super(_BuildMetaLegacyBackend,
268
+ self).run_setup(setup_script=setup_script)
269
+ finally:
270
+ # While PEP 517 frontends should be calling each hook in a fresh
271
+ # subprocess according to the standard (and thus it should not be
272
+ # strictly necessary to restore the old sys.path), we'll restore
273
+ # the original path so that the path manipulation does not persist
274
+ # within the hook after run_setup is called.
275
+ sys.path[:] = sys_path
276
+ sys.argv[0] = sys_argv_0
277
+
278
+
279
+ # The primary backend
280
+ _BACKEND = _BuildMetaBackend()
281
+
282
+ get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
283
+ get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
284
+ prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
285
+ build_wheel = _BACKEND.build_wheel
286
+ build_sdist = _BACKEND.build_sdist
287
+
288
+
289
+ # The legacy backend
290
+ __legacy__ = _BuildMetaLegacyBackend()
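A sketch of exercising the backend the way a PEP 517 frontend would. Real frontends run each hook in a fresh subprocess with the reported build requirements installed; the project path is hypothetical:

```python
import os
from setuptools import build_meta

os.chdir("path/to/project")   # hypothetical checkout containing setup.py

# The module-level hooks are aliases bound to _BACKEND above.
print(build_meta.get_requires_for_build_wheel())   # e.g. ['wheel']
wheel_basename = build_meta.build_wheel("dist")    # returns the wheel's basename
sdist_basename = build_meta.build_sdist("dist")
print(wheel_basename, sdist_basename)
```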
env-llmeval/lib/python3.10/site-packages/setuptools/cli-32.exe ADDED
Binary file (65.5 kB).
 
env-llmeval/lib/python3.10/site-packages/setuptools/cli-arm64.exe ADDED
Binary file (137 kB).
 
env-llmeval/lib/python3.10/site-packages/setuptools/cli.exe ADDED
Binary file (65.5 kB).
 
env-llmeval/lib/python3.10/site-packages/setuptools/errors.py ADDED
@@ -0,0 +1,40 @@
+ """setuptools.errors
+
+ Provides exceptions used by setuptools modules.
+ """
+
+ from distutils import errors as _distutils_errors
+ from distutils.errors import DistutilsError
+
+
+ class RemovedCommandError(DistutilsError, RuntimeError):
+     """Error used for commands that have been removed in setuptools.
+
+     Since ``setuptools`` is built on ``distutils``, simply removing a command
+     from ``setuptools`` will make the behavior fall back to ``distutils``; this
+     error is raised if a command exists in ``distutils`` but has been actively
+     removed in ``setuptools``.
+     """
+
+
+ # Re-export errors from distutils to facilitate the migration to PEP632
+
+ ByteCompileError = _distutils_errors.DistutilsByteCompileError
+ CCompilerError = _distutils_errors.CCompilerError
+ ClassError = _distutils_errors.DistutilsClassError
+ CompileError = _distutils_errors.CompileError
+ ExecError = _distutils_errors.DistutilsExecError
+ FileError = _distutils_errors.DistutilsFileError
+ InternalError = _distutils_errors.DistutilsInternalError
+ LibError = _distutils_errors.LibError
+ LinkError = _distutils_errors.LinkError
+ ModuleError = _distutils_errors.DistutilsModuleError
+ OptionError = _distutils_errors.DistutilsOptionError
+ PlatformError = _distutils_errors.DistutilsPlatformError
+ PreprocessError = _distutils_errors.PreprocessError
+ SetupError = _distutils_errors.DistutilsSetupError
+ TemplateError = _distutils_errors.DistutilsTemplateError
+ UnknownFileError = _distutils_errors.UnknownFileError
+
+ # The root error class in the hierarchy
+ BaseError = _distutils_errors.DistutilsError
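
A hedged sketch of how downstream code is expected to use these aliases: catch the setuptools name instead of the deprecated distutils one. The validate_config helper below is hypothetical, made up purely for illustration:

    # Sketch: migrating an except clause to the re-exported alias.
    from setuptools.errors import SetupError  # alias of DistutilsSetupError

    def validate_config(name):  # hypothetical helper, not part of setuptools
        if not name:
            raise SetupError("the distribution 'name' must be non-empty")

    try:
        validate_config("")
    except SetupError as exc:
        print("setup configuration error:", exc)
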
env-llmeval/lib/python3.10/site-packages/setuptools/extension.py ADDED
@@ -0,0 +1,55 @@
+ import re
+ import functools
+ import distutils.core
+ import distutils.errors
+ import distutils.extension
+
+ from .monkey import get_unpatched
+
+
+ def _have_cython():
+     """
+     Return True if Cython can be imported.
+     """
+     cython_impl = 'Cython.Distutils.build_ext'
+     try:
+         # from (cython_impl) import build_ext
+         __import__(cython_impl, fromlist=['build_ext']).build_ext
+         return True
+     except Exception:
+         pass
+     return False
+
+
+ # for compatibility
+ have_pyrex = _have_cython
+
+ _Extension = get_unpatched(distutils.core.Extension)
+
+
+ class Extension(_Extension):
+     """Extension that uses '.c' files in place of '.pyx' files"""
+
+     def __init__(self, name, sources, *args, **kw):
+         # The *args is needed for compatibility as calls may use positional
+         # arguments. py_limited_api may be set only via keyword.
+         self.py_limited_api = kw.pop("py_limited_api", False)
+         _Extension.__init__(self, name, sources, *args, **kw)
+
+     def _convert_pyx_sources_to_lang(self):
+         """
+         Replace sources with '.pyx' extensions by sources with the target
+         language extension. This mechanism allows language authors to supply
+         pre-converted sources but to prefer the .pyx sources.
+         """
+         if _have_cython():
+             # the build has Cython, so allow it to compile the .pyx files
+             return
+         lang = self.language or ''
+         target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
+         sub = functools.partial(re.sub, '.pyx$', target_ext)
+         self.sources = list(map(sub, self.sources))
+
+
+ class Library(Extension):
+     """Just like a regular Extension, but built as a library instead"""
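
A short sketch of the .pyx fallback that _convert_pyx_sources_to_lang implements. Note the method is private and normally invoked by the build machinery, not user code; the module and file names here are illustrative:

    # Sketch: .pyx sources fall back to pre-converted .c files when
    # Cython is absent. 'demo/fast.pyx' is an illustrative path.
    from setuptools.extension import Extension, _have_cython

    ext = Extension('demo.fast', sources=['demo/fast.pyx'])
    ext._convert_pyx_sources_to_lang()
    # Without Cython: ext.sources == ['demo/fast.c'].
    # With Cython available the .pyx sources are kept for it to compile.
    print(_have_cython(), ext.sources)
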
env-llmeval/lib/python3.10/site-packages/setuptools/extern/__init__.py ADDED
@@ -0,0 +1,73 @@
+ import importlib.util
+ import sys
+
+
+ class VendorImporter:
+     """
+     A PEP 302 meta path importer for finding optionally-vendored
+     or otherwise naturally-installed packages from root_name.
+     """
+
+     def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+         self.root_name = root_name
+         self.vendored_names = set(vendored_names)
+         self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+     @property
+     def search_path(self):
+         """
+         Search first the vendor package then as a natural package.
+         """
+         yield self.vendor_pkg + '.'
+         yield ''
+
+     def _module_matches_namespace(self, fullname):
+         """Figure out if the target module is vendored."""
+         root, base, target = fullname.partition(self.root_name + '.')
+         return not root and any(map(target.startswith, self.vendored_names))
+
+     def load_module(self, fullname):
+         """
+         Iterate over the search path to locate and load fullname.
+         """
+         root, base, target = fullname.partition(self.root_name + '.')
+         for prefix in self.search_path:
+             try:
+                 extant = prefix + target
+                 __import__(extant)
+                 mod = sys.modules[extant]
+                 sys.modules[fullname] = mod
+                 return mod
+             except ImportError:
+                 pass
+         else:
+             raise ImportError(
+                 "The '{target}' package is required; "
+                 "normally this is bundled with this package so if you get "
+                 "this warning, consult the packager of your "
+                 "distribution.".format(**locals())
+             )
+
+     def create_module(self, spec):
+         return self.load_module(spec.name)
+
+     def exec_module(self, module):
+         pass
+
+     def find_spec(self, fullname, path=None, target=None):
+         """Return a module spec for vendored names."""
+         return (
+             importlib.util.spec_from_loader(fullname, self)
+             if self._module_matches_namespace(fullname) else None
+         )
+
+     def install(self):
+         """
+         Install this importer into sys.meta_path if not already present.
+         """
+         if self not in sys.meta_path:
+             sys.meta_path.append(self)
+
+
+ names = 'packaging', 'pyparsing', 'ordered_set', 'more_itertools',
+ VendorImporter(__name__, names, 'setuptools._vendor').install()
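
In effect, the installed importer makes setuptools.extern.X resolve to the bundled setuptools._vendor.X when present, falling back to a top-level X on debundled installs. A sketch of observing that behavior, assuming a standard install where setuptools ships its vendored copy of packaging:

    # Sketch: both import paths yield the same module object when the
    # vendored copy is present (on debundled distro packages the extern
    # path would instead resolve to the top-level 'packaging').
    import setuptools.extern.packaging.version as extern_version
    from setuptools._vendor.packaging import version as vendored_version

    print(extern_version is vendored_version)
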
env-llmeval/lib/python3.10/site-packages/setuptools/gui-32.exe ADDED
Binary file (65.5 kB).

env-llmeval/lib/python3.10/site-packages/setuptools/gui-64.exe ADDED
Binary file (75.3 kB).

env-llmeval/lib/python3.10/site-packages/setuptools/gui.exe ADDED
Binary file (65.5 kB).

env-llmeval/lib/python3.10/site-packages/setuptools/installer.py ADDED
@@ -0,0 +1,104 @@
+ import glob
+ import os
+ import subprocess
+ import sys
+ import tempfile
+ import warnings
+ from distutils import log
+ from distutils.errors import DistutilsError
+
+ import pkg_resources
+ from setuptools.wheel import Wheel
+ from ._deprecation_warning import SetuptoolsDeprecationWarning
+
+
+ def _fixup_find_links(find_links):
+     """Ensure the find-links option ends up being a list of strings."""
+     if isinstance(find_links, str):
+         return find_links.split()
+     assert isinstance(find_links, (tuple, list))
+     return find_links
+
+
+ def fetch_build_egg(dist, req):  # noqa: C901  # is too complex (16)  # FIXME
+     """Fetch an egg needed for building.
+
+     Use pip/wheel to fetch/build a wheel."""
+     warnings.warn(
+         "setuptools.installer is deprecated. Requirements should "
+         "be satisfied by a PEP 517 installer.",
+         SetuptoolsDeprecationWarning,
+     )
+     # Warn if wheel is not available
+     try:
+         pkg_resources.get_distribution('wheel')
+     except pkg_resources.DistributionNotFound:
+         dist.announce('WARNING: The wheel package is not available.', log.WARN)
+     # Ignore environment markers; if supplied, it is required.
+     req = strip_marker(req)
+     # Take easy_install options into account, but do not override relevant
+     # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
+     # take precedence.
+     opts = dist.get_option_dict('easy_install')
+     if 'allow_hosts' in opts:
+         raise DistutilsError('the `allow-hosts` option is not supported '
+                              'when using pip to install requirements.')
+     quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ
+     if 'PIP_INDEX_URL' in os.environ:
+         index_url = None
+     elif 'index_url' in opts:
+         index_url = opts['index_url'][1]
+     else:
+         index_url = None
+     find_links = (
+         _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts
+         else []
+     )
+     if dist.dependency_links:
+         find_links.extend(dist.dependency_links)
+     eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
+     environment = pkg_resources.Environment()
+     for egg_dist in pkg_resources.find_distributions(eggs_dir):
+         if egg_dist in req and environment.can_add(egg_dist):
+             return egg_dist
+     with tempfile.TemporaryDirectory() as tmpdir:
+         cmd = [
+             sys.executable, '-m', 'pip',
+             '--disable-pip-version-check',
+             'wheel', '--no-deps',
+             '-w', tmpdir,
+         ]
+         if quiet:
+             cmd.append('--quiet')
+         if index_url is not None:
+             cmd.extend(('--index-url', index_url))
+         for link in find_links or []:
+             cmd.extend(('--find-links', link))
+         # If requirement is a PEP 508 direct URL, directly pass
+         # the URL to pip, as `req @ url` does not work on the
+         # command line.
+         cmd.append(req.url or str(req))
+         try:
+             subprocess.check_call(cmd)
+         except subprocess.CalledProcessError as e:
+             raise DistutilsError(str(e)) from e
+         wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
+         dist_location = os.path.join(eggs_dir, wheel.egg_name())
+         wheel.install_as_egg(dist_location)
+         dist_metadata = pkg_resources.PathMetadata(
+             dist_location, os.path.join(dist_location, 'EGG-INFO'))
+         dist = pkg_resources.Distribution.from_filename(
+             dist_location, metadata=dist_metadata)
+         return dist
+
+
+ def strip_marker(req):
+     """
+     Return a new requirement without the environment marker to avoid
+     calling pip with something like `babel; extra == "i18n"`, which
+     would always be ignored.
+     """
+     # create a copy to avoid mutating the input
+     req = pkg_resources.Requirement.parse(str(req))
+     req.marker = None
+     return req
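
A quick sketch of strip_marker in action, assuming pkg_resources is importable (it ships with setuptools); the requirement string is the docstring's own example:

    # Sketch: the marker is dropped on a copy; the input is untouched.
    import pkg_resources
    from setuptools.installer import strip_marker

    req = pkg_resources.Requirement.parse('babel; extra == "i18n"')
    bare = strip_marker(req)
    print(str(bare))  # -> babel
    print(str(req))   # -> babel; extra == "i18n" (original unchanged)
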
env-llmeval/lib/python3.10/site-packages/setuptools/launch.py ADDED
@@ -0,0 +1,36 @@
+ """
+ Launch the Python script on the command line after
+ setuptools is bootstrapped via import.
+ """
+
+ # Note that setuptools gets imported implicitly by the
+ # invocation of this script using python -m setuptools.launch
+
+ import tokenize
+ import sys
+
+
+ def run():
+     """
+     Run the script in sys.argv[1] as if it had
+     been invoked naturally.
+     """
+     __builtins__
+     script_name = sys.argv[1]
+     namespace = dict(
+         __file__=script_name,
+         __name__='__main__',
+         __doc__=None,
+     )
+     sys.argv[:] = sys.argv[1:]
+
+     open_ = getattr(tokenize, 'open', open)
+     with open_(script_name) as fid:
+         script = fid.read()
+     norm_script = script.replace('\\r\\n', '\\n')
+     code = compile(norm_script, script_name, 'exec')
+     exec(code, namespace)
+
+
+ if __name__ == '__main__':
+     run()
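
For illustration, one way to invoke this launcher so that setuptools is imported before the target script executes. The setup.py path and the --version argument are assumptions for the example, not taken from the diff:

    # Sketch: run a setup script through the launcher module.
    import subprocess
    import sys

    subprocess.check_call(
        [sys.executable, '-m', 'setuptools.launch', 'setup.py', '--version']
    )
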
env-llmeval/lib/python3.10/site-packages/setuptools/monkey.py ADDED
@@ -0,0 +1,177 @@
+ """
+ Monkey patching of distutils.
+ """
+
+ import sys
+ import distutils.filelist
+ import platform
+ import types
+ import functools
+ from importlib import import_module
+ import inspect
+
+ import setuptools
+
+ __all__ = []
+ """
+ Everything is private. Contact the project team
+ if you think you need this functionality.
+ """
+
+
+ def _get_mro(cls):
+     """
+     Returns the base classes for cls sorted by the MRO.
+
+     Works around an issue on Jython where inspect.getmro will not return all
+     base classes if multiple classes share the same name. Instead, this
+     function will return a tuple containing the class itself, and the contents
+     of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
+     """
+     if platform.python_implementation() == "Jython":
+         return (cls,) + cls.__bases__
+     return inspect.getmro(cls)
+
+
+ def get_unpatched(item):
+     lookup = (
+         get_unpatched_class if isinstance(item, type) else
+         get_unpatched_function if isinstance(item, types.FunctionType) else
+         lambda item: None
+     )
+     return lookup(item)
+
+
+ def get_unpatched_class(cls):
+     """Protect against re-patching the distutils if reloaded
+
+     Also ensures that no other distutils extension monkeypatched the distutils
+     first.
+     """
+     external_bases = (
+         cls
+         for cls in _get_mro(cls)
+         if not cls.__module__.startswith('setuptools')
+     )
+     base = next(external_bases)
+     if not base.__module__.startswith('distutils'):
+         msg = "distutils has already been patched by %r" % cls
+         raise AssertionError(msg)
+     return base
+
+
+ def patch_all():
+     # we can't patch distutils.cmd, alas
+     distutils.core.Command = setuptools.Command
+
+     has_issue_12885 = sys.version_info <= (3, 5, 3)
+
+     if has_issue_12885:
+         # fix findall bug in distutils (http://bugs.python.org/issue12885)
+         distutils.filelist.findall = setuptools.findall
+
+     needs_warehouse = (
+         sys.version_info < (2, 7, 13)
+         or
+         (3, 4) < sys.version_info < (3, 4, 6)
+         or
+         (3, 5) < sys.version_info <= (3, 5, 3)
+     )
+
+     if needs_warehouse:
+         warehouse = 'https://upload.pypi.org/legacy/'
+         distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
+
+     _patch_distribution_metadata()
+
+     # Install Distribution throughout the distutils
+     for module in distutils.dist, distutils.core, distutils.cmd:
+         module.Distribution = setuptools.dist.Distribution
+
+     # Install the patched Extension
+     distutils.core.Extension = setuptools.extension.Extension
+     distutils.extension.Extension = setuptools.extension.Extension
+     if 'distutils.command.build_ext' in sys.modules:
+         sys.modules['distutils.command.build_ext'].Extension = (
+             setuptools.extension.Extension
+         )
+
+     patch_for_msvc_specialized_compiler()
+
+
+ def _patch_distribution_metadata():
+     """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
+     for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
+         new_val = getattr(setuptools.dist, attr)
+         setattr(distutils.dist.DistributionMetadata, attr, new_val)
+
+
+ def patch_func(replacement, target_mod, func_name):
+     """
+     Patch func_name in target_mod with replacement
+
+     Important - original must be resolved by name to avoid
+     patching an already patched function.
+     """
+     original = getattr(target_mod, func_name)
+
+     # set the 'unpatched' attribute on the replacement to
+     # point to the original.
+     vars(replacement).setdefault('unpatched', original)
+
+     # replace the function in the original module
+     setattr(target_mod, func_name, replacement)
+
+
+ def get_unpatched_function(candidate):
+     return getattr(candidate, 'unpatched')
+
+
+ def patch_for_msvc_specialized_compiler():
+     """
+     Patch functions in distutils to use standalone Microsoft Visual C++
+     compilers.
+     """
+     # import late to avoid circular imports on Python < 3.5
+     msvc = import_module('setuptools.msvc')
+
+     if platform.system() != 'Windows':
+         # Compilers only available on Microsoft Windows
+         return
+
+     def patch_params(mod_name, func_name):
+         """
+         Prepare the parameters for patch_func to patch indicated function.
+         """
+         repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
+         repl_name = repl_prefix + func_name.lstrip('_')
+         repl = getattr(msvc, repl_name)
+         mod = import_module(mod_name)
+         if not hasattr(mod, func_name):
+             raise ImportError(func_name)
+         return repl, mod, func_name
+
+     # Python 2.7 to 3.4
+     msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
+
+     # Python 3.5+
+     msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
+
+     try:
+         # Patch distutils.msvc9compiler
+         patch_func(*msvc9('find_vcvarsall'))
+         patch_func(*msvc9('query_vcvarsall'))
+     except ImportError:
+         pass
+
+     try:
+         # Patch distutils._msvccompiler._get_vc_env
+         patch_func(*msvc14('_get_vc_env'))
+     except ImportError:
+         pass
+
+     try:
+         # Patch distutils._msvccompiler.gen_lib_options for Numpy
+         patch_func(*msvc14('gen_lib_options'))
+     except ImportError:
+         pass
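
A closing sketch of the unpatching machinery above. In this vintage of setuptools, patch_all() is applied as a side effect of importing setuptools (an assumption worth verifying against setuptools/__init__.py); after that, get_unpatched walks the MRO back to the first non-setuptools class:

    # Sketch: recover the original distutils Extension after patching.
    import distutils.core
    import setuptools  # assumed to apply patch_all() on import
    from setuptools.monkey import get_unpatched

    patched = distutils.core.Extension  # now setuptools' Extension subclass
    original = get_unpatched(patched)   # first non-setuptools class in the MRO
    print(patched.__module__, '->', original.__module__)
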