diff --git a/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7082a2d5b9047bfc09589f387053e24ea490bc54 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013-2019 Nikolay Kim and Andrew Svetlov + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..fc964525f05e8e34961f0398b1930b8dec64ef26 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA @@ -0,0 +1,128 @@ +Metadata-Version: 2.1 +Name: aiosignal +Version: 1.3.1 +Summary: aiosignal: a list of registered asynchronous callbacks +Home-page: https://github.com/aio-libs/aiosignal +Maintainer: aiohttp team +Maintainer-email: team@aiohttp.org +License: Apache 2.0 +Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby +Project-URL: CI: GitHub Actions, https://github.com/aio-libs/aiosignal/actions +Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/aiosignal +Project-URL: Docs: RTD, https://docs.aiosignal.org +Project-URL: GitHub: issues, https://github.com/aio-libs/aiosignal/issues +Project-URL: GitHub: repo, https://github.com/aio-libs/aiosignal +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Operating System :: POSIX +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Framework :: AsyncIO +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: frozenlist (>=1.1.0) + +========= +aiosignal +========= + +.. image:: https://github.com/aio-libs/aiosignal/workflows/CI/badge.svg + :target: https://github.com/aio-libs/aiosignal/actions?query=workflow%3ACI + :alt: GitHub status for master branch + +.. image:: https://codecov.io/gh/aio-libs/aiosignal/branch/master/graph/badge.svg + :target: https://codecov.io/gh/aio-libs/aiosignal + :alt: codecov.io status for master branch + +.. 
image:: https://badge.fury.io/py/aiosignal.svg + :target: https://pypi.org/project/aiosignal + :alt: Latest PyPI package version + +.. image:: https://readthedocs.org/projects/aiosignal/badge/?version=latest + :target: https://aiosignal.readthedocs.io/ + :alt: Latest Read The Docs + +.. image:: https://img.shields.io/discourse/topics?server=https%3A%2F%2Faio-libs.discourse.group%2F + :target: https://aio-libs.discourse.group/ + :alt: Discourse group for io-libs + +.. image:: https://badges.gitter.im/Join%20Chat.svg + :target: https://gitter.im/aio-libs/Lobby + :alt: Chat on Gitter + +Introduction +============ + +A project to manage callbacks in `asyncio` projects. + +``Signal`` is a list of registered asynchronous callbacks. + +The signal's life-cycle has two stages: after creation its content +could be filled by using standard list operations: ``sig.append()`` +etc. + +After you call ``sig.freeze()`` the signal is *frozen*: adding, removing +and dropping callbacks is forbidden. + +The only available operation is calling the previously registered +callbacks by using ``await sig.send(data)``. + +For concrete usage examples see the `Signals + +section of the `Web Server Advanced +` chapter of the `aiohttp +documentation`_. + + +Installation +------------ + +:: + + $ pip install aiosignal + +The library requires Python 3.6 or newer. + + +Documentation +============= + +https://aiosignal.readthedocs.io/ + +Communication channels +====================== + +*gitter chat* https://gitter.im/aio-libs/Lobby + +Requirements +============ + +- Python >= 3.6 +- frozenlist >= 1.0.0 + +License +======= + +``aiosignal`` is offered under the Apache 2 license. + +Source code +=========== + +The project is hosted on GitHub_ + +Please file an issue in the `bug tracker +`_ if you have found a bug +or have some suggestions to improve the library. + +.. _GitHub: https://github.com/aio-libs/aiosignal +.. 
_aiohttp documentation: https://docs.aiohttp.org/ diff --git a/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ea4440e8647550ac24f031b8852c3d3c20ed5fe5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD @@ -0,0 +1,10 @@ +aiosignal-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +aiosignal-1.3.1.dist-info/LICENSE,sha256=b9UkPpLdf5jsacesN3co50kFcJ_1J6W_mNbQJjwE9bY,11332 +aiosignal-1.3.1.dist-info/METADATA,sha256=c0HRnlYzfXKztZPTFDlPfygizTherhG5WdwXlvco0Ug,4008 +aiosignal-1.3.1.dist-info/RECORD,, +aiosignal-1.3.1.dist-info/WHEEL,sha256=ZL1lC_LiPDNRgDnOl2taCMc83aPEUZgHHv2h-LDgdiM,92 +aiosignal-1.3.1.dist-info/top_level.txt,sha256=z45aNOKGDdrI1roqZY3BGXQ22kJFPHBmVdwtLYLtXC0,10 +aiosignal/__init__.py,sha256=zQNfFYRSd84bswvpFv8ZWjEr5DeYwV3LXbMSyo2222s,867 +aiosignal/__init__.pyi,sha256=xeCddYSS8fZAkz8S4HuKSR2IDe3N7RW_LKcXDPPA1Xk,311 +aiosignal/__pycache__/__init__.cpython-310.pyc,, +aiosignal/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..5e1f087ca1ac49327ef76b101df80489a03c2e7f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..ac6df3afe74a5fd43afc7ab7f8393571a495fdc5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt @@ -0,0 +1 @@ +aiosignal diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02fab1dcaf3149313eab4b31829b707f6cd28581 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..190e441325edc7d4c81e91d2120c1cc29fcd7a20 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py new file mode 100644 index 
0000000000000000000000000000000000000000..8fa1e07903036885be24a23392ea68c16065dfde --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_bleu.py @@ -0,0 +1,405 @@ +""" +Tests for BLEU translation evaluation metric +""" + +import io +import unittest + +from nltk.data import find +from nltk.translate.bleu_score import ( + SmoothingFunction, + brevity_penalty, + closest_ref_length, + corpus_bleu, + modified_precision, + sentence_bleu, +) + + +class TestBLEU(unittest.TestCase): + def test_modified_precision(self): + """ + Examples from the original BLEU paper + https://www.aclweb.org/anthology/P02-1040.pdf + """ + # Example 1: the "the*" example. + # Reference sentences. + ref1 = "the cat is on the mat".split() + ref2 = "there is a cat on the mat".split() + # Hypothesis sentence(s). + hyp1 = "the the the the the the the".split() + + references = [ref1, ref2] + + # Testing modified unigram precision. + hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1)) + assert round(hyp1_unigram_precision, 4) == 0.2857 + # With assertAlmostEqual at 4 place precision. + self.assertAlmostEqual(hyp1_unigram_precision, 0.28571428, places=4) + + # Testing modified bigram precision. + assert float(modified_precision(references, hyp1, n=2)) == 0.0 + + # Example 2: the "of the" example. + # Reference sentences + ref1 = str( + "It is a guide to action that ensures that the military " + "will forever heed Party commands" + ).split() + ref2 = str( + "It is the guiding principle which guarantees the military " + "forces always being under the command of the Party" + ).split() + ref3 = str( + "It is the practical guide for the army always to heed " + "the directions of the party" + ).split() + # Hypothesis sentence(s). + hyp1 = "of the".split() + + references = [ref1, ref2, ref3] + # Testing modified unigram precision. + assert float(modified_precision(references, hyp1, n=1)) == 1.0 + + # Testing modified bigram precision. + assert float(modified_precision(references, hyp1, n=2)) == 1.0 + + # Example 3: Proper MT outputs. + hyp1 = str( + "It is a guide to action which ensures that the military " + "always obeys the commands of the party" + ).split() + hyp2 = str( + "It is to insure the troops forever hearing the activity " + "guidebook that party direct" + ).split() + + references = [ref1, ref2, ref3] + + # Unigram precision. + hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1)) + hyp2_unigram_precision = float(modified_precision(references, hyp2, n=1)) + # Test unigram precision with assertAlmostEqual at 4 place precision. + self.assertAlmostEqual(hyp1_unigram_precision, 0.94444444, places=4) + self.assertAlmostEqual(hyp2_unigram_precision, 0.57142857, places=4) + # Test unigram precision with rounding. + assert round(hyp1_unigram_precision, 4) == 0.9444 + assert round(hyp2_unigram_precision, 4) == 0.5714 + + # Bigram precision + hyp1_bigram_precision = float(modified_precision(references, hyp1, n=2)) + hyp2_bigram_precision = float(modified_precision(references, hyp2, n=2)) + # Test bigram precision with assertAlmostEqual at 4 place precision. + self.assertAlmostEqual(hyp1_bigram_precision, 0.58823529, places=4) + self.assertAlmostEqual(hyp2_bigram_precision, 0.07692307, places=4) + # Test bigram precision with rounding. + assert round(hyp1_bigram_precision, 4) == 0.5882 + assert round(hyp2_bigram_precision, 4) == 0.0769 + + def test_brevity_penalty(self): + # Test case from brevity_penalty_closest function in mteval-v13a.pl. 
+ # Same test cases as in the doctest in nltk.translate.bleu_score.py + references = [["a"] * 11, ["a"] * 8] + hypothesis = ["a"] * 7 + hyp_len = len(hypothesis) + closest_ref_len = closest_ref_length(references, hyp_len) + self.assertAlmostEqual( + brevity_penalty(closest_ref_len, hyp_len), 0.8669, places=4 + ) + + references = [["a"] * 11, ["a"] * 8, ["a"] * 6, ["a"] * 7] + hypothesis = ["a"] * 7 + hyp_len = len(hypothesis) + closest_ref_len = closest_ref_length(references, hyp_len) + assert brevity_penalty(closest_ref_len, hyp_len) == 1.0 + + def test_zero_matches(self): + # Test case where there's 0 matches + references = ["The candidate has no alignment to any of the references".split()] + hypothesis = "John loves Mary".split() + + # Test BLEU to nth order of n-grams, where n is len(hypothesis). + for n in range(1, len(hypothesis)): + weights = (1.0 / n,) * n # Uniform weights. + assert sentence_bleu(references, hypothesis, weights) == 0 + + def test_full_matches(self): + # Test case where there's 100% matches + references = ["John loves Mary".split()] + hypothesis = "John loves Mary".split() + + # Test BLEU to nth order of n-grams, where n is len(hypothesis). + for n in range(1, len(hypothesis)): + weights = (1.0 / n,) * n # Uniform weights. + assert sentence_bleu(references, hypothesis, weights) == 1.0 + + def test_partial_matches_hypothesis_longer_than_reference(self): + references = ["John loves Mary".split()] + hypothesis = "John loves Mary who loves Mike".split() + # Since no 4-grams matches were found the result should be zero + # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0 + self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4) + # Checks that the warning has been raised because len(reference) < 4. + try: + self.assertWarns(UserWarning, sentence_bleu, references, hypothesis) + except AttributeError: + pass # unittest.TestCase.assertWarns is only supported in Python >= 3.2. + + +# @unittest.skip("Skipping fringe cases for BLEU.") +class TestBLEUFringeCases(unittest.TestCase): + def test_case_where_n_is_bigger_than_hypothesis_length(self): + # Test BLEU to nth order of n-grams, where n > len(hypothesis). + references = ["John loves Mary ?".split()] + hypothesis = "John loves Mary".split() + n = len(hypothesis) + 1 # + weights = (1.0 / n,) * n # Uniform weights. + # Since no n-grams matches were found the result should be zero + # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0 + self.assertAlmostEqual( + sentence_bleu(references, hypothesis, weights), 0.0, places=4 + ) + # Checks that the warning has been raised because len(hypothesis) < 4. + try: + self.assertWarns(UserWarning, sentence_bleu, references, hypothesis) + except AttributeError: + pass # unittest.TestCase.assertWarns is only supported in Python >= 3.2. + + # Test case where n > len(hypothesis) but so is n > len(reference), and + # it's a special case where reference == hypothesis. + references = ["John loves Mary".split()] + hypothesis = "John loves Mary".split() + # Since no 4-grams matches were found the result should be zero + # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0 + self.assertAlmostEqual( + sentence_bleu(references, hypothesis, weights), 0.0, places=4 + ) + + def test_empty_hypothesis(self): + # Test case where there's hypothesis is empty. 
+ references = ["The candidate has no alignment to any of the references".split()] + hypothesis = [] + assert sentence_bleu(references, hypothesis) == 0 + + def test_length_one_hypothesis(self): + # Test case where there's hypothesis is of length 1 in Smoothing method 4. + references = ["The candidate has no alignment to any of the references".split()] + hypothesis = ["Foo"] + method4 = SmoothingFunction().method4 + try: + sentence_bleu(references, hypothesis, smoothing_function=method4) + except ValueError: + pass # unittest.TestCase.assertWarns is only supported in Python >= 3.2. + + def test_empty_references(self): + # Test case where there's reference is empty. + references = [[]] + hypothesis = "John loves Mary".split() + assert sentence_bleu(references, hypothesis) == 0 + + def test_empty_references_and_hypothesis(self): + # Test case where both references and hypothesis is empty. + references = [[]] + hypothesis = [] + assert sentence_bleu(references, hypothesis) == 0 + + def test_reference_or_hypothesis_shorter_than_fourgrams(self): + # Test case where the length of reference or hypothesis + # is shorter than 4. + references = ["let it go".split()] + hypothesis = "let go it".split() + # Checks that the value the hypothesis and reference returns is 0.0 + # exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0 + self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4) + # Checks that the warning has been raised. + try: + self.assertWarns(UserWarning, sentence_bleu, references, hypothesis) + except AttributeError: + pass # unittest.TestCase.assertWarns is only supported in Python >= 3.2. + + +class TestBLEUvsMteval13a(unittest.TestCase): + def test_corpus_bleu(self): + ref_file = find("models/wmt15_eval/ref.ru") + hyp_file = find("models/wmt15_eval/google.ru") + mteval_output_file = find("models/wmt15_eval/mteval-13a.output") + + # Reads the BLEU scores from the `mteval-13a.output` file. + # The order of the list corresponds to the order of the ngrams. + with open(mteval_output_file) as mteval_fin: + # The numbers are located in the last 2nd line of the file. + # The first and 2nd item in the list are the score and system names. + mteval_bleu_scores = map(float, mteval_fin.readlines()[-2].split()[1:-1]) + + with open(ref_file, encoding="utf8") as ref_fin: + with open(hyp_file, encoding="utf8") as hyp_fin: + # Whitespace tokenize the file. + # Note: split() automatically strip(). + hypothesis = list(map(lambda x: x.split(), hyp_fin)) + # Note that the corpus_bleu input is list of list of references. + references = list(map(lambda x: [x.split()], ref_fin)) + # Without smoothing. + for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores): + nltk_bleu = corpus_bleu( + references, hypothesis, weights=(1.0 / i,) * i + ) + # Check that the BLEU scores difference is less than 0.005 . + # Note: This is an approximate comparison; as much as + # +/- 0.01 BLEU might be "statistically significant", + # the actual translation quality might not be. 
+ assert abs(mteval_bleu - nltk_bleu) < 0.005 + + # With the same smoothing method used in mteval-v13a.pl + chencherry = SmoothingFunction() + for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores): + nltk_bleu = corpus_bleu( + references, + hypothesis, + weights=(1.0 / i,) * i, + smoothing_function=chencherry.method3, + ) + assert abs(mteval_bleu - nltk_bleu) < 0.005 + + +class TestBLEUWithBadSentence(unittest.TestCase): + def test_corpus_bleu_with_bad_sentence(self): + hyp = "Teo S yb , oe uNb , R , T t , , t Tue Ar saln S , , 5istsi l , 5oe R ulO sae oR R" + ref = str( + "Their tasks include changing a pump on the faulty stokehold ." + "Likewise , two species that are very similar in morphology " + "were distinguished using genetics ." + ) + references = [[ref.split()]] + hypotheses = [hyp.split()] + try: # Check that the warning is raised since no. of 2-grams < 0. + with self.assertWarns(UserWarning): + # Verify that the BLEU output is undesired since no. of 2-grams < 0. + self.assertAlmostEqual( + corpus_bleu(references, hypotheses), 0.0, places=4 + ) + except AttributeError: # unittest.TestCase.assertWarns is only supported in Python >= 3.2. + self.assertAlmostEqual(corpus_bleu(references, hypotheses), 0.0, places=4) + + +class TestBLEUWithMultipleWeights(unittest.TestCase): + def test_corpus_bleu_with_multiple_weights(self): + hyp1 = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "which", + "ensures", + "that", + "the", + "military", + "always", + "obeys", + "the", + "commands", + "of", + "the", + "party", + ] + ref1a = [ + "It", + "is", + "a", + "guide", + "to", + "action", + "that", + "ensures", + "that", + "the", + "military", + "will", + "forever", + "heed", + "Party", + "commands", + ] + ref1b = [ + "It", + "is", + "the", + "guiding", + "principle", + "which", + "guarantees", + "the", + "military", + "forces", + "always", + "being", + "under", + "the", + "command", + "of", + "the", + "Party", + ] + ref1c = [ + "It", + "is", + "the", + "practical", + "guide", + "for", + "the", + "army", + "always", + "to", + "heed", + "the", + "directions", + "of", + "the", + "party", + ] + hyp2 = [ + "he", + "read", + "the", + "book", + "because", + "he", + "was", + "interested", + "in", + "world", + "history", + ] + ref2a = [ + "he", + "was", + "interested", + "in", + "world", + "history", + "because", + "he", + "read", + "the", + "book", + ] + weight_1 = (1, 0, 0, 0) + weight_2 = (0.25, 0.25, 0.25, 0.25) + weight_3 = (0, 0, 0, 0, 1) + + bleu_scores = corpus_bleu( + list_of_references=[[ref1a, ref1b, ref1c], [ref2a]], + hypotheses=[hyp1, hyp2], + weights=[weight_1, weight_2, weight_3], + ) + assert bleu_scores[0] == corpus_bleu( + [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_1 + ) + assert bleu_scores[1] == corpus_bleu( + [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_2 + ) + assert bleu_scores[2] == corpus_bleu( + [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_3 + ) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py new file mode 100644 index 0000000000000000000000000000000000000000..7c29c47de230c0e128cb969514787b2ded0451ef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm5.py @@ -0,0 +1,160 @@ +""" +Tests for IBM Model 5 training methods +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel, IBMModel4, IBMModel5 +from 
nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel5(unittest.TestCase): + def test_set_uniform_vacancy_probabilities_of_max_displacements(self): + # arrange + src_classes = {"schinken": 0, "eier": 0, "spam": 1} + trg_classes = {"ham": 0, "eggs": 1, "spam": 2} + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model5 = IBMModel5(corpus, 0, src_classes, trg_classes) + + # act + model5.set_uniform_probabilities(corpus) + + # assert + # number of vacancy difference values = + # 2 * number of words in longest target sentence + expected_prob = 1.0 / (2 * 4) + + # examine the boundary values for (dv, max_v, trg_class) + self.assertEqual(model5.head_vacancy_table[4][4][0], expected_prob) + self.assertEqual(model5.head_vacancy_table[-3][1][2], expected_prob) + self.assertEqual(model5.non_head_vacancy_table[4][4][0], expected_prob) + self.assertEqual(model5.non_head_vacancy_table[-3][1][2], expected_prob) + + def test_set_uniform_vacancy_probabilities_of_non_domain_values(self): + # arrange + src_classes = {"schinken": 0, "eier": 0, "spam": 1} + trg_classes = {"ham": 0, "eggs": 1, "spam": 2} + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model5 = IBMModel5(corpus, 0, src_classes, trg_classes) + + # act + model5.set_uniform_probabilities(corpus) + + # assert + # examine dv and max_v values that are not in the training data domain + self.assertEqual(model5.head_vacancy_table[5][4][0], IBMModel.MIN_PROB) + self.assertEqual(model5.head_vacancy_table[-4][1][2], IBMModel.MIN_PROB) + self.assertEqual(model5.head_vacancy_table[4][0][0], IBMModel.MIN_PROB) + self.assertEqual(model5.non_head_vacancy_table[5][4][0], IBMModel.MIN_PROB) + self.assertEqual(model5.non_head_vacancy_table[-4][1][2], IBMModel.MIN_PROB) + + def test_prob_t_a_given_s(self): + # arrange + src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] + trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] + src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4} + trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4} + corpus = [AlignedSent(trg_sentence, src_sentence)] + alignment_info = AlignmentInfo( + (0, 1, 4, 0, 2, 5, 5), + [None] + src_sentence, + ["UNUSED"] + trg_sentence, + [[3], [1], [4], [], [2], [5, 6]], + ) + + head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(float)) + ) + head_vacancy_table[1 - 0][6][3] = 0.97 # ich -> i + head_vacancy_table[3 - 0][5][4] = 0.97 # esse -> eat + head_vacancy_table[1 - 2][4][4] = 0.97 # gern -> love + head_vacancy_table[2 - 0][2][1] = 0.97 # räucherschinken -> smoked + + non_head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(float)) + ) + non_head_vacancy_table[1 - 0][1][0] = 0.96 # räucherschinken -> ham + + translation_table = defaultdict(lambda: defaultdict(float)) + translation_table["i"]["ich"] = 0.98 + translation_table["love"]["gern"] = 0.98 + translation_table["to"][None] = 0.98 + translation_table["eat"]["esse"] = 0.98 + translation_table["smoked"]["räucherschinken"] = 0.98 + translation_table["ham"]["räucherschinken"] = 0.98 + + fertility_table = defaultdict(lambda: defaultdict(float)) + fertility_table[1]["ich"] = 0.99 + fertility_table[1]["esse"] = 0.99 + fertility_table[0]["ja"] = 0.99 + fertility_table[1]["gern"] = 0.99 + fertility_table[2]["räucherschinken"] 
= 0.999 + fertility_table[1][None] = 0.99 + + probabilities = { + "p1": 0.167, + "translation_table": translation_table, + "fertility_table": fertility_table, + "head_vacancy_table": head_vacancy_table, + "non_head_vacancy_table": non_head_vacancy_table, + "head_distortion_table": None, + "non_head_distortion_table": None, + "alignment_table": None, + } + + model5 = IBMModel5(corpus, 0, src_classes, trg_classes, probabilities) + + # act + probability = model5.prob_t_a_given_s(alignment_info) + + # assert + null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) + fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999 + lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 + vacancy = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96 + expected_probability = ( + null_generation * fertility * lexical_translation * vacancy + ) + self.assertEqual(round(probability, 4), round(expected_probability, 4)) + + def test_prune(self): + # arrange + alignment_infos = [ + AlignmentInfo((1, 1), None, None, None), + AlignmentInfo((1, 2), None, None, None), + AlignmentInfo((2, 1), None, None, None), + AlignmentInfo((2, 2), None, None, None), + AlignmentInfo((0, 0), None, None, None), + ] + min_factor = IBMModel5.MIN_SCORE_FACTOR + best_score = 0.9 + scores = { + (1, 1): min(min_factor * 1.5, 1) * best_score, # above threshold + (1, 2): best_score, + (2, 1): min_factor * best_score, # at threshold + (2, 2): min_factor * best_score * 0.5, # low score + (0, 0): min(min_factor * 1.1, 1) * 1.2, # above threshold + } + corpus = [AlignedSent(["a"], ["b"])] + original_prob_function = IBMModel4.model4_prob_t_a_given_s + # mock static method + IBMModel4.model4_prob_t_a_given_s = staticmethod( + lambda a, model: scores[a.alignment] + ) + model5 = IBMModel5(corpus, 0, None, None) + + # act + pruned_alignments = model5.prune(alignment_infos) + + # assert + self.assertEqual(len(pruned_alignments), 3) + + # restore static method + IBMModel4.model4_prob_t_a_given_s = original_prob_function diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py new file mode 100644 index 0000000000000000000000000000000000000000..13d8e311c9337266a9cdc1b2ecfd67ef58cfb5b2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/test/unit/translate/test_meteor.py @@ -0,0 +1,20 @@ +import unittest + +from nltk.translate.meteor_score import meteor_score + + +class TestMETEOR(unittest.TestCase): + reference = [["this", "is", "a", "test"], ["this", "is" "test"]] + candidate = ["THIS", "Is", "a", "tEST"] + + def test_meteor(self): + score = meteor_score(self.reference, self.candidate, preprocess=str.lower) + assert score == 0.9921875 + + def test_reference_type_check(self): + str_reference = [" ".join(ref) for ref in self.reference] + self.assertRaises(TypeError, meteor_score, str_reference, self.candidate) + + def test_candidate_type_check(self): + str_candidate = " ".join(self.candidate) + self.assertRaises(TypeError, meteor_score, self.reference, str_candidate) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f11e5f082fd31fe8205dab62bfb7ea9909e81ebc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d0657fb86fa11f547965983f7a5834253402957 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f7aef470884b8d3d2e0de29d75debb550bfa7a9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1e8a02e45bd8c784a8cd924c7217c7bc3f26734 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82c893473789f25412287abfc92ec64bade6cb70 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3325452a66ca1ff0d6734c324409a6f9305479e9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/bleu_score.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/bleu_score.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2cc949db964b029f4e7324cbbc7236d3ff9248 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/bleu_score.py @@ -0,0 +1,685 @@ +# Natural Language Toolkit: BLEU Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# Contributors: Björn Mattsson, Dmitrijs Milajevs, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +"""BLEU score implementation.""" + +import math +import sys +import warnings +from collections import Counter +from fractions import Fraction + +from nltk.util import ngrams + + +def sentence_bleu( + references, + hypothesis, + weights=(0.25, 0.25, 0.25, 0.25), + smoothing_function=None, + auto_reweigh=False, +): + """ + Calculate BLEU score (Bilingual Evaluation Understudy) from + Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. + "BLEU: a method for automatic evaluation of machine translation." + In Proceedings of ACL. https://www.aclweb.org/anthology/P02-1040.pdf + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 
'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS + 0.5045... + + If there is no ngrams overlap for any order of n-grams, BLEU returns the + value 0. This is because the precision for the order of n-grams without + overlap is 0, and the geometric mean in the final BLEU score computation + multiplies the 0 with the precision of other n-grams. This results in 0 + (independently of the precision of the other n-gram orders). The following + example has zero 3-gram and 4-gram overlaps: + + >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS + 0.0 + + To avoid this harsh behaviour when no ngram overlaps are found a smoothing + function can be used. + + >>> chencherry = SmoothingFunction() + >>> sentence_bleu([reference1, reference2, reference3], hypothesis2, + ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS + 0.0370... + + The default BLEU calculates a score for up to 4-grams using uniform + weights (this is called BLEU-4). To evaluate your translations with + higher/lower order ngrams, use customized weights. E.g. when accounting + for up to 5-grams with uniform weights (this is called BLEU-5) use: + + >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.) + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS + 0.3920... + + Multiple BLEU scores can be computed at once, by supplying a list of weights. + E.g. for computing BLEU-2, BLEU-3 *and* BLEU-4 in one computation, use: + >>> weights = [ + ... (1./2., 1./2.), + ... (1./3., 1./3., 1./3.), + ... (1./4., 1./4., 1./4., 1./4.) + ... ] + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS + [0.7453..., 0.6240..., 0.5045...] + + :param references: reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) + :type weights: tuple(float) / list(tuple(float)) + :param smoothing_function: + :type smoothing_function: SmoothingFunction + :param auto_reweigh: Option to re-normalize the weights uniformly. + :type auto_reweigh: bool + :return: The sentence-level BLEU score. Returns a list if multiple weights were supplied. + :rtype: float / list(float) + """ + return corpus_bleu( + [references], [hypothesis], weights, smoothing_function, auto_reweigh + ) + + +def corpus_bleu( + list_of_references, + hypotheses, + weights=(0.25, 0.25, 0.25, 0.25), + smoothing_function=None, + auto_reweigh=False, +): + """ + Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all + the hypotheses and their respective references. 
+ + Instead of averaging the sentence level BLEU scores (i.e. macro-average + precision), the original BLEU metric (Papineni et al. 2002) accounts for + the micro-average precision (i.e. summing the numerators and denominators + for each hypothesis-reference(s) pairs before the division). + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS + 0.5920... + + The example below show that corpus_bleu() is different from averaging + sentence_bleu() for hypotheses + + >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1) + >>> score2 = sentence_bleu([ref2a], hyp2) + >>> (score1 + score2) / 2 # doctest: +ELLIPSIS + 0.6223... + + Custom weights may be supplied to fine-tune the BLEU score further. + A tuple of float weights for unigrams, bigrams, trigrams and so on can be given. + >>> weights = (0.1, 0.3, 0.5, 0.1) + >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS + 0.5818... + + This particular weight gave extra value to trigrams. + Furthermore, multiple weights can be given, resulting in multiple BLEU scores. + >>> weights = [ + ... (0.5, 0.5), + ... (0.333, 0.333, 0.334), + ... (0.25, 0.25, 0.25, 0.25), + ... (0.2, 0.2, 0.2, 0.2, 0.2) + ... ] + >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS + [0.8242..., 0.7067..., 0.5920..., 0.4719...] + + :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type list_of_references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) + :type weights: tuple(float) / list(tuple(float)) + :param smoothing_function: + :type smoothing_function: SmoothingFunction + :param auto_reweigh: Option to re-normalize the weights uniformly. + :type auto_reweigh: bool + :return: The corpus-level BLEU score. + :rtype: float + """ + # Before proceeding to compute BLEU, perform sanity checks. + + p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches. + p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref. 
+ hyp_lengths, ref_lengths = 0, 0 + + assert len(list_of_references) == len(hypotheses), ( + "The number of hypotheses and their reference(s) should be the " "same " + ) + + try: + weights[0][0] + except TypeError: + weights = [weights] + max_weight_length = max(len(weight) for weight in weights) + + # Iterate through each hypothesis and their corresponding references. + for references, hypothesis in zip(list_of_references, hypotheses): + # For each order of ngram, calculate the numerator and + # denominator for the corpus-level modified precision. + for i in range(1, max_weight_length + 1): + p_i = modified_precision(references, hypothesis, i) + p_numerators[i] += p_i.numerator + p_denominators[i] += p_i.denominator + + # Calculate the hypothesis length and the closest reference length. + # Adds them to the corpus-level hypothesis and reference counts. + hyp_len = len(hypothesis) + hyp_lengths += hyp_len + ref_lengths += closest_ref_length(references, hyp_len) + + # Calculate corpus-level brevity penalty. + bp = brevity_penalty(ref_lengths, hyp_lengths) + + # Collects the various precision values for the different ngram orders. + p_n = [ + Fraction(p_numerators[i], p_denominators[i], _normalize=False) + for i in range(1, max_weight_length + 1) + ] + + # Returns 0 if there's no matching n-grams + # We only need to check for p_numerators[1] == 0, since if there's + # no unigrams, there won't be any higher order ngrams. + if p_numerators[1] == 0: + return 0 if len(weights) == 1 else [0] * len(weights) + + # If there's no smoothing, set use method0 from SmoothinFunction class. + if not smoothing_function: + smoothing_function = SmoothingFunction().method0 + # Smoothen the modified precision. + # Note: smoothing_function() may convert values into floats; + # it tries to retain the Fraction object as much as the + # smoothing method allows. + p_n = smoothing_function( + p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths + ) + + bleu_scores = [] + for weight in weights: + # Uniformly re-weighting based on maximum hypothesis lengths if largest + # order of n-grams < 4 and weights is set at default. + if auto_reweigh: + if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25): + weight = (1 / hyp_lengths,) * hyp_lengths + + s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0) + s = bp * math.exp(math.fsum(s)) + bleu_scores.append(s) + return bleu_scores[0] if len(weights) == 1 else bleu_scores + + +def modified_precision(references, hypothesis, n): + """ + Calculate modified ngram precision. + + The normal precision method may lead to some wrong translations with + high-precision, e.g., the translation, in which a word of reference + repeats several times, has very high precision. + + This function only returns the Fraction object that contains the numerator + and denominator necessary to calculate the corpus-level precision. + To calculate the modified precision for a single pair of hypothesis and + references, cast the Fraction object into a float. + + The famous "the the the ... " example shows that you can get BLEU precision + by duplicating high frequency words. + + >>> reference1 = 'the cat is on the mat'.split() + >>> reference2 = 'there is a cat on the mat'.split() + >>> hypothesis1 = 'the the the the the the the'.split() + >>> references = [reference1, reference2] + >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS + 0.2857... 
+ + In the modified n-gram precision, a reference word will be considered + exhausted after a matching hypothesis word is identified, e.g. + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', + ... 'forever', 'heed', 'Party', 'commands'] + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + >>> hypothesis = 'of the'.split() + >>> references = [reference1, reference2, reference3] + >>> float(modified_precision(references, hypothesis, n=1)) + 1.0 + >>> float(modified_precision(references, hypothesis, n=2)) + 1.0 + + An example of a normal machine translation hypothesis: + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', + ... 'forever', 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + >>> references = [reference1, reference2, reference3] + >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS + 0.9444... + >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS + 0.5714... + >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS + 0.5882352941176471 + >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS + 0.07692... + + + :param references: A list of reference translations. + :type references: list(list(str)) + :param hypothesis: A hypothesis translation. + :type hypothesis: list(str) + :param n: The ngram order. + :type n: int + :return: BLEU's modified precision for the nth order ngram. + :rtype: Fraction + """ + # Extracts all ngrams in hypothesis + # Set an empty Counter if hypothesis is empty. + counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter() + # Extract a union of references' counts. + # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references]) + max_counts = {} + for reference in references: + reference_counts = ( + Counter(ngrams(reference, n)) if len(reference) >= n else Counter() + ) + for ngram in counts: + max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram]) + + # Assigns the intersection between hypothesis and references' counts. + clipped_counts = { + ngram: min(count, max_counts[ngram]) for ngram, count in counts.items() + } + + numerator = sum(clipped_counts.values()) + # Ensures that denominator is minimum 1 to avoid ZeroDivisionError. + # Usually this happens when the ngram order is > len(reference). 
+ denominator = max(1, sum(counts.values())) + + return Fraction(numerator, denominator, _normalize=False) + + +def closest_ref_length(references, hyp_len): + """ + This function finds the reference that is the closest length to the + hypothesis. The closest reference length is referred to as *r* variable + from the brevity penalty formula in Papineni et. al. (2002) + + :param references: A list of reference translations. + :type references: list(list(str)) + :param hyp_len: The length of the hypothesis. + :type hyp_len: int + :return: The length of the reference that's closest to the hypothesis. + :rtype: int + """ + ref_lens = (len(reference) for reference in references) + closest_ref_len = min( + ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len) + ) + return closest_ref_len + + +def brevity_penalty(closest_ref_len, hyp_len): + """ + Calculate brevity penalty. + + As the modified n-gram precision still has the problem from the short + length sentence, brevity penalty is used to modify the overall BLEU + score according to length. + + An example from the paper. There are three references with length 12, 15 + and 17. And a concise hypothesis of the length 12. The brevity penalty is 1. + + >>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12 + >>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15 + >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17 + >>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12 + >>> references = [reference1, reference2, reference3] + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 1.0 + + In case a hypothesis translation is shorter than the references, penalty is + applied. + + >>> references = [['a'] * 28, ['a'] * 28] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 0.2635971381157267 + + The length of the closest reference is used to compute the penalty. If the + length of a hypothesis is 12, and the reference lengths are 13 and 2, the + penalty is applied because the hypothesis length (12) is less then the + closest reference length (13). + + >>> references = [['a'] * 13, ['a'] * 2] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS + 0.9200... + + The brevity penalty doesn't depend on reference order. More importantly, + when two reference sentences are at the same distance, the shortest + reference sentence length is used. + + >>> references = [['a'] * 13, ['a'] * 11] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> bp1 = brevity_penalty(closest_ref_len, hyp_len) + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(reversed(references), hyp_len) + >>> bp2 = brevity_penalty(closest_ref_len, hyp_len) + >>> bp1 == bp2 == 1 + True + + A test example from mteval-v13a.pl (starting from the line 705): + + >>> references = [['a'] * 11, ['a'] * 8] + >>> hypothesis = ['a'] * 7 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS + 0.8668... 
+ + >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7] + >>> hypothesis = ['a'] * 7 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 1.0 + + :param hyp_len: The length of the hypothesis for a single sentence OR the + sum of all the hypotheses' lengths for a corpus + :type hyp_len: int + :param closest_ref_len: The length of the closest reference for a single + hypothesis OR the sum of all the closest references for every hypotheses. + :type closest_ref_len: int + :return: BLEU's brevity penalty. + :rtype: float + """ + if hyp_len > closest_ref_len: + return 1 + # If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0 + elif hyp_len == 0: + return 0 + else: + return math.exp(1 - closest_ref_len / hyp_len) + + +class SmoothingFunction: + """ + This is an implementation of the smoothing techniques + for segment-level BLEU scores that was presented in + Boxing Chen and Collin Cherry (2014) A Systematic Comparison of + Smoothing Techniques for Sentence-Level BLEU. In WMT14. + http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf + """ + + def __init__(self, epsilon=0.1, alpha=5, k=5): + """ + This will initialize the parameters required for the various smoothing + techniques, the default values are set to the numbers used in the + experiments from Chen and Cherry (2014). + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', + ... 'that', 'the', 'military', 'always', 'obeys', 'the', + ... 'commands', 'of', 'the', 'party'] + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', + ... 'that', 'the', 'military', 'will', 'forever', 'heed', + ... 'Party', 'commands'] + + >>> chencherry = SmoothingFunction() + >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS + 0.4452... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS + 0.4905... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS + 0.4135... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS + 0.4905... + + :param epsilon: the epsilon value use in method 1 + :type epsilon: float + :param alpha: the alpha value use in method 6 + :type alpha: int + :param k: the k value use in method 4 + :type k: int + """ + self.epsilon = epsilon + self.alpha = alpha + self.k = k + + def method0(self, p_n, *args, **kwargs): + """ + No smoothing. 
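+
+        If an n-gram order has zero matches, a warning is issued and
+        ``sys.float_info.min`` is returned for that order, so that the
+        geometric mean computed in log space evaluates to (effectively) 0
+        instead of raising a math domain error.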
+ """ + p_n_new = [] + for i, p_i in enumerate(p_n): + if p_i.numerator != 0: + p_n_new.append(p_i) + else: + _msg = str( + "\nThe hypothesis contains 0 counts of {}-gram overlaps.\n" + "Therefore the BLEU score evaluates to 0, independently of\n" + "how many N-gram overlaps of lower order it contains.\n" + "Consider using lower n-gram order or use " + "SmoothingFunction()" + ).format(i + 1) + warnings.warn(_msg) + # When numerator==0 where denonminator==0 or !=0, the result + # for the precision score should be equal to 0 or undefined. + # Due to BLEU geometric mean computation in logarithm space, + # we we need to take the return sys.float_info.min such that + # math.log(sys.float_info.min) returns a 0 precision score. + p_n_new.append(sys.float_info.min) + return p_n_new + + def method1(self, p_n, *args, **kwargs): + """ + Smoothing method 1: Add *epsilon* counts to precision with 0 counts. + """ + return [ + (p_i.numerator + self.epsilon) / p_i.denominator + if p_i.numerator == 0 + else p_i + for p_i in p_n + ] + + def method2(self, p_n, *args, **kwargs): + """ + Smoothing method 2: Add 1 to both numerator and denominator from + Chin-Yew Lin and Franz Josef Och (2004) ORANGE: a Method for + Evaluating Automatic Evaluation Metrics for Machine Translation. + In COLING 2004. + """ + return [ + Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False) + if i != 0 + else p_n[0] + for i in range(len(p_n)) + ] + + def method3(self, p_n, *args, **kwargs): + """ + Smoothing method 3: NIST geometric sequence smoothing + The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each + precision score whose matching n-gram count is null. + k is 1 for the first 'n' value for which the n-gram match count is null/ + + For example, if the text contains: + + - one 2-gram match + - and (consequently) two 1-gram matches + + the n-gram count for each individual precision score would be: + + - n=1 => prec_count = 2 (two unigrams) + - n=2 => prec_count = 1 (one bigram) + - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1) + - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2) + """ + incvnt = 1 # From the mteval-v13a.pl, it's referred to as k. + for i, p_i in enumerate(p_n): + if p_i.numerator == 0: + p_n[i] = 1 / (2**incvnt * p_i.denominator) + incvnt += 1 + return p_n + + def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 4: + Shorter translations may have inflated precision values due to having + smaller denominators; therefore, we give them proportionally + smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry + suggests dividing by 1/ln(len(T)), where T is the length of the translation. + """ + incvnt = 1 + hyp_len = hyp_len if hyp_len else len(hypothesis) + for i, p_i in enumerate(p_n): + if p_i.numerator == 0 and hyp_len > 1: + # incvnt = i + 1 * self.k / math.log( + # hyp_len + # ) # Note that this K is different from the K from NIST. + # p_n[i] = incvnt / p_i.denominator\ + numerator = 1 / (2**incvnt * self.k / math.log(hyp_len)) + p_n[i] = numerator / p_i.denominator + incvnt += 1 + return p_n + + def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 5: + The matched counts for similar values of n should be similar. To a + calculate the n-gram matched count, it averages the n−1, n and n+1 gram + matched counts. 
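+
+        Concretely, each ``p_n[i]`` is replaced by the average of the smoothed
+        value for the previous order, the current value, and the value for the
+        next order; an extra 5-gram precision is computed to supply the value
+        beyond the highest order.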
+ """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + m = {} + # Requires an precision value for an addition ngram order. + p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)] + m[-1] = p_n[0] + 1 + for i, p_i in enumerate(p_n): + p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3 + m[i] = p_n[i] + return p_n + + def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 6: + Interpolates the maximum likelihood estimate of the precision *p_n* with + a prior estimate *pi0*. The prior is estimated by assuming that the ratio + between pn and pn−1 will be the same as that between pn−1 and pn−2; from + Gao and He (2013) Training MRF-Based Phrase Translation Models using + Gradient Ascent. In NAACL. + """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + # This smoothing only works when p_1 and p_2 is non-zero. + # Raise an error with an appropriate message when the input is too short + # to use this smoothing technique. + assert p_n[2], "This smoothing method requires non-zero precision for bigrams." + for i, p_i in enumerate(p_n): + if i in [0, 1]: # Skips the first 2 orders of ngrams. + continue + else: + pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2] + # No. of ngrams in translation that matches the reference. + m = p_i.numerator + # No. of ngrams in translation. + l = sum(1 for _ in ngrams(hypothesis, i + 1)) + # Calculates the interpolated precision. + p_n[i] = (m + self.alpha * pi0) / (l + self.alpha) + return p_n + + def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 7: + Interpolates methods 4 and 5. + """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + p_n = self.method4(p_n, references, hypothesis, hyp_len) + p_n = self.method5(p_n, references, hypothesis, hyp_len) + return p_n diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/chrf_score.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/chrf_score.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b54f3a07166ba5179b2850cca82b21fe7c39f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/chrf_score.py @@ -0,0 +1,222 @@ +# Natural Language Toolkit: ChrF score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Maja Popovic +# Contributors: Liling Tan, Aleš Tamchyna (Memsource) +# URL: +# For license information, see LICENSE.TXT + +""" ChrF score implementation """ +import re +from collections import Counter, defaultdict + +from nltk.util import ngrams + + +def sentence_chrf( + reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True +): + """ + Calculates the sentence level CHRF (Character n-gram F-score) described in + - Maja Popovic. 2015. CHRF: Character n-gram F-score for Automatic MT Evaluation. + In Proceedings of the 10th Workshop on Machine Translation. + https://www.statmt.org/wmt15/pdf/WMT49.pdf + - Maja Popovic. 2016. CHRF Deconstructed: β Parameters and n-gram Weights. + In Proceedings of the 1st Conference on Machine Translation. + https://www.statmt.org/wmt16/pdf/W16-2341.pdf + + This implementation of CHRF only supports a single reference at the moment. 
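+
+    The score is a character n-gram F-score: precision and recall are computed
+    over character n-grams of orders ``min_len`` to ``max_len`` and combined
+    with ``beta`` weighting recall ``beta**2`` times as heavily as precision.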
+ + For details not reported in the paper, consult Maja Popovic's original + implementation: https://github.com/m-popovic/chrF + + The code should output results equivalent to running CHRF++ with the + following options: -nw 0 -b 3 + + An example from the original BLEU paper + https://www.aclweb.org/anthology/P02-1040.pdf + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct').split() + >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS + 0.6349... + >>> sentence_chrf(ref1, hyp2) # doctest: +ELLIPSIS + 0.3330... + + The infamous "the the the ... " example + + >>> ref = 'the cat is on the mat'.split() + >>> hyp = 'the the the the the the the'.split() + >>> sentence_chrf(ref, hyp) # doctest: +ELLIPSIS + 0.1468... + + An example to show that this function allows users to use strings instead of + tokens, i.e. list(str) as inputs. + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands') + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party') + >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS + 0.6349... + >>> type(ref1) == type(hyp1) == str + True + >>> sentence_chrf(ref1.split(), hyp1.split()) # doctest: +ELLIPSIS + 0.6349... + + To skip the unigrams and only use 2- to 3-grams: + + >>> sentence_chrf(ref1, hyp1, min_len=2, max_len=3) # doctest: +ELLIPSIS + 0.6617... + + :param references: reference sentence + :type references: list(str) / str + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) / str + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :param beta: the parameter to assign more importance to recall over precision + :type beta: float + :param ignore_whitespace: ignore whitespace characters in scoring + :type ignore_whitespace: bool + :return: the sentence level CHRF score. + :rtype: float + """ + return corpus_chrf( + [reference], + [hypothesis], + min_len, + max_len, + beta=beta, + ignore_whitespace=ignore_whitespace, + ) + + +def _preprocess(sent, ignore_whitespace): + if type(sent) != str: + # turn list of tokens into a string + sent = " ".join(sent) + + if ignore_whitespace: + sent = re.sub(r"\s+", "", sent) + return sent + + +def chrf_precision_recall_fscore_support( + reference, hypothesis, n, beta=3.0, epsilon=1e-16 +): + """ + This function computes the precision, recall and fscore from the ngram + overlaps. It returns the `support` which is the true positive score. + + By underspecifying the input type, the function will be agnostic as to how + it computes the ngrams and simply take the whichever element in the list; + it could be either token or character. + + :param reference: The reference sentence. + :type reference: list + :param hypothesis: The hypothesis sentence. + :type hypothesis: list + :param n: Extract up to the n-th order ngrams + :type n: int + :param beta: The parameter to assign more importance to recall over precision. + :type beta: float + :param epsilon: The fallback value if the hypothesis or reference is empty. 
+ :type epsilon: float + :return: Returns the precision, recall and f-score and support (true positive). + :rtype: tuple(float) + """ + ref_ngrams = Counter(ngrams(reference, n)) + hyp_ngrams = Counter(ngrams(hypothesis, n)) + + # calculate the number of ngram matches + overlap_ngrams = ref_ngrams & hyp_ngrams + tp = sum(overlap_ngrams.values()) # True positives. + tpfp = sum(hyp_ngrams.values()) # True positives + False positives. + tpfn = sum(ref_ngrams.values()) # True positives + False negatives. + + try: + prec = tp / tpfp # precision + rec = tp / tpfn # recall + factor = beta**2 + fscore = (1 + factor) * (prec * rec) / (factor * prec + rec) + except ZeroDivisionError: + prec = rec = fscore = epsilon + return prec, rec, fscore, tp + + +def corpus_chrf( + references, hypotheses, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True +): + """ + Calculates the corpus level CHRF (Character n-gram F-score), it is the + macro-averaged value of the sentence/segment level CHRF score. + + This implementation of CHRF only supports a single reference at the moment. + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> ref2 = str('It is the guiding principle which guarantees the military ' + ... 'forces always being under the command of the Party').split() + >>> + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct') + >>> corpus_chrf([ref1, ref2, ref1, ref2], [hyp1, hyp2, hyp2, hyp1]) # doctest: +ELLIPSIS + 0.3910... + + :param references: a corpus of list of reference sentences, w.r.t. hypotheses + :type references: list(list(str)) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :param beta: the parameter to assign more importance to recall over precision + :type beta: float + :param ignore_whitespace: ignore whitespace characters in scoring + :type ignore_whitespace: bool + :return: the sentence level CHRF score. + :rtype: float + """ + + assert len(references) == len( + hypotheses + ), "The number of hypotheses and their references should be the same" + num_sents = len(hypotheses) + + # Keep f-scores for each n-gram order separate + ngram_fscores = defaultdict(lambda: list()) + + # Iterate through each hypothesis and their corresponding references. + for reference, hypothesis in zip(references, hypotheses): + + # preprocess both reference and hypothesis + reference = _preprocess(reference, ignore_whitespace) + hypothesis = _preprocess(hypothesis, ignore_whitespace) + + # Calculate f-scores for each sentence and for each n-gram order + # separately. + for n in range(min_len, max_len + 1): + # Compute the precision, recall, fscore and support. 
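+            # With the default beta=3, the F-score weights recall
+            # beta**2 = 9 times more heavily than precision.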
+ prec, rec, fscore, tp = chrf_precision_recall_fscore_support( + reference, hypothesis, n, beta=beta + ) + ngram_fscores[n].append(fscore) + + # how many n-gram sizes + num_ngram_sizes = len(ngram_fscores) + + # sum of f-scores over all sentences for each n-gram order + total_scores = [sum(fscores) for n, fscores in ngram_fscores.items()] + + # macro-average over n-gram orders and over all sentences + return (sum(total_scores) / num_ngram_sizes) / num_sents diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/gale_church.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/gale_church.py new file mode 100644 index 0000000000000000000000000000000000000000..d7c81940d9ac27c159b680d688343e67e9ef9c58 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/gale_church.py @@ -0,0 +1,263 @@ +# Natural Language Toolkit: Gale-Church Aligner +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Torsten Marek +# Contributor: Cassidy Laidlaw, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +""" + +A port of the Gale-Church Aligner. + +Gale & Church (1993), A Program for Aligning Sentences in Bilingual Corpora. +https://aclweb.org/anthology/J93-1004.pdf + +""" + +import math + +try: + from norm import logsf as norm_logsf + from scipy.stats import norm +except ImportError: + + def erfcc(x): + """Complementary error function.""" + z = abs(x) + t = 1 / (1 + 0.5 * z) + r = t * math.exp( + -z * z + - 1.26551223 + + t + * ( + 1.00002368 + + t + * ( + 0.37409196 + + t + * ( + 0.09678418 + + t + * ( + -0.18628806 + + t + * ( + 0.27886807 + + t + * ( + -1.13520398 + + t + * (1.48851587 + t * (-0.82215223 + t * 0.17087277)) + ) + ) + ) + ) + ) + ) + ) + if x >= 0.0: + return r + else: + return 2.0 - r + + def norm_cdf(x): + """Return the area under the normal distribution from M{-∞..x}.""" + return 1 - 0.5 * erfcc(x / math.sqrt(2)) + + def norm_logsf(x): + try: + return math.log(1 - norm_cdf(x)) + except ValueError: + return float("-inf") + + +LOG2 = math.log(2) + + +class LanguageIndependent: + # These are the language-independent probabilities and parameters + # given in Gale & Church + + # for the computation, l_1 is always the language with less characters + PRIORS = { + (1, 0): 0.0099, + (0, 1): 0.0099, + (1, 1): 0.89, + (2, 1): 0.089, + (1, 2): 0.089, + (2, 2): 0.011, + } + + AVERAGE_CHARACTERS = 1 + VARIANCE_CHARACTERS = 6.8 + + +def trace(backlinks, source_sents_lens, target_sents_lens): + """ + Traverse the alignment cost from the tracebacks and retrieves + appropriate sentence pairs. 
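+
+    The traceback is followed from ``(len(source_sents_lens), len(target_sents_lens))``
+    back to ``(0, 0)``, collecting the source/target index pairs covered by
+    each alignment step; the pairs are returned in document order.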
+ + :param backlinks: A dictionary where the key is the alignment points and value is the cost (referencing the LanguageIndependent.PRIORS) + :type backlinks: dict + :param source_sents_lens: A list of target sentences' lengths + :type source_sents_lens: list(int) + :param target_sents_lens: A list of target sentences' lengths + :type target_sents_lens: list(int) + """ + links = [] + position = (len(source_sents_lens), len(target_sents_lens)) + while position != (0, 0) and all(p >= 0 for p in position): + try: + s, t = backlinks[position] + except TypeError: + position = (position[0] - 1, position[1] - 1) + continue + for i in range(s): + for j in range(t): + links.append((position[0] - i - 1, position[1] - j - 1)) + position = (position[0] - s, position[1] - t) + + return links[::-1] + + +def align_log_prob(i, j, source_sents, target_sents, alignment, params): + """Returns the log probability of the two sentences C{source_sents[i]}, C{target_sents[j]} + being aligned with a specific C{alignment}. + + @param i: The offset of the source sentence. + @param j: The offset of the target sentence. + @param source_sents: The list of source sentence lengths. + @param target_sents: The list of target sentence lengths. + @param alignment: The alignment type, a tuple of two integers. + @param params: The sentence alignment parameters. + + @returns: The log probability of a specific alignment between the two sentences, given the parameters. + """ + l_s = sum(source_sents[i - offset - 1] for offset in range(alignment[0])) + l_t = sum(target_sents[j - offset - 1] for offset in range(alignment[1])) + try: + # actually, the paper says l_s * params.VARIANCE_CHARACTERS, this is based on the C + # reference implementation. With l_s in the denominator, insertions are impossible. + m = (l_s + l_t / params.AVERAGE_CHARACTERS) / 2 + delta = (l_s * params.AVERAGE_CHARACTERS - l_t) / math.sqrt( + m * params.VARIANCE_CHARACTERS + ) + except ZeroDivisionError: + return float("-inf") + + return -(LOG2 + norm_logsf(abs(delta)) + math.log(params.PRIORS[alignment])) + + +def align_blocks(source_sents_lens, target_sents_lens, params=LanguageIndependent): + """Return the sentence alignment of two text blocks (usually paragraphs). + + >>> align_blocks([5,5,5], [7,7,7]) + [(0, 0), (1, 1), (2, 2)] + >>> align_blocks([10,5,5], [12,20]) + [(0, 0), (1, 1), (2, 1)] + >>> align_blocks([12,20], [10,5,5]) + [(0, 0), (1, 1), (1, 2)] + >>> align_blocks([10,2,10,10,2,10], [12,3,20,3,12]) + [(0, 0), (1, 1), (2, 2), (3, 2), (4, 3), (5, 4)] + + @param source_sents_lens: The list of source sentence lengths. + @param target_sents_lens: The list of target sentence lengths. + @param params: the sentence alignment parameters. + @return: The sentence alignments, a list of index pairs. 
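+
+    Note that the sentence lengths are measured in characters, as in
+    Gale & Church (1993).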
+ """ + + alignment_types = list(params.PRIORS.keys()) + + # there are always three rows in the history (with the last of them being filled) + D = [[]] + + backlinks = {} + + for i in range(len(source_sents_lens) + 1): + for j in range(len(target_sents_lens) + 1): + min_dist = float("inf") + min_align = None + for a in alignment_types: + prev_i = -1 - a[0] + prev_j = j - a[1] + if prev_i < -len(D) or prev_j < 0: + continue + p = D[prev_i][prev_j] + align_log_prob( + i, j, source_sents_lens, target_sents_lens, a, params + ) + if p < min_dist: + min_dist = p + min_align = a + + if min_dist == float("inf"): + min_dist = 0 + + backlinks[(i, j)] = min_align + D[-1].append(min_dist) + + if len(D) > 2: + D.pop(0) + D.append([]) + + return trace(backlinks, source_sents_lens, target_sents_lens) + + +def align_texts(source_blocks, target_blocks, params=LanguageIndependent): + """Creates the sentence alignment of two texts. + + Texts can consist of several blocks. Block boundaries cannot be crossed by sentence + alignment links. + + Each block consists of a list that contains the lengths (in characters) of the sentences + in this block. + + @param source_blocks: The list of blocks in the source text. + @param target_blocks: The list of blocks in the target text. + @param params: the sentence alignment parameters. + + @returns: A list of sentence alignment lists + """ + if len(source_blocks) != len(target_blocks): + raise ValueError( + "Source and target texts do not have the same number of blocks." + ) + + return [ + align_blocks(source_block, target_block, params) + for source_block, target_block in zip(source_blocks, target_blocks) + ] + + +# File I/O functions; may belong in a corpus reader + + +def split_at(it, split_value): + """Splits an iterator C{it} at values of C{split_value}. + + Each instance of C{split_value} is swallowed. The iterator produces + subiterators which need to be consumed fully before the next subiterator + can be used. + """ + + def _chunk_iterator(first): + v = first + while v != split_value: + yield v + v = it.next() + + while True: + yield _chunk_iterator(it.next()) + + +def parse_token_stream(stream, soft_delimiter, hard_delimiter): + """Parses a stream of tokens and splits it into sentences (using C{soft_delimiter} tokens) + and blocks (using C{hard_delimiter} tokens) for use with the L{align_texts} function. + """ + return [ + [ + sum(len(token) for token in sentence_it) + for sentence_it in split_at(block_it, soft_delimiter) + ] + for block_it in split_at(stream, hard_delimiter) + ] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/gdfa.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/gdfa.py new file mode 100644 index 0000000000000000000000000000000000000000..57df0cea63b35bfbf83f9d330bf137563b332a33 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/gdfa.py @@ -0,0 +1,138 @@ +# Natural Language Toolkit: GDFA word alignment symmetrization +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Liling Tan +# URL: +# For license information, see LICENSE.TXT + +from collections import defaultdict + + +def grow_diag_final_and(srclen, trglen, e2f, f2e): + """ + This module symmetrisatizes the source-to-target and target-to-source + word alignment output and produces, aka. GDFA algorithm (Koehn, 2005). + + Step 1: Find the intersection of the bidirectional alignment. 
+ + Step 2: Search for additional neighbor alignment points to be added, given + these criteria: (i) neighbor alignments points are not in the + intersection and (ii) neighbor alignments are in the union. + + Step 3: Add all other alignment points that are not in the intersection, not in + the neighboring alignments that met the criteria but in the original + forward/backward alignment outputs. + + >>> forw = ('0-0 2-1 9-2 21-3 10-4 7-5 11-6 9-7 12-8 1-9 3-10 ' + ... '4-11 17-12 17-13 25-14 13-15 24-16 11-17 28-18') + >>> back = ('0-0 1-9 2-9 3-10 4-11 5-12 6-6 7-5 8-6 9-7 10-4 ' + ... '11-6 12-8 13-12 15-12 17-13 18-13 19-12 20-13 ' + ... '21-3 22-12 23-14 24-17 25-15 26-17 27-18 28-18') + >>> srctext = ("この よう な ハロー 白色 わい 星 の L 関数 " + ... "は L と 共 に 不連続 に 増加 する こと が " + ... "期待 さ れる こと を 示し た 。") + >>> trgtext = ("Therefore , we expect that the luminosity function " + ... "of such halo white dwarfs increases discontinuously " + ... "with the luminosity .") + >>> srclen = len(srctext.split()) + >>> trglen = len(trgtext.split()) + >>> + >>> gdfa = grow_diag_final_and(srclen, trglen, forw, back) + >>> gdfa == sorted(set([(28, 18), (6, 6), (24, 17), (2, 1), (15, 12), (13, 12), + ... (2, 9), (3, 10), (26, 17), (25, 15), (8, 6), (9, 7), (20, + ... 13), (18, 13), (0, 0), (10, 4), (13, 15), (23, 14), (7, 5), + ... (25, 14), (1, 9), (17, 13), (4, 11), (11, 17), (9, 2), (22, + ... 12), (27, 18), (24, 16), (21, 3), (19, 12), (17, 12), (5, + ... 12), (11, 6), (12, 8)])) + True + + References: + Koehn, P., A. Axelrod, A. Birch, C. Callison, M. Osborne, and D. Talbot. + 2005. Edinburgh System Description for the 2005 IWSLT Speech + Translation Evaluation. In MT Eval Workshop. + + :type srclen: int + :param srclen: the number of tokens in the source language + :type trglen: int + :param trglen: the number of tokens in the target language + :type e2f: str + :param e2f: the forward word alignment outputs from source-to-target + language (in pharaoh output format) + :type f2e: str + :param f2e: the backward word alignment outputs from target-to-source + language (in pharaoh output format) + :rtype: set(tuple(int)) + :return: the symmetrized alignment points from the GDFA algorithm + """ + + # Converts pharaoh text format into list of tuples. + e2f = [tuple(map(int, a.split("-"))) for a in e2f.split()] + f2e = [tuple(map(int, a.split("-"))) for a in f2e.split()] + + neighbors = [(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)] + alignment = set(e2f).intersection(set(f2e)) # Find the intersection. + union = set(e2f).union(set(f2e)) + + # *aligned* is used to check if neighbors are aligned in grow_diag() + aligned = defaultdict(set) + for i, j in alignment: + aligned["e"].add(i) + aligned["f"].add(j) + + def grow_diag(): + """ + Search for the neighbor points and them to the intersected alignment + points if criteria are met. + """ + prev_len = len(alignment) - 1 + # iterate until no new points added + while prev_len < len(alignment): + no_new_points = True + # for english word e = 0 ... en + for e in range(srclen): + # for foreign word f = 0 ... 
fn + for f in range(trglen): + # if ( e aligned with f) + if (e, f) in alignment: + # for each neighboring point (e-new, f-new) + for neighbor in neighbors: + neighbor = tuple(i + j for i, j in zip((e, f), neighbor)) + e_new, f_new = neighbor + # if ( ( e-new not aligned and f-new not aligned) + # and (e-new, f-new in union(e2f, f2e) ) + if ( + e_new not in aligned and f_new not in aligned + ) and neighbor in union: + alignment.add(neighbor) + aligned["e"].add(e_new) + aligned["f"].add(f_new) + prev_len += 1 + no_new_points = False + # iterate until no new points added + if no_new_points: + break + + def final_and(a): + """ + Adds remaining points that are not in the intersection, not in the + neighboring alignments but in the original *e2f* and *f2e* alignments + """ + # for english word e = 0 ... en + for e_new in range(srclen): + # for foreign word f = 0 ... fn + for f_new in range(trglen): + # if ( ( e-new not aligned and f-new not aligned) + # and (e-new, f-new in union(e2f, f2e) ) + if ( + e_new not in aligned + and f_new not in aligned + and (e_new, f_new) in union + ): + alignment.add((e_new, f_new)) + aligned["e"].add(e_new) + aligned["f"].add(f_new) + + grow_diag() + final_and(e2f) + final_and(f2e) + return sorted(alignment) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/gleu_score.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/gleu_score.py new file mode 100644 index 0000000000000000000000000000000000000000..81932a73fb5bdd34e539dfd9d1b46f179fc26558 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/gleu_score.py @@ -0,0 +1,190 @@ +# Natural Language Toolkit: GLEU Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: +# Contributors: Mike Schuster, Michael Wayne Goodman, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +""" GLEU score implementation. """ + +from collections import Counter + +from nltk.util import everygrams, ngrams + + +def sentence_gleu(references, hypothesis, min_len=1, max_len=4): + """ + Calculates the sentence level GLEU (Google-BLEU) score described in + + Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, + Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, + Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser, + Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, + George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, + Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, + Jeffrey Dean. (2016) Google’s Neural Machine Translation System: + Bridging the Gap between Human and Machine Translation. + eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf + Retrieved on 27 Oct 2016. + + From Wu et al. (2016): + "The BLEU score has some undesirable properties when used for single + sentences, as it was designed to be a corpus measure. We therefore + use a slightly different score for our RL experiments which we call + the 'GLEU score'. For the GLEU score, we record all sub-sequences of + 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then + compute a recall, which is the ratio of the number of matching n-grams + to the number of total n-grams in the target (ground truth) sequence, + and a precision, which is the ratio of the number of matching n-grams + to the number of total n-grams in the generated output sequence. Then + GLEU score is simply the minimum of recall and precision. 
This GLEU + score's range is always between 0 (no matches) and 1 (all match) and + it is symmetrical when switching output and target. According to + our experiments, GLEU score correlates quite well with the BLEU + metric on a corpus level but does not have its drawbacks for our per + sentence reward objective." + + Note: The initial implementation only allowed a single reference, but now + a list of references is required (which is consistent with + bleu_score.sentence_bleu()). + + The infamous "the the the ... " example + + >>> ref = 'the cat is on the mat'.split() + >>> hyp = 'the the the the the the the'.split() + >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS + 0.0909... + + An example to evaluate normal machine translation outputs + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct').split() + >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS + 0.4393... + >>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS + 0.1206... + + :param references: a list of reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :return: the sentence level GLEU score. + :rtype: float + """ + return corpus_gleu([references], [hypothesis], min_len=min_len, max_len=max_len) + + +def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4): + """ + Calculate a single corpus-level GLEU score (aka. system-level GLEU) for all + the hypotheses and their respective references. + + Instead of averaging the sentence level GLEU scores (i.e. macro-average + precision), Wu et al. (2016) sum up the matching tokens and the max of + hypothesis and reference tokens for each sentence, then compute using the + aggregate values. + + From Mike Schuster (via email): + "For the corpus, we just add up the two statistics n_match and + n_all = max(n_all_output, n_all_target) for all sentences, then + calculate gleu_score = n_match / n_all, so it is not just a mean of + the sentence gleu scores (in our case, longer sentences count more, + which I think makes sense as they are more difficult to translate)." + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 
'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> corpus_gleu(list_of_references, hypotheses) # doctest: +ELLIPSIS + 0.5673... + + The example below show that corpus_gleu() is different from averaging + sentence_gleu() for hypotheses + + >>> score1 = sentence_gleu([ref1a], hyp1) + >>> score2 = sentence_gleu([ref2a], hyp2) + >>> (score1 + score2) / 2 # doctest: +ELLIPSIS + 0.6144... + + :param list_of_references: a list of reference sentences, w.r.t. hypotheses + :type list_of_references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :return: The corpus-level GLEU score. + :rtype: float + """ + # sanity check + assert len(list_of_references) == len( + hypotheses + ), "The number of hypotheses and their reference(s) should be the same" + + # sum matches and max-token-lengths over all sentences + corpus_n_match = 0 + corpus_n_all = 0 + + for references, hypothesis in zip(list_of_references, hypotheses): + hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len)) + tpfp = sum(hyp_ngrams.values()) # True positives + False positives. + + hyp_counts = [] + for reference in references: + ref_ngrams = Counter(everygrams(reference, min_len, max_len)) + tpfn = sum(ref_ngrams.values()) # True positives + False negatives. + + overlap_ngrams = ref_ngrams & hyp_ngrams + tp = sum(overlap_ngrams.values()) # True positives. + + # While GLEU is defined as the minimum of precision and + # recall, we can reduce the number of division operations by one by + # instead finding the maximum of the denominators for the precision + # and recall formulae, since the numerators are the same: + # precision = tp / tpfp + # recall = tp / tpfn + # gleu_score = min(precision, recall) == tp / max(tpfp, tpfn) + n_all = max(tpfp, tpfn) + + if n_all > 0: + hyp_counts.append((tp, n_all)) + + # use the reference yielding the highest score + if hyp_counts: + n_match, n_all = max(hyp_counts, key=lambda hc: hc[0] / hc[1]) + corpus_n_match += n_match + corpus_n_all += n_all + + # corner case: empty corpus or empty references---don't divide by zero! + if corpus_n_all == 0: + gleu_score = 0.0 + else: + gleu_score = corpus_n_match / corpus_n_all + + return gleu_score diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm2.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm2.py new file mode 100644 index 0000000000000000000000000000000000000000..0b3ff375f045f4a809778ea8d3221e6b62e5e2ad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm2.py @@ -0,0 +1,319 @@ +# Natural Language Toolkit: IBM Model 2 +# +# Copyright (C) 2001-2013 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# URL: +# For license information, see LICENSE.TXT + +""" +Lexical translation model that considers word order. + +IBM Model 2 improves on Model 1 by accounting for word order. +An alignment probability is introduced, a(i | j,l,m), which predicts +a source word position, given its aligned target word's position. + +The EM algorithm used in Model 2 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. 
+ + - (a) count how many times a source language word is translated + into a target language word + - (b) count how many times a particular position in the source + sentence is aligned to a particular position in the target + sentence + +:M step: Estimate new probabilities based on the counts from the E step + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +import warnings +from collections import defaultdict + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel1 +from nltk.translate.ibm_model import Counts + + +class IBMModel2(IBMModel): + """ + Lexical translation model that considers word order + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + + >>> ibm2 = IBMModel2(bitext, 5) + + >>> print(round(ibm2.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm2.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm2.translation_table['buch'][None], 3)) + 0.0 + >>> print(round(ibm2.translation_table['ja'][None], 3)) + 0.0 + + >>> print(round(ibm2.alignment_table[1][1][2][2], 3)) + 0.939 + >>> print(round(ibm2.alignment_table[1][2][2][2], 3)) + 0.0 + >>> print(round(ibm2.alignment_table[2][2][4][5], 3)) + 1.0 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model and an alignment model. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``. + See ``IBMModel`` for the type and purpose of these tables. 
+ :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + + if probability_tables is None: + # Get translation probabilities from IBM Model 1 + # Run more iterations of training for Model 1, since it is + # faster than Model 2 + ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations) + self.translation_table = ibm1.translation_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + self.align_all(sentence_aligned_corpus) + + def set_uniform_probabilities(self, sentence_aligned_corpus): + # a(i | j,l,m) = 1 / (l+1) for all i, j, l, m + l_m_combinations = set() + for aligned_sentence in sentence_aligned_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + if (l, m) not in l_m_combinations: + l_m_combinations.add((l, m)) + initial_prob = 1 / (l + 1) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A source sentence is too long (" + + str(l) + + " words). Results may be less accurate." + ) + + for i in range(0, l + 1): + for j in range(1, m + 1): + self.alignment_table[i][j][l][m] = initial_prob + + def train(self, parallel_corpus): + counts = Model2Counts() + for aligned_sentence in parallel_corpus: + src_sentence = [None] + aligned_sentence.mots + trg_sentence = ["UNUSED"] + aligned_sentence.words # 1-indexed + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_all_alignments(src_sentence, trg_sentence) + + # E step (b): Collect counts + for j in range(1, m + 1): + t = trg_sentence[j] + for i in range(0, l + 1): + s = src_sentence[i] + count = self.prob_alignment_point(i, j, src_sentence, trg_sentence) + normalized_count = count / total_count[t] + + counts.update_lexical_translation(normalized_count, s, t) + counts.update_alignment(normalized_count, i, j, l, m) + + # M step: Update probabilities with maximum likelihood estimates + self.maximize_lexical_translation_probabilities(counts) + self.maximize_alignment_probabilities(counts) + + def maximize_alignment_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + for i, j_s in counts.alignment.items(): + for j, src_sentence_lengths in j_s.items(): + for l, trg_sentence_lengths in src_sentence_lengths.items(): + for m in trg_sentence_lengths: + estimate = ( + counts.alignment[i][j][l][m] + / counts.alignment_for_any_i[j][l][m] + ) + self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB) + + def prob_all_alignments(self, src_sentence, trg_sentence): + """ + Computes the probability of all possible word alignments, + expressed as a marginal distribution over target words t + + Each entry in the return value represents the contribution to + the total alignment probability by the target word t. + + To obtain probability(alignment | src_sentence, trg_sentence), + simply sum the entries in the return value. 
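+
+        Each contribution is ``translation_table[t][s] * alignment_table[i][j][l][m]``,
+        summed over every source position ``i``, including the NULL token at
+        position 0.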
+ + :return: Probability of t for all s in ``src_sentence`` + :rtype: dict(str): float + """ + alignment_prob_for_t = defaultdict(lambda: 0.0) + for j in range(1, len(trg_sentence)): + t = trg_sentence[j] + for i in range(0, len(src_sentence)): + alignment_prob_for_t[t] += self.prob_alignment_point( + i, j, src_sentence, trg_sentence + ) + return alignment_prob_for_t + + def prob_alignment_point(self, i, j, src_sentence, trg_sentence): + """ + Probability that position j in ``trg_sentence`` is aligned to + position i in the ``src_sentence`` + """ + l = len(src_sentence) - 1 + m = len(trg_sentence) - 1 + s = src_sentence[i] + t = trg_sentence[j] + return self.translation_table[t][s] * self.alignment_table[i][j][l][m] + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + prob = 1.0 + l = len(alignment_info.src_sentence) - 1 + m = len(alignment_info.trg_sentence) - 1 + + for j, i in enumerate(alignment_info.alignment): + if j == 0: + continue # skip the dummy zeroeth element + trg_word = alignment_info.trg_sentence[j] + src_word = alignment_info.src_sentence[i] + prob *= ( + self.translation_table[trg_word][src_word] + * self.alignment_table[i][j][l][m] + ) + + return max(prob, IBMModel.MIN_PROB) + + def align_all(self, parallel_corpus): + for sentence_pair in parallel_corpus: + self.align(sentence_pair) + + def align(self, sentence_pair): + """ + Determines the best word alignment for one sentence pair from + the corpus that the model was trained on. + + The best alignment will be set in ``sentence_pair`` when the + method returns. In contrast with the internal implementation of + IBM models, the word indices in the ``Alignment`` are zero- + indexed, not one-indexed. + + :param sentence_pair: A sentence in the source language and its + counterpart sentence in the target language + :type sentence_pair: AlignedSent + """ + best_alignment = [] + + l = len(sentence_pair.mots) + m = len(sentence_pair.words) + + for j, trg_word in enumerate(sentence_pair.words): + # Initialize trg_word to align with the NULL token + best_prob = ( + self.translation_table[trg_word][None] + * self.alignment_table[0][j + 1][l][m] + ) + best_prob = max(best_prob, IBMModel.MIN_PROB) + best_alignment_point = None + for i, src_word in enumerate(sentence_pair.mots): + align_prob = ( + self.translation_table[trg_word][src_word] + * self.alignment_table[i + 1][j + 1][l][m] + ) + if align_prob >= best_prob: + best_prob = align_prob + best_alignment_point = i + + best_alignment.append((j, best_alignment_point)) + + sentence_pair.alignment = Alignment(best_alignment) + + +class Model2Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for alignment. 
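+
+    Alignment counts are stored as ``alignment[i][j][l][m]``, with totals over
+    all source positions ``i`` kept in ``alignment_for_any_i[j][l][m]``.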
+ """ + + def __init__(self): + super().__init__() + self.alignment = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) + ) + self.alignment_for_any_i = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + + def update_lexical_translation(self, count, s, t): + self.t_given_s[t][s] += count + self.any_t_given_s[s] += count + + def update_alignment(self, count, i, j, l, m): + self.alignment[i][j][l][m] += count + self.alignment_for_any_i[j][l][m] += count diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm5.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm5.py new file mode 100644 index 0000000000000000000000000000000000000000..98ed2ec0aec4535fd6b4e18abbf8ecd8f696a9e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/ibm5.py @@ -0,0 +1,663 @@ +# Natural Language Toolkit: IBM Model 5 +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that keeps track of vacant positions in the target +sentence to decide where to place translated words. + +Translation can be viewed as a process where each word in the source +sentence is stepped through sequentially, generating translated words +for each source word. The target sentence can be viewed as being made +up of ``m`` empty slots initially, which gradually fill up as generated +words are placed in them. + +Models 3 and 4 use distortion probabilities to decide how to place +translated words. For simplicity, these models ignore the history of +which slots have already been occupied with translated words. +Consider the placement of the last translated word: there is only one +empty slot left in the target sentence, so the distortion probability +should be 1.0 for that position and 0.0 everywhere else. However, the +distortion probabilities for Models 3 and 4 are set up such that all +positions are under consideration. + +IBM Model 5 fixes this deficiency by accounting for occupied slots +during translation. It introduces the vacancy function v(j), the number +of vacancies up to, and including, position j in the target sentence. + +Terminology +----------- + +:Maximum vacancy: + The number of valid slots that a word can be placed in. + This is not necessarily the same as the number of vacant slots. + For example, if a tablet contains more than one word, the head word + cannot be placed at the last vacant slot because there will be no + space for the other words in the tablet. The number of valid slots + has to take into account the length of the tablet. + Non-head words cannot be placed before the head word, so vacancies + to the left of the head word are ignored. +:Vacancy difference: + For a head word: (v(j) - v(center of previous cept)) + Can be positive or negative. + For a non-head word: (v(j) - v(position of previously placed word)) + Always positive, because successive words in a tablet are assumed to + appear to the right of the previous word. + +Positioning of target words fall under three cases: + +1. Words generated by NULL are distributed uniformly +2. For a head word t, its position is modeled by the probability + v_head(dv | max_v,word_class_t(t)) +3. For a non-head word t, its position is modeled by the probability + v_non_head(dv | max_v,word_class_t(t)) + +dv and max_v are defined differently for head and non-head words. 
+ +The EM algorithm used in Model 5 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) for a particular word class and maximum vacancy, count how + many times a head word and the previous cept's center have + a particular difference in number of vacancies + - (b) for a particular word class and maximum vacancy, count how + many times a non-head word and the previous target word + have a particular difference in number of vacancies + - (d) count how many times a source word is aligned to phi number + of target words + - (e) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Like Model 4, there are too many possible alignments to consider. Thus, +a hill climbing approach is used to sample good candidates. In addition, +pruning is used to weed out unlikely alignments based on Model 4 scores. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 +:max_v: Maximum vacancy +:dv: Vacancy difference, Δv + +The definition of v_head here differs from GIZA++, section 4.7 of +[Brown et al., 1993], and [Koehn, 2010]. In the latter cases, v_head is +v_head(v(j) | v(center of previous cept),max_v,word_class(t)). + +Here, we follow appendix B of [Brown et al., 1993] and combine v(j) with +v(center of previous cept) to obtain dv: +v_head(v(j) - v(center of previous cept) | max_v,word_class(t)). + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
+""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel4 +from nltk.translate.ibm_model import Counts, longest_target_sentence_length + + +class IBMModel5(IBMModel): + """ + Translation model that keeps track of vacant positions in the target + sentence to decide where to place translated words + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + >>> src_classes = {'the': 0, 'a': 0, 'small': 1, 'big': 1, 'house': 2, 'book': 2, 'is': 3, 'was': 3, 'i': 4, 'summarize': 5 } + >>> trg_classes = {'das': 0, 'ein': 0, 'haus': 1, 'buch': 1, 'klein': 2, 'groß': 2, 'ist': 3, 'war': 3, 'ja': 4, 'ich': 5, 'fasse': 6, 'zusammen': 6 } + + >>> ibm5 = IBMModel5(bitext, 5, src_classes, trg_classes) + + >>> print(round(ibm5.head_vacancy_table[1][1][1], 3)) + 1.0 + >>> print(round(ibm5.head_vacancy_table[2][1][1], 3)) + 0.0 + >>> print(round(ibm5.non_head_vacancy_table[3][3][6], 3)) + 1.0 + + >>> print(round(ibm5.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm5.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm5.p1, 3)) + 0.033 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + MIN_SCORE_FACTOR = 0.2 + """ + Alignments with scores below this factor are pruned during sampling + """ + + def __init__( + self, + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + probability_tables=None, + ): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, vacancy models, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param source_word_classes: Lookup table that maps a source word + to its word class, the latter represented by an integer id + :type source_word_classes: dict[str]: int + + :param target_word_classes: Lookup table that maps a target word + to its word class, the latter represented by an integer id + :type target_word_classes: dict[str]: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. 
+ If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``head_distortion_table``, + ``non_head_distortion_table``, ``head_vacancy_table``, + ``non_head_vacancy_table``. See ``IBMModel``, ``IBMModel4``, + and ``IBMModel5`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + self.src_classes = source_word_classes + self.trg_classes = target_word_classes + + if probability_tables is None: + # Get probabilities from IBM model 4 + ibm4 = IBMModel4( + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + ) + self.translation_table = ibm4.translation_table + self.alignment_table = ibm4.alignment_table + self.fertility_table = ibm4.fertility_table + self.p1 = ibm4.p1 + self.head_distortion_table = ibm4.head_distortion_table + self.non_head_distortion_table = ibm4.non_head_distortion_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.head_distortion_table = probability_tables["head_distortion_table"] + self.non_head_distortion_table = probability_tables[ + "non_head_distortion_table" + ] + self.head_vacancy_table = probability_tables["head_vacancy_table"] + self.non_head_vacancy_table = probability_tables["non_head_vacancy_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(vacancy difference | + number of remaining valid positions,target word class). + Values accessed as ``head_vacancy_table[dv][v_max][trg_class]``. + """ + + self.non_head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(vacancy difference | + number of remaining valid positions,target word class). + Values accessed as ``non_head_vacancy_table[dv][v_max][trg_class]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Set vacancy probabilities uniformly to + 1 / cardinality of vacancy difference values + """ + max_m = longest_target_sentence_length(sentence_aligned_corpus) + + # The maximum vacancy difference occurs when a word is placed in + # the last available position m of the target sentence and the + # previous word position has no vacancies. + # The minimum is 1-max_v, when a word is placed in the first + # available position and the previous word is placed beyond the + # last available position. + # Thus, the number of possible vacancy difference values is + # (max_v) - (1-max_v) + 1 = 2 * max_v. + if max_m > 0 and (1 / (2 * max_m)) < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(max_m) + + " words). Results may be less accurate." 
+ ) + + for max_v in range(1, max_m + 1): + for dv in range(1, max_m + 1): + initial_prob = 1 / (2 * max_v) + self.head_vacancy_table[dv][max_v] = defaultdict(lambda: initial_prob) + self.head_vacancy_table[-(dv - 1)][max_v] = defaultdict( + lambda: initial_prob + ) + self.non_head_vacancy_table[dv][max_v] = defaultdict( + lambda: initial_prob + ) + self.non_head_vacancy_table[-(dv - 1)][max_v] = defaultdict( + lambda: initial_prob + ) + + def train(self, parallel_corpus): + counts = Model5Counts() + for aligned_sentence in parallel_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + + slots = Slots(m) + for i in range(1, l + 1): + counts.update_vacancy( + normalized_count, alignment_info, i, self.trg_classes, slots + ) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_vacancy_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def sample(self, sentence_pair): + """ + Sample the most probable alignments from the entire alignment + space according to Model 4 + + Note that Model 4 scoring is used instead of Model 5 because the + latter is too expensive to compute. + + First, determine the best alignment according to IBM Model 2. + With this initial alignment, use hill climbing to determine the + best alignment according to a IBM Model 4. Add this + alignment and its neighbors to the sample set. Repeat this + process with other initial alignments obtained by pegging an + alignment point. Finally, prune alignments that have + substantially lower Model 4 scores than the best alignment. 
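+        Pruning keeps only the alignments whose Model 4 score is above
+        ``MIN_SCORE_FACTOR`` times the score of the best alignment in
+        the sample.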
+ + :param sentence_pair: Source and target language sentence pair + to generate a sample of alignments from + :type sentence_pair: AlignedSent + + :return: A set of best alignments represented by their ``AlignmentInfo`` + and the best alignment of the set for convenience + :rtype: set(AlignmentInfo), AlignmentInfo + """ + sampled_alignments, best_alignment = super().sample(sentence_pair) + return self.prune(sampled_alignments), best_alignment + + def prune(self, alignment_infos): + """ + Removes alignments from ``alignment_infos`` that have + substantially lower Model 4 scores than the best alignment + + :return: Pruned alignments + :rtype: set(AlignmentInfo) + """ + alignments = [] + best_score = 0 + + for alignment_info in alignment_infos: + score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self) + best_score = max(score, best_score) + alignments.append((alignment_info, score)) + + threshold = IBMModel5.MIN_SCORE_FACTOR * best_score + alignments = [a[0] for a in alignments if a[1] > threshold] + return set(alignments) + + def hillclimb(self, alignment_info, j_pegged=None): + """ + Starting from the alignment in ``alignment_info``, look at + neighboring alignments iteratively for the best one, according + to Model 4 + + Note that Model 4 scoring is used instead of Model 5 because the + latter is too expensive to compute. + + There is no guarantee that the best alignment in the alignment + space will be found, because the algorithm might be stuck in a + local maximum. + + :param j_pegged: If specified, the search will be constrained to + alignments where ``j_pegged`` remains unchanged + :type j_pegged: int + + :return: The best alignment found from hill climbing + :rtype: AlignmentInfo + """ + alignment = alignment_info # alias with shorter name + max_probability = IBMModel4.model4_prob_t_a_given_s(alignment, self) + + while True: + old_alignment = alignment + for neighbor_alignment in self.neighboring(alignment, j_pegged): + neighbor_probability = IBMModel4.model4_prob_t_a_given_s( + neighbor_alignment, self + ) + + if neighbor_probability > max_probability: + alignment = neighbor_alignment + max_probability = neighbor_probability + + if alignment == old_alignment: + # Until there are no better alignments + break + + alignment.score = max_probability + return alignment + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + slots = Slots(len(alignment_info.trg_sentence) - 1) + + def null_generation_term(): + # Binomial distribution: B(m - null_fertility, p1) + value = 1.0 + p1 = self.p1 + p0 = 1 - p1 + null_fertility = alignment_info.fertility_of_i(0) + m = len(alignment_info.trg_sentence) - 1 + value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if value < MIN_PROB: + return MIN_PROB + + # Combination: (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + value *= (m - null_fertility - i + 1) / i + return value + + def fertility_term(): + value = 1.0 + src_sentence = alignment_info.src_sentence + for i in range(1, len(src_sentence)): + fertility = alignment_info.fertility_of_i(i) + value *= ( + factorial(fertility) + * self.fertility_table[fertility][src_sentence[i]] + ) + if value < MIN_PROB: + return MIN_PROB + return value + + def lexical_translation_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + s = alignment_info.src_sentence[i] + return 
self.translation_table[t][s] + + def vacancy_term(i): + value = 1.0 + tablet = alignment_info.cepts[i] + tablet_length = len(tablet) + total_vacancies = slots.vacancies_at(len(slots)) + + # case 1: NULL-aligned words + if tablet_length == 0: + return value + + # case 2: head word + j = tablet[0] + previous_cept = alignment_info.previous_cept(j) + previous_center = alignment_info.center_of_cept(previous_cept) + dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) + max_v = total_vacancies - tablet_length + 1 + trg_class = self.trg_classes[alignment_info.trg_sentence[j]] + value *= self.head_vacancy_table[dv][max_v][trg_class] + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + if value < MIN_PROB: + return MIN_PROB + + # case 3: non-head words + for k in range(1, tablet_length): + previous_position = tablet[k - 1] + previous_vacancies = slots.vacancies_at(previous_position) + j = tablet[k] + dv = slots.vacancies_at(j) - previous_vacancies + max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies + trg_class = self.trg_classes[alignment_info.trg_sentence[j]] + value *= self.non_head_vacancy_table[dv][max_v][trg_class] + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + if value < MIN_PROB: + return MIN_PROB + + return value + + # end nested functions + + # Abort computation whenever probability falls below MIN_PROB at + # any point, since MIN_PROB can be considered as zero + probability *= null_generation_term() + if probability < MIN_PROB: + return MIN_PROB + + probability *= fertility_term() + if probability < MIN_PROB: + return MIN_PROB + + for j in range(1, len(alignment_info.trg_sentence)): + probability *= lexical_translation_term(j) + if probability < MIN_PROB: + return MIN_PROB + + for i in range(1, len(alignment_info.src_sentence)): + probability *= vacancy_term(i) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + def maximize_vacancy_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + head_vacancy_table = self.head_vacancy_table + for dv, max_vs in counts.head_vacancy.items(): + for max_v, trg_classes in max_vs.items(): + for t_cls in trg_classes: + estimate = ( + counts.head_vacancy[dv][max_v][t_cls] + / counts.head_vacancy_for_any_dv[max_v][t_cls] + ) + head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) + + non_head_vacancy_table = self.non_head_vacancy_table + for dv, max_vs in counts.non_head_vacancy.items(): + for max_v, trg_classes in max_vs.items(): + for t_cls in trg_classes: + estimate = ( + counts.non_head_vacancy[dv][max_v][t_cls] + / counts.non_head_vacancy_for_any_dv[max_v][t_cls] + ) + non_head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) + + +class Model5Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for vacancies. 
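+
+    Vacancy counts are stored in nested dictionaries and accessed as
+    ``head_vacancy[dv][max_v][trg_class]`` (and likewise for
+    ``non_head_vacancy``), mirroring the probability tables that they
+    are used to re-estimate in ``maximize_vacancy_probabilities``.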
+ """ + + def __init__(self): + super().__init__() + self.head_vacancy = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_vacancy = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.non_head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) + + def update_vacancy(self, count, alignment_info, i, trg_classes, slots): + """ + :param count: Value to add to the vacancy counts + :param alignment_info: Alignment under consideration + :param i: Source word position under consideration + :param trg_classes: Target word classes + :param slots: Vacancy states of the slots in the target sentence. + Output parameter that will be modified as new words are placed + in the target sentence. + """ + tablet = alignment_info.cepts[i] + tablet_length = len(tablet) + total_vacancies = slots.vacancies_at(len(slots)) + + # case 1: NULL aligned words + if tablet_length == 0: + return # ignore zero fertility words + + # case 2: head word + j = tablet[0] + previous_cept = alignment_info.previous_cept(j) + previous_center = alignment_info.center_of_cept(previous_cept) + dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) + max_v = total_vacancies - tablet_length + 1 + trg_class = trg_classes[alignment_info.trg_sentence[j]] + self.head_vacancy[dv][max_v][trg_class] += count + self.head_vacancy_for_any_dv[max_v][trg_class] += count + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + + # case 3: non-head words + for k in range(1, tablet_length): + previous_position = tablet[k - 1] + previous_vacancies = slots.vacancies_at(previous_position) + j = tablet[k] + dv = slots.vacancies_at(j) - previous_vacancies + max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies + trg_class = trg_classes[alignment_info.trg_sentence[j]] + self.non_head_vacancy[dv][max_v][trg_class] += count + self.non_head_vacancy_for_any_dv[max_v][trg_class] += count + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + + +class Slots: + """ + Represents positions in a target sentence. Used to keep track of + which slot (position) is occupied. + """ + + def __init__(self, target_sentence_length): + self._slots = [False] * (target_sentence_length + 1) # 1-indexed + + def occupy(self, position): + """ + :return: Mark slot at ``position`` as occupied + """ + self._slots[position] = True + + def vacancies_at(self, position): + """ + :return: Number of vacant slots up to, and including, ``position`` + """ + vacancies = 0 + for k in range(1, position + 1): + if not self._slots[k]: + vacancies += 1 + return vacancies + + def __len__(self): + return len(self._slots) - 1 # exclude dummy zeroeth element diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/metrics.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..88444087f65395428c87a6c5d805c682958b6e55 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/metrics.py @@ -0,0 +1,41 @@ +# Natural Language Toolkit: Translation metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Zhang +# Guan Gui +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + + +def alignment_error_rate(reference, hypothesis, possible=None): + """ + Return the Alignment Error Rate (AER) of an alignment + with respect to a "gold standard" reference alignment. 
+ Return an error rate between 0.0 (perfect alignment) and 1.0 (no + alignment). + + >>> from nltk.translate import Alignment + >>> ref = Alignment([(0, 0), (1, 1), (2, 2)]) + >>> test = Alignment([(0, 0), (1, 2), (2, 1)]) + >>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS + 0.6666666666666667 + + :type reference: Alignment + :param reference: A gold standard alignment (sure alignments) + :type hypothesis: Alignment + :param hypothesis: A hypothesis alignment (aka. candidate alignments) + :type possible: Alignment or None + :param possible: A gold standard reference of possible alignments + (defaults to *reference* if None) + :rtype: float or None + """ + + if possible is None: + possible = reference + else: + assert reference.issubset(possible) # sanity check + + return 1.0 - (len(hypothesis & reference) + len(hypothesis & possible)) / float( + len(hypothesis) + len(reference) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/nist_score.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/nist_score.py new file mode 100644 index 0000000000000000000000000000000000000000..0035a9dcdae5f1acf703c2c957353f880db22615 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/nist_score.py @@ -0,0 +1,195 @@ +# Natural Language Toolkit: NIST Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: +# Contributors: +# URL: +# For license information, see LICENSE.TXT + +"""NIST score implementation.""" + +import fractions +import math +from collections import Counter + +from nltk.util import ngrams + + +def sentence_nist(references, hypothesis, n=5): + """ + Calculate NIST score from + George Doddington. 2002. "Automatic evaluation of machine translation quality + using n-gram co-occurrence statistics." Proceedings of HLT. + Morgan Kaufmann Publishers Inc. https://dl.acm.org/citation.cfm?id=1289189.1289273 + + DARPA commissioned NIST to develop an MT evaluation facility based on the BLEU + score. The official script used by NIST to compute BLEU and NIST score is + mteval-14.pl. The main differences are: + + - BLEU uses geometric mean of the ngram overlaps, NIST uses arithmetic mean. + - NIST has a different brevity penalty + - NIST score from mteval-14.pl has a self-contained tokenizer + + Note: The mteval-14.pl includes a smoothing function for BLEU score that is NOT + used in the NIST score computation. + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> sentence_nist([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS + 3.3709... + + >>> sentence_nist([reference1, reference2, reference3], hypothesis2) # doctest: +ELLIPSIS + 1.4619... 
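+
+    Information weights (Eqn 2 in Doddington, 2002) are computed from the
+    reference n-gram counts, so rarer n-grams carry more weight. As a
+    hypothetical example, if the unigram ``w_1`` occurs 4 times in the
+    references and the bigram ``w_1 w_2`` occurs once, then
+    Info(``w_1 w_2``) = log_2(4 / 1) = 2.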
+ + :param references: reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param n: highest n-gram order + :type n: int + """ + return corpus_nist([references], [hypothesis], n) + + +def corpus_nist(list_of_references, hypotheses, n=5): + """ + Calculate a single corpus-level NIST score (aka. system-level BLEU) for all + the hypotheses and their respective references. + + :param references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param n: highest n-gram order + :type n: int + """ + # Before proceeding to compute NIST, perform sanity checks. + assert len(list_of_references) == len( + hypotheses + ), "The number of hypotheses and their reference(s) should be the same" + + # Collect the ngram coounts from the reference sentences. + ngram_freq = Counter() + total_reference_words = 0 + for ( + references + ) in list_of_references: # For each source sent, there's a list of reference sents. + for reference in references: + # For each order of ngram, count the ngram occurrences. + for i in range(1, n + 1): + ngram_freq.update(ngrams(reference, i)) + total_reference_words += len(reference) + + # Compute the information weights based on the reference sentences. + # Eqn 2 in Doddington (2002): + # Info(w_1 ... w_n) = log_2 [ (# of occurrences of w_1 ... w_n-1) / (# of occurrences of w_1 ... w_n) ] + information_weights = {} + for _ngram in ngram_freq: # w_1 ... w_n + _mgram = _ngram[:-1] # w_1 ... w_n-1 + # From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v13a.pl#L546 + # it's computed as such: + # denominator = ngram_freq[_mgram] if _mgram and _mgram in ngram_freq else denominator = total_reference_words + # information_weights[_ngram] = -1 * math.log(ngram_freq[_ngram]/denominator) / math.log(2) + # + # Mathematically, it's equivalent to the our implementation: + if _mgram and _mgram in ngram_freq: + numerator = ngram_freq[_mgram] + else: + numerator = total_reference_words + information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2) + + # Micro-average. + nist_precision_numerator_per_ngram = Counter() + nist_precision_denominator_per_ngram = Counter() + l_ref, l_sys = 0, 0 + # For each order of ngram. + for i in range(1, n + 1): + # Iterate through each hypothesis and their corresponding references. + for references, hypothesis in zip(list_of_references, hypotheses): + hyp_len = len(hypothesis) + + # Find reference with the best NIST score. + nist_score_per_ref = [] + for reference in references: + _ref_len = len(reference) + # Counter of ngrams in hypothesis. + hyp_ngrams = ( + Counter(ngrams(hypothesis, i)) + if len(hypothesis) >= i + else Counter() + ) + ref_ngrams = ( + Counter(ngrams(reference, i)) if len(reference) >= i else Counter() + ) + ngram_overlaps = hyp_ngrams & ref_ngrams + # Precision part of the score in Eqn 3 + _numerator = sum( + information_weights[_ngram] * count + for _ngram, count in ngram_overlaps.items() + ) + _denominator = sum(hyp_ngrams.values()) + _precision = 0 if _denominator == 0 else _numerator / _denominator + nist_score_per_ref.append( + (_precision, _numerator, _denominator, _ref_len) + ) + # Best reference. 
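+            # The tuples compare lexicographically, so max() selects the
+            # reference with the highest precision for this n-gram order,
+            # breaking ties on numerator, denominator and reference length.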
+            precision, numerator, denominator, ref_len = max(nist_score_per_ref)
+            nist_precision_numerator_per_ngram[i] += numerator
+            nist_precision_denominator_per_ngram[i] += denominator
+            l_ref += ref_len
+            l_sys += hyp_len
+
+    # Final NIST micro-average mean aggregation.
+    nist_precision = 0
+    for i in nist_precision_numerator_per_ngram:
+        precision = (
+            nist_precision_numerator_per_ngram[i]
+            / nist_precision_denominator_per_ngram[i]
+        )
+        nist_precision += precision
+    # Eqn 3 in Doddington (2002)
+    return nist_precision * nist_length_penalty(l_ref, l_sys)
+
+
+def nist_length_penalty(ref_len, hyp_len):
+    """
+    Calculates the NIST length penalty, from Eq. 3 in Doddington (2002)
+
+        penalty = exp( beta * log( min( len(hyp)/len(ref), 1.0 )))
+
+    where,
+
+    `beta` is chosen to make the brevity penalty factor = 0.5 when the
+    no. of words in the system output (hyp) is 2/3 of the average
+    no. of words in the reference translation (ref)
+
+    The NIST penalty differs from BLEU's in that it minimizes the impact of
+    small variations in translation length on the score.
+    See Fig. 4 in Doddington (2002)
+    """
+    ratio = hyp_len / ref_len
+    if 0 < ratio < 1:
+        ratio_x, score_x = 1.5, 0.5
+        beta = math.log(score_x) / math.log(ratio_x) ** 2
+        return math.exp(beta * math.log(ratio) ** 2)
+    else:  # ratio <= 0 or ratio >= 1
+        return max(min(ratio, 1.0), 0.0)
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/translate/phrase_based.py b/env-llmeval/lib/python3.10/site-packages/nltk/translate/phrase_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fd85109ad26055023c502d6bd233a220d28e7e4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/nltk/translate/phrase_based.py
@@ -0,0 +1,193 @@
+# Natural Language Toolkit: Phrase Extraction Algorithm
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Authors: Liling Tan, Fredrik Hedman, Petra Barancikova
+# URL:
+# For license information, see LICENSE.TXT
+
+
+def extract(
+    f_start,
+    f_end,
+    e_start,
+    e_end,
+    alignment,
+    f_aligned,
+    srctext,
+    trgtext,
+    srclen,
+    trglen,
+    max_phrase_length,
+):
+    """
+    This function checks for alignment point consistency and extracts
+    phrases from the consistent chunk.
+
+    A phrase pair (e, f) is consistent with an alignment A if and only if:
+
+    (i) No English words in the phrase pair are aligned to words outside it.
+
+        ∀ e_i ∈ ē : (e_i, f_j) ∈ A ⇒ f_j ∈ f̄
+
+    (ii) No Foreign words in the phrase pair are aligned to words outside it.
+
+        ∀ f_j ∈ f̄ : (e_i, f_j) ∈ A ⇒ e_i ∈ ē
+
+    (iii) The phrase pair contains at least one alignment point.
+
+        ∃ e_i ∈ ē, f_j ∈ f̄ s.t. (e_i, f_j) ∈ A
+
+    :type f_start: int
+    :param f_start: Starting index of the possible foreign language phrases
+    :type f_end: int
+    :param f_end: End index of the possible foreign language phrases
+    :type e_start: int
+    :param e_start: Starting index of the possible source language phrases
+    :type e_end: int
+    :param e_end: End index of the possible source language phrases
+    :type srctext: list
+    :param srctext: The source language tokens, a list of string.
+    :type trgtext: list
+    :param trgtext: The target language tokens, a list of string.
+    :type srclen: int
+    :param srclen: The number of tokens in the source language tokens.
+    :type trglen: int
+    :param trglen: The number of tokens in the target language tokens.
+    """
+
+    if f_end < 0:  # 0-based indexing.
+        return {}
+    # Check if alignment points are consistent.
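+    # A target (foreign) word inside [f_start, f_end] that is aligned to a
+    # source word outside [e_start, e_end] violates consistency, so no
+    # phrase pair is extracted for this span.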
+ for e, f in alignment: + if (f_start <= f <= f_end) and (e < e_start or e > e_end): + return {} + + # Add phrase pairs (incl. additional unaligned f) + phrases = set() + fs = f_start + while True: + fe = min(f_end, f_start + max_phrase_length - 1) + while True: + # add phrase pair ([e_start, e_end], [fs, fe]) to set E + # Need to +1 in range to include the end-point. + src_phrase = " ".join(srctext[e_start : e_end + 1]) + trg_phrase = " ".join(trgtext[fs : fe + 1]) + # Include more data for later ordering. + phrases.add(((e_start, e_end + 1), (fs, fe + 1), src_phrase, trg_phrase)) + fe += 1 + if fe in f_aligned or fe >= trglen: + break + fs -= 1 + if fs in f_aligned or fs < 0: + break + return phrases + + +def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0): + """ + Phrase extraction algorithm extracts all consistent phrase pairs from + a word-aligned sentence pair. + + The idea is to loop over all possible source language (e) phrases and find + the minimal foreign phrase (f) that matches each of them. Matching is done + by identifying all alignment points for the source phrase and finding the + shortest foreign phrase that includes all the foreign counterparts for the + source words. + + In short, a phrase alignment has to + (a) contain all alignment points for all covered words + (b) contain at least one alignment point + + >>> srctext = "michael assumes that he will stay in the house" + >>> trgtext = "michael geht davon aus , dass er im haus bleibt" + >>> alignment = [(0,0), (1,1), (1,2), (1,3), (2,5), (3,6), (4,9), + ... (5,9), (6,7), (7,7), (8,8)] + >>> phrases = phrase_extraction(srctext, trgtext, alignment) + >>> for i in sorted(phrases): + ... print(i) + ... + ((0, 1), (0, 1), 'michael', 'michael') + ((0, 2), (0, 4), 'michael assumes', 'michael geht davon aus') + ((0, 2), (0, 5), 'michael assumes', 'michael geht davon aus ,') + ((0, 3), (0, 6), 'michael assumes that', 'michael geht davon aus , dass') + ((0, 4), (0, 7), 'michael assumes that he', 'michael geht davon aus , dass er') + ((0, 9), (0, 10), 'michael assumes that he will stay in the house', 'michael geht davon aus , dass er im haus bleibt') + ((1, 2), (1, 4), 'assumes', 'geht davon aus') + ((1, 2), (1, 5), 'assumes', 'geht davon aus ,') + ((1, 3), (1, 6), 'assumes that', 'geht davon aus , dass') + ((1, 4), (1, 7), 'assumes that he', 'geht davon aus , dass er') + ((1, 9), (1, 10), 'assumes that he will stay in the house', 'geht davon aus , dass er im haus bleibt') + ((2, 3), (4, 6), 'that', ', dass') + ((2, 3), (5, 6), 'that', 'dass') + ((2, 4), (4, 7), 'that he', ', dass er') + ((2, 4), (5, 7), 'that he', 'dass er') + ((2, 9), (4, 10), 'that he will stay in the house', ', dass er im haus bleibt') + ((2, 9), (5, 10), 'that he will stay in the house', 'dass er im haus bleibt') + ((3, 4), (6, 7), 'he', 'er') + ((3, 9), (6, 10), 'he will stay in the house', 'er im haus bleibt') + ((4, 6), (9, 10), 'will stay', 'bleibt') + ((4, 9), (7, 10), 'will stay in the house', 'im haus bleibt') + ((6, 8), (7, 8), 'in the', 'im') + ((6, 9), (7, 9), 'in the house', 'im haus') + ((8, 9), (8, 9), 'house', 'haus') + + :type srctext: str + :param srctext: The sentence string from the source language. + :type trgtext: str + :param trgtext: The sentence string from the target language. + :type alignment: list(tuple) + :param alignment: The word alignment outputs as list of tuples, where + the first elements of tuples are the source words' indices and + second elements are the target words' indices. 
This is also the output + format of nltk.translate.ibm1 + :rtype: list(tuple) + :return: A list of tuples, each element in a list is a phrase and each + phrase is a tuple made up of (i) its source location, (ii) its target + location, (iii) the source phrase and (iii) the target phrase. The phrase + list of tuples represents all the possible phrases extracted from the + word alignments. + :type max_phrase_length: int + :param max_phrase_length: maximal phrase length, if 0 or not specified + it is set to a length of the longer sentence (srctext or trgtext). + """ + + srctext = srctext.split() # e + trgtext = trgtext.split() # f + srclen = len(srctext) # len(e) + trglen = len(trgtext) # len(f) + # Keeps an index of which source/target words that are aligned. + f_aligned = [j for _, j in alignment] + max_phrase_length = max_phrase_length or max(srclen, trglen) + + # set of phrase pairs BP + bp = set() + + for e_start in range(srclen): + max_idx = min(srclen, e_start + max_phrase_length) + for e_end in range(e_start, max_idx): + # // find the minimally matching foreign phrase + # (f start , f end ) = ( length(f), 0 ) + # f_start ∈ [0, len(f) - 1]; f_end ∈ [0, len(f) - 1] + f_start, f_end = trglen - 1, -1 # 0-based indexing + + for e, f in alignment: + if e_start <= e <= e_end: + f_start = min(f, f_start) + f_end = max(f, f_end) + # add extract (f start , f end , e start , e end ) to set BP + phrases = extract( + f_start, + f_end, + e_start, + e_end, + alignment, + f_aligned, + srctext, + trgtext, + srclen, + trglen, + max_phrase_length, + ) + if phrases: + bp.update(phrases) + return bp diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..8f391c9295b0f0738bb3fe673566174b47db7b20 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/METADATA @@ -0,0 +1,115 @@ +Metadata-Version: 2.1 +Name: pyarrow-hotfix +Version: 0.6 +Project-URL: Documentation, https://github.com/pitrou/pyarrow-hotfix#readme +Project-URL: Issues, https://github.com/pitrou/pyarrow-hotfix/issues +Project-URL: Source, https://github.com/pitrou/pyarrow-hotfix +Author-email: Antoine Pitrou +License: Apache License, Version 2.0 +License-File: LICENSE.txt +Classifier: Development Status :: 4 - Beta +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Requires-Python: >=3.5 +Description-Content-Type: text/x-rst + +PyArrow Hotfix +============== + +.. 
image:: https://img.shields.io/pypi/v/pyarrow-hotfix.svg + :alt: pyarrow_hotfix package on PyPI + :target: https://pypi.org/project/pyarrow-hotfix + +.. image:: https://img.shields.io/pypi/pyversions/pyarrow-hotfix.svg + :alt: pyarrow_hotfix supported Python versions + :target: https://pypi.org/project/pyarrow-hotfix + +.. image:: https://github.com/pitrou/pyarrow-hotfix/actions/workflows/tests.yml/badge.svg + :alt: latest unit test results + :target: https://github.com/pitrou/pyarrow-hotfix/actions/workflows/tests.yml + + +Description +----------- + +This is a hotfix for the PyArrow security vulnerability +`CVE-2023-47248 `__. + +We generally recommend upgrading to PyArrow 14.0.1 or later, but if you +cannot upgrade, this package disables the vulnerability on older versions. + +Installation +------------ + +Use ``pip`` to install: + +.. code-block:: console + + pip install pyarrow_hotfix + +.. note:: + Both ``pyarrow-hotfix`` and ``pyarrow_hotfix`` are accepted and point to + the same package. + +Usage +----- + +``pyarrow_hotfix`` must be imported in your application or library code for +it to take effect: + +.. code-block:: python + + import pyarrow_hotfix + +Supported versions +------------------ + +``pyarrow_hotfix`` supports all Python versions starting from Python 3.5, +and all PyArrow versions starting from 0.14.0. + +Dependencies +------------ + +``pyarrow_hotfix`` is a pure Python package that does not have any explicit +dependencies, and assumes you have installed ``pyarrow`` through other means +(such as ``pip`` or ``conda``). + +Example +------- + +.. code-block:: pycon + + >>> import pyarrow as pa + >>> import pyarrow_hotfix + >>> + >>> pa.ipc.open_file('data.arrow') + Traceback (most recent call last): + [ ... ] + RuntimeError: forbidden deserialization of 'arrow.py_extension_type': storage_type = null, serialized = b"\x80\x03cbuiltins\neval\nq\x00X\x15\x00\x00\x00print('hello world!')q\x01\x85q\x02Rq\x03.", pickle disassembly: + 0: \x80 PROTO 3 + 2: c GLOBAL 'builtins eval' + 17: q BINPUT 0 + 19: X BINUNICODE "print('hello world!')" + 45: q BINPUT 1 + 47: \x85 TUPLE1 + 48: q BINPUT 2 + 50: R REDUCE + 51: q BINPUT 3 + 53: . STOP + highest protocol among opcodes = 2 + + +License +------- + +Like ``pyarrow``, ``pyarrow_hotfix`` is distributed under the terms of the +`Apache License, version 2.0 `_. 
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d556c7b53b7ed23cbe38b0060496e7a39fc6dcb5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/RECORD @@ -0,0 +1,9 @@ +pyarrow_hotfix-0.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pyarrow_hotfix-0.6.dist-info/METADATA,sha256=ZfkKYA2ae-Tzt8eLmiUZm4AyiIqYgfqKdw-BCk1jfao,3553 +pyarrow_hotfix-0.6.dist-info/RECORD,, +pyarrow_hotfix-0.6.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87 +pyarrow_hotfix-0.6.dist-info/licenses/LICENSE.txt,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +pyarrow_hotfix/__about__.py,sha256=vCzHOqAMvexTYGj1TtWWLK-FaFwXvvLLmvfVCpMqh54,136 +pyarrow_hotfix/__init__.py,sha256=7hf1tpfbJuFixx_fMYMcRfhsKZ9Yo3XTNmlyyoasBCw,3527 +pyarrow_hotfix/__pycache__/__about__.cpython-310.pyc,, +pyarrow_hotfix/__pycache__/__init__.cpython-310.pyc,, diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..ba1a8af28bcccdacebb8c22dfda1537447a1a58a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.18.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/licenses/LICENSE.txt b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow_hotfix-0.6.dist-info/licenses/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/__init__.py b/env-llmeval/lib/python3.10/site-packages/setuptools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9d6f0bc0dd674e92a985a5f997b17039ade95217 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/__init__.py @@ -0,0 +1,242 @@ +"""Extensions to the 'distutils' for large or complex distributions""" + +from fnmatch import fnmatchcase +import functools +import os +import re + +import _distutils_hack.override # noqa: F401 + +import distutils.core +from distutils.errors import DistutilsOptionError +from distutils.util import convert_path + +from ._deprecation_warning import SetuptoolsDeprecationWarning + +import setuptools.version +from setuptools.extension import Extension +from setuptools.dist import Distribution +from setuptools.depends import Require +from . 
import monkey + + +__all__ = [ + 'setup', + 'Distribution', + 'Command', + 'Extension', + 'Require', + 'SetuptoolsDeprecationWarning', + 'find_packages', + 'find_namespace_packages', +] + +__version__ = setuptools.version.__version__ + +bootstrap_install_from = None + + +class PackageFinder: + """ + Generate a list of all Python packages found within a directory + """ + + @classmethod + def find(cls, where='.', exclude=(), include=('*',)): + """Return a list all Python packages found within directory 'where' + + 'where' is the root directory which will be searched for packages. It + should be supplied as a "cross-platform" (i.e. URL-style) path; it will + be converted to the appropriate local path syntax. + + 'exclude' is a sequence of package names to exclude; '*' can be used + as a wildcard in the names, such that 'foo.*' will exclude all + subpackages of 'foo' (but not 'foo' itself). + + 'include' is a sequence of package names to include. If it's + specified, only the named packages will be included. If it's not + specified, all found packages will be included. 'include' can contain + shell style wildcard patterns just like 'exclude'. + """ + + return list( + cls._find_packages_iter( + convert_path(where), + cls._build_filter('ez_setup', '*__pycache__', *exclude), + cls._build_filter(*include), + ) + ) + + @classmethod + def _find_packages_iter(cls, where, exclude, include): + """ + All the packages found in 'where' that pass the 'include' filter, but + not the 'exclude' filter. + """ + for root, dirs, files in os.walk(where, followlinks=True): + # Copy dirs to iterate over it, then empty dirs. + all_dirs = dirs[:] + dirs[:] = [] + + for dir in all_dirs: + full_path = os.path.join(root, dir) + rel_path = os.path.relpath(full_path, where) + package = rel_path.replace(os.path.sep, '.') + + # Skip directory trees that are not valid packages + if '.' in dir or not cls._looks_like_package(full_path): + continue + + # Should this package be included? + if include(package) and not exclude(package): + yield package + + # Keep searching subdirectories, as there may be more packages + # down there, even if the parent was excluded. + dirs.append(dir) + + @staticmethod + def _looks_like_package(path): + """Does a directory look like a package?""" + return os.path.isfile(os.path.join(path, '__init__.py')) + + @staticmethod + def _build_filter(*patterns): + """ + Given a list of patterns, return a callable that will be true only if + the input matches at least one of the patterns. + """ + return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) + + +class PEP420PackageFinder(PackageFinder): + @staticmethod + def _looks_like_package(path): + return True + + +find_packages = PackageFinder.find +find_namespace_packages = PEP420PackageFinder.find + + +def _install_setup_requires(attrs): + # Note: do not use `setuptools.Distribution` directly, as + # our PEP 517 backend patch `distutils.core.Distribution`. + class MinimalDistribution(distutils.core.Distribution): + """ + A minimal version of a distribution for supporting the + fetch_build_eggs interface. + """ + + def __init__(self, attrs): + _incl = 'dependency_links', 'setup_requires' + filtered = {k: attrs[k] for k in set(_incl) & set(attrs)} + distutils.core.Distribution.__init__(self, filtered) + + def finalize_options(self): + """ + Disable finalize_options to avoid building the working set. + Ref #2158. + """ + + dist = MinimalDistribution(attrs) + + # Honor setup.cfg's options. 
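+    # ignore_option_errors=True tolerates configuration options that this
+    # minimal Distribution cannot interpret; only dependency_links and
+    # setup_requires matter at this point.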
+ dist.parse_config_files(ignore_option_errors=True) + if dist.setup_requires: + dist.fetch_build_eggs(dist.setup_requires) + + +def setup(**attrs): + # Make sure we have any requirements needed to interpret 'attrs'. + _install_setup_requires(attrs) + return distutils.core.setup(**attrs) + + +setup.__doc__ = distutils.core.setup.__doc__ + + +_Command = monkey.get_unpatched(distutils.core.Command) + + +class Command(_Command): + __doc__ = _Command.__doc__ + + command_consumes_arguments = False + + def __init__(self, dist, **kw): + """ + Construct the command for dist, updating + vars(self) with any keyword parameters. + """ + _Command.__init__(self, dist) + vars(self).update(kw) + + def _ensure_stringlike(self, option, what, default=None): + val = getattr(self, option) + if val is None: + setattr(self, option, default) + return default + elif not isinstance(val, str): + raise DistutilsOptionError( + "'%s' must be a %s (got `%s`)" % (option, what, val) + ) + return val + + def ensure_string_list(self, option): + r"""Ensure that 'option' is a list of strings. If 'option' is + currently a string, we split it either on /,\s*/ or /\s+/, so + "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become + ["foo", "bar", "baz"]. + """ + val = getattr(self, option) + if val is None: + return + elif isinstance(val, str): + setattr(self, option, re.split(r',\s*|\s+', val)) + else: + if isinstance(val, list): + ok = all(isinstance(v, str) for v in val) + else: + ok = False + if not ok: + raise DistutilsOptionError( + "'%s' must be a list of strings (got %r)" % (option, val) + ) + + def reinitialize_command(self, command, reinit_subcommands=0, **kw): + cmd = _Command.reinitialize_command(self, command, reinit_subcommands) + vars(cmd).update(kw) + return cmd + + +def _find_all_simple(path): + """ + Find all files under 'path' + """ + results = ( + os.path.join(base, file) + for base, dirs, files in os.walk(path, followlinks=True) + for file in files + ) + return filter(os.path.isfile, results) + + +def findall(dir=os.curdir): + """ + Find all files under 'dir' and return the list of full filenames. + Unless dir is '.', return full filenames with dir prepended. + """ + files = _find_all_simple(dir) + if dir == os.curdir: + make_rel = functools.partial(os.path.relpath, start=dir) + files = map(make_rel, files) + return list(files) + + +class sic(str): + """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)""" + + +# Apply monkey patches +monkey.patch_all() diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/_deprecation_warning.py b/env-llmeval/lib/python3.10/site-packages/setuptools/_deprecation_warning.py new file mode 100644 index 0000000000000000000000000000000000000000..086b64dd3817c0c1a194ffc1959eeffdd2695bef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/_deprecation_warning.py @@ -0,0 +1,7 @@ +class SetuptoolsDeprecationWarning(Warning): + """ + Base class for warning deprecations in ``setuptools`` + + This class is not derived from ``DeprecationWarning``, and as such is + visible by default. 
+ """ diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/_distutils/version.py b/env-llmeval/lib/python3.10/site-packages/setuptools/_distutils/version.py new file mode 100644 index 0000000000000000000000000000000000000000..35e181dbb6dc23fa4ceb6c6b6a552f82aad038de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/_distutils/version.py @@ -0,0 +1,363 @@ +# +# distutils/version.py +# +# Implements multiple version numbering conventions for the +# Python Module Distribution Utilities. +# +# $Id$ +# + +"""Provides classes to represent module version numbers (one class for +each style of version numbering). There are currently two such classes +implemented: StrictVersion and LooseVersion. + +Every version number class implements the following interface: + * the 'parse' method takes a string and parses it to some internal + representation; if the string is an invalid version number, + 'parse' raises a ValueError exception + * the class constructor takes an optional string argument which, + if supplied, is passed to 'parse' + * __str__ reconstructs the string that was passed to 'parse' (or + an equivalent string -- ie. one that will generate an equivalent + version number instance) + * __repr__ generates Python code to recreate the version number instance + * _cmp compares the current instance with either another instance + of the same class or a string (which will be parsed to an instance + of the same class, thus must follow the same rules) +""" + +import re +import warnings +import contextlib + + +@contextlib.contextmanager +def suppress_known_deprecation(): + with warnings.catch_warnings(record=True) as ctx: + warnings.filterwarnings( + action='default', + category=DeprecationWarning, + message="distutils Version classes are deprecated.", + ) + yield ctx + + +class Version: + """Abstract base class for version numbering classes. Just provides + constructor (__init__) and reproducer (__repr__), because those + seem to be the same for all version numbering classes; and route + rich comparisons to _cmp. + """ + + def __init__ (self, vstring=None): + warnings.warn( + "distutils Version classes are deprecated. " + "Use packaging.version instead.", + DeprecationWarning, + stacklevel=2, + ) + if vstring: + self.parse(vstring) + + def __repr__ (self): + return "%s ('%s')" % (self.__class__.__name__, str(self)) + + def __eq__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c == 0 + + def __lt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c < 0 + + def __le__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c <= 0 + + def __gt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c > 0 + + def __ge__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c >= 0 + + +# Interface for version-number classes -- must be implemented +# by the following classes (the concrete ones -- Version should +# be treated as an abstract class). 
+# __init__ (string) - create and take same action as 'parse' +# (string parameter is optional) +# parse (string) - convert a string representation to whatever +# internal representation is appropriate for +# this style of version numbering +# __str__ (self) - convert back to a string; should be very similar +# (if not identical to) the string supplied to parse +# __repr__ (self) - generate Python code to recreate +# the instance +# _cmp (self, other) - compare two version numbers ('other' may +# be an unparsed version string, or another +# instance of your version class) + + +class StrictVersion (Version): + + """Version numbering for anal retentives and software idealists. + Implements the standard interface for version number classes as + described above. A version number consists of two or three + dot-separated numeric components, with an optional "pre-release" tag + on the end. The pre-release tag consists of the letter 'a' or 'b' + followed by a number. If the numeric components of two version + numbers are equal, then one with a pre-release tag will always + be deemed earlier (lesser) than one without. + + The following are valid version numbers (shown in the order that + would be obtained by sorting according to the supplied cmp function): + + 0.4 0.4.0 (these two are equivalent) + 0.4.1 + 0.5a1 + 0.5b3 + 0.5 + 0.9.6 + 1.0 + 1.0.4a3 + 1.0.4b1 + 1.0.4 + + The following are examples of invalid version numbers: + + 1 + 2.7.2.2 + 1.3.a4 + 1.3pl1 + 1.3c4 + + The rationale for this version numbering system will be explained + in the distutils documentation. + """ + + version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$', + re.VERBOSE | re.ASCII) + + + def parse (self, vstring): + match = self.version_re.match(vstring) + if not match: + raise ValueError("invalid version number '%s'" % vstring) + + (major, minor, patch, prerelease, prerelease_num) = \ + match.group(1, 2, 4, 5, 6) + + if patch: + self.version = tuple(map(int, [major, minor, patch])) + else: + self.version = tuple(map(int, [major, minor])) + (0,) + + if prerelease: + self.prerelease = (prerelease[0], int(prerelease_num)) + else: + self.prerelease = None + + + def __str__ (self): + + if self.version[2] == 0: + vstring = '.'.join(map(str, self.version[0:2])) + else: + vstring = '.'.join(map(str, self.version)) + + if self.prerelease: + vstring = vstring + self.prerelease[0] + str(self.prerelease[1]) + + return vstring + + + def _cmp (self, other): + if isinstance(other, str): + with suppress_known_deprecation(): + other = StrictVersion(other) + elif not isinstance(other, StrictVersion): + return NotImplemented + + if self.version != other.version: + # numeric versions don't match + # prerelease stuff doesn't matter + if self.version < other.version: + return -1 + else: + return 1 + + # have to compare prerelease + # case 1: neither has prerelease; they're equal + # case 2: self has prerelease, other doesn't; other is greater + # case 3: self doesn't have prerelease, other does: self is greater + # case 4: both have prerelease: must compare them! 
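+        # A couple of concrete orderings implied by the cases above (these
+        # follow from the class docstring rather than adding new behaviour):
+        #   StrictVersion('1.0.4a3') < StrictVersion('1.0.4')    -> case 2: a pre-release sorts earlier
+        #   StrictVersion('0.4') == StrictVersion('0.4.0')       -> case 1: a missing patch level parses as 0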
+ + if (not self.prerelease and not other.prerelease): + return 0 + elif (self.prerelease and not other.prerelease): + return -1 + elif (not self.prerelease and other.prerelease): + return 1 + elif (self.prerelease and other.prerelease): + if self.prerelease == other.prerelease: + return 0 + elif self.prerelease < other.prerelease: + return -1 + else: + return 1 + else: + assert False, "never get here" + +# end class StrictVersion + + +# The rules according to Greg Stein: +# 1) a version number has 1 or more numbers separated by a period or by +# sequences of letters. If only periods, then these are compared +# left-to-right to determine an ordering. +# 2) sequences of letters are part of the tuple for comparison and are +# compared lexicographically +# 3) recognize the numeric components may have leading zeroes +# +# The LooseVersion class below implements these rules: a version number +# string is split up into a tuple of integer and string components, and +# comparison is a simple tuple comparison. This means that version +# numbers behave in a predictable and obvious way, but a way that might +# not necessarily be how people *want* version numbers to behave. There +# wouldn't be a problem if people could stick to purely numeric version +# numbers: just split on period and compare the numbers as tuples. +# However, people insist on putting letters into their version numbers; +# the most common purpose seems to be: +# - indicating a "pre-release" version +# ('alpha', 'beta', 'a', 'b', 'pre', 'p') +# - indicating a post-release patch ('p', 'pl', 'patch') +# but of course this can't cover all version number schemes, and there's +# no way to know what a programmer means without asking him. +# +# The problem is what to do with letters (and other non-numeric +# characters) in a version number. The current implementation does the +# obvious and predictable thing: keep them as strings and compare +# lexically within a tuple comparison. This has the desired effect if +# an appended letter sequence implies something "post-release": +# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002". +# +# However, if letters in a version number imply a pre-release version, +# the "obvious" thing isn't correct. Eg. you would expect that +# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison +# implemented here, this just isn't so. +# +# Two possible solutions come to mind. The first is to tie the +# comparison algorithm to a particular set of semantic rules, as has +# been done in the StrictVersion class above. This works great as long +# as everyone can go along with bondage and discipline. Hopefully a +# (large) subset of Python module programmers will agree that the +# particular flavour of bondage and discipline provided by StrictVersion +# provides enough benefit to be worth using, and will submit their +# version numbering scheme to its domination. The free-thinking +# anarchists in the lot will never give in, though, and something needs +# to be done to accommodate them. +# +# Perhaps a "moderately strict" version class could be implemented that +# lets almost anything slide (syntactically), and makes some heuristic +# assumptions about non-digits in version number strings. This could +# sink into special-case-hell, though; if I was as talented and +# idiosyncratic as Larry Wall, I'd go ahead and implement a class that +# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is +# just as happy dealing with things like "2g6" and "1.13++". 
I don't +# think I'm smart enough to do it right though. +# +# In any case, I've coded the test suite for this module (see +# ../test/test_version.py) specifically to fail on things like comparing +# "1.2a2" and "1.2". That's not because the *code* is doing anything +# wrong, it's because the simple, obvious design doesn't match my +# complicated, hairy expectations for real-world version numbers. It +# would be a snap to fix the test suite to say, "Yep, LooseVersion does +# the Right Thing" (ie. the code matches the conception). But I'd rather +# have a conception that matches common notions about version numbers. + +class LooseVersion (Version): + + """Version numbering for anarchists and software realists. + Implements the standard interface for version number classes as + described above. A version number consists of a series of numbers, + separated by either periods or strings of letters. When comparing + version numbers, the numeric components will be compared + numerically, and the alphabetic components lexically. The following + are all valid version numbers, in no particular order: + + 1.5.1 + 1.5.2b2 + 161 + 3.10a + 8.02 + 3.4j + 1996.07.12 + 3.2.pl0 + 3.1.1.6 + 2g6 + 11g + 0.960923 + 2.2beta29 + 1.13++ + 5.5.kw + 2.0b1pl0 + + In fact, there is no such thing as an invalid version number under + this scheme; the rules for comparison are simple and predictable, + but may not always give the results you want (for some definition + of "want"). + """ + + component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE) + + def parse (self, vstring): + # I've given up on thinking I can reconstruct the version string + # from the parsed tuple -- so I just store the string here for + # use by __str__ + self.vstring = vstring + components = [x for x in self.component_re.split(vstring) + if x and x != '.'] + for i, obj in enumerate(components): + try: + components[i] = int(obj) + except ValueError: + pass + + self.version = components + + + def __str__ (self): + return self.vstring + + + def __repr__ (self): + return "LooseVersion ('%s')" % str(self) + + + def _cmp (self, other): + if isinstance(other, str): + other = LooseVersion(other) + elif not isinstance(other, LooseVersion): + return NotImplemented + + if self.version == other.version: + return 0 + if self.version < other.version: + return -1 + if self.version > other.version: + return 1 + + +# end class LooseVersion diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/archive_util.py b/env-llmeval/lib/python3.10/site-packages/setuptools/archive_util.py new file mode 100644 index 0000000000000000000000000000000000000000..0f70284822f50098e21ad439550cdbd4d298d011 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/archive_util.py @@ -0,0 +1,205 @@ +"""Utilities for extracting common archive formats""" + +import zipfile +import tarfile +import os +import shutil +import posixpath +import contextlib +from distutils.errors import DistutilsError + +from pkg_resources import ensure_directory + +__all__ = [ + "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", + "UnrecognizedFormat", "extraction_drivers", "unpack_directory", +] + + +class UnrecognizedFormat(DistutilsError): + """Couldn't recognize the archive type""" + + +def default_filter(src, dst): + """The default progress/filter callback; returns True for all files""" + return dst + + +def unpack_archive( + filename, extract_dir, progress_filter=default_filter, + drivers=None): + """Unpack `filename` to `extract_dir`, or raise 
``UnrecognizedFormat`` + + `progress_filter` is a function taking two arguments: a source path + internal to the archive ('/'-separated), and a filesystem path where it + will be extracted. The callback must return the desired extract path + (which may be the same as the one passed in), or else ``None`` to skip + that file or directory. The callback can thus be used to report on the + progress of the extraction, as well as to filter the items extracted or + alter their extraction paths. + + `drivers`, if supplied, must be a non-empty sequence of functions with the + same signature as this function (minus the `drivers` argument), that raise + ``UnrecognizedFormat`` if they do not support extracting the designated + archive type. The `drivers` are tried in sequence until one is found that + does not raise an error, or until all are exhausted (in which case + ``UnrecognizedFormat`` is raised). If you do not supply a sequence of + drivers, the module's ``extraction_drivers`` constant will be used, which + means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that + order. + """ + for driver in drivers or extraction_drivers: + try: + driver(filename, extract_dir, progress_filter) + except UnrecognizedFormat: + continue + else: + return + else: + raise UnrecognizedFormat( + "Not a recognized archive type: %s" % filename + ) + + +def unpack_directory(filename, extract_dir, progress_filter=default_filter): + """"Unpack" a directory, using the same interface as for archives + + Raises ``UnrecognizedFormat`` if `filename` is not a directory + """ + if not os.path.isdir(filename): + raise UnrecognizedFormat("%s is not a directory" % filename) + + paths = { + filename: ('', extract_dir), + } + for base, dirs, files in os.walk(filename): + src, dst = paths[base] + for d in dirs: + paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) + for f in files: + target = os.path.join(dst, f) + target = progress_filter(src + f, target) + if not target: + # skip non-files + continue + ensure_directory(target) + f = os.path.join(base, f) + shutil.copyfile(f, target) + shutil.copystat(f, target) + + +def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): + """Unpack zip `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined + by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + + if not zipfile.is_zipfile(filename): + raise UnrecognizedFormat("%s is not a zip file" % (filename,)) + + with zipfile.ZipFile(filename) as z: + for info in z.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name.split('/'): + continue + + target = os.path.join(extract_dir, *name.split('/')) + target = progress_filter(name, target) + if not target: + continue + if name.endswith('/'): + # directory + ensure_directory(target) + else: + # file + ensure_directory(target) + data = z.read(info.filename) + with open(target, 'wb') as f: + f.write(data) + unix_attributes = info.external_attr >> 16 + if unix_attributes: + os.chmod(target, unix_attributes) + + +def _resolve_tar_file_or_dir(tar_obj, tar_member_obj): + """Resolve any links and extract link targets as normal files.""" + while tar_member_obj is not None and ( + tar_member_obj.islnk() or tar_member_obj.issym()): + linkpath = tar_member_obj.linkname + if tar_member_obj.issym(): + base = posixpath.dirname(tar_member_obj.name) + linkpath = posixpath.join(base, linkpath) + linkpath = posixpath.normpath(linkpath) + tar_member_obj = tar_obj._getmember(linkpath) + + is_file_or_dir = ( + tar_member_obj is not None and + (tar_member_obj.isfile() or tar_member_obj.isdir()) + ) + if is_file_or_dir: + return tar_member_obj + + raise LookupError('Got unknown file type') + + +def _iter_open_tar(tar_obj, extract_dir, progress_filter): + """Emit member-destination pairs from a tar archive.""" + # don't do any chowning! + tar_obj.chown = lambda *args: None + + with contextlib.closing(tar_obj): + for member in tar_obj: + name = member.name + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' in name.split('/'): + continue + + prelim_dst = os.path.join(extract_dir, *name.split('/')) + + try: + member = _resolve_tar_file_or_dir(tar_obj, member) + except LookupError: + continue + + final_dst = progress_filter(name, prelim_dst) + if not final_dst: + continue + + if final_dst.endswith(os.sep): + final_dst = final_dst[:-1] + + yield member, final_dst + + +def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + + Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined + by ``tarfile.open()``). See ``unpack_archive()`` for an explanation + of the `progress_filter` argument. + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError as e: + raise UnrecognizedFormat( + "%s is not a compressed or uncompressed tar file" % (filename,) + ) from e + + for member, final_dst in _iter_open_tar( + tarobj, extract_dir, progress_filter, + ): + try: + # XXX Ugh + tarobj._extract_member(member, final_dst) + except tarfile.ExtractError: + # chown/chmod/mkfifo/mknode/makedev failed + pass + + return True + + +extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/build_meta.py b/env-llmeval/lib/python3.10/site-packages/setuptools/build_meta.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ac613ba38c511f63e629096777544cfb8831c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/build_meta.py @@ -0,0 +1,290 @@ +"""A PEP 517 interface to setuptools + +Previously, when a user or a command line tool (let's call it a "frontend") +needed to make a request of setuptools to take a certain action, for +example, generating a list of installation requirements, the frontend would +would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. + +PEP 517 defines a different method of interfacing with setuptools. Rather +than calling "setup.py" directly, the frontend should: + + 1. 
Set the current directory to the directory with a setup.py file + 2. Import this module into a safe python interpreter (one in which + setuptools can potentially set global variables or crash hard). + 3. Call one of the functions defined in PEP 517. + +What each function does is defined in PEP 517. However, here is a "casual" +definition of the functions (this definition should not be relied on for +bug reports or API stability): + + - `build_wheel`: build a wheel in the folder and return the basename + - `get_requires_for_build_wheel`: get the `setup_requires` to build + - `prepare_metadata_for_build_wheel`: get the `install_requires` + - `build_sdist`: build an sdist in the folder and return the basename + - `get_requires_for_build_sdist`: get the `setup_requires` to build + +Again, this is not a formal definition! Just a "taste" of the module. +""" + +import io +import os +import sys +import tokenize +import shutil +import contextlib +import tempfile +import warnings + +import setuptools +import distutils + +from pkg_resources import parse_requirements + +__all__ = ['get_requires_for_build_sdist', + 'get_requires_for_build_wheel', + 'prepare_metadata_for_build_wheel', + 'build_wheel', + 'build_sdist', + '__legacy__', + 'SetupRequirementsError'] + + +class SetupRequirementsError(BaseException): + def __init__(self, specifiers): + self.specifiers = specifiers + + +class Distribution(setuptools.dist.Distribution): + def fetch_build_eggs(self, specifiers): + specifier_list = list(map(str, parse_requirements(specifiers))) + + raise SetupRequirementsError(specifier_list) + + @classmethod + @contextlib.contextmanager + def patch(cls): + """ + Replace + distutils.dist.Distribution with this class + for the duration of this context. + """ + orig = distutils.core.Distribution + distutils.core.Distribution = cls + try: + yield + finally: + distutils.core.Distribution = orig + + +@contextlib.contextmanager +def no_install_setup_requires(): + """Temporarily disable installing setup_requires + + Under PEP 517, the backend reports build dependencies to the frontend, + and the frontend is responsible for ensuring they're installed. + So setuptools (acting as a backend) should not try to install them. + """ + orig = setuptools._install_setup_requires + setuptools._install_setup_requires = lambda attrs: None + try: + yield + finally: + setuptools._install_setup_requires = orig + + +def _get_immediate_subdirectories(a_dir): + return [name for name in os.listdir(a_dir) + if os.path.isdir(os.path.join(a_dir, name))] + + +def _file_with_extension(directory, extension): + matching = ( + f for f in os.listdir(directory) + if f.endswith(extension) + ) + try: + file, = matching + except ValueError: + raise ValueError( + 'No distribution was found. 
Ensure that `setup.py` ' + 'is not empty and that it calls `setup()`.') + return file + + +def _open_setup_script(setup_script): + if not os.path.exists(setup_script): + # Supply a default setup.py + return io.StringIO(u"from setuptools import setup; setup()") + + return getattr(tokenize, 'open', open)(setup_script) + + +@contextlib.contextmanager +def suppress_known_deprecation(): + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', 'setup.py install is deprecated') + yield + + +class _BuildMetaBackend(object): + + def _fix_config(self, config_settings): + config_settings = config_settings or {} + config_settings.setdefault('--global-option', []) + return config_settings + + def _get_build_requires(self, config_settings, requirements): + config_settings = self._fix_config(config_settings) + + sys.argv = sys.argv[:1] + ['egg_info'] + \ + config_settings["--global-option"] + try: + with Distribution.patch(): + self.run_setup() + except SetupRequirementsError as e: + requirements += e.specifiers + + return requirements + + def run_setup(self, setup_script='setup.py'): + # Note that we can reuse our build directory between calls + # Correctness comes first, then optimization later + __file__ = setup_script + __name__ = '__main__' + + with _open_setup_script(__file__) as f: + code = f.read().replace(r'\r\n', r'\n') + + exec(compile(code, __file__, 'exec'), locals()) + + def get_requires_for_build_wheel(self, config_settings=None): + config_settings = self._fix_config(config_settings) + return self._get_build_requires( + config_settings, requirements=['wheel']) + + def get_requires_for_build_sdist(self, config_settings=None): + config_settings = self._fix_config(config_settings) + return self._get_build_requires(config_settings, requirements=[]) + + def prepare_metadata_for_build_wheel(self, metadata_directory, + config_settings=None): + sys.argv = sys.argv[:1] + [ + 'dist_info', '--egg-base', metadata_directory] + with no_install_setup_requires(): + self.run_setup() + + dist_info_directory = metadata_directory + while True: + dist_infos = [f for f in os.listdir(dist_info_directory) + if f.endswith('.dist-info')] + + if ( + len(dist_infos) == 0 and + len(_get_immediate_subdirectories(dist_info_directory)) == 1 + ): + + dist_info_directory = os.path.join( + dist_info_directory, os.listdir(dist_info_directory)[0]) + continue + + assert len(dist_infos) == 1 + break + + # PEP 517 requires that the .dist-info directory be placed in the + # metadata_directory. To comply, we MUST copy the directory to the root + if dist_info_directory != metadata_directory: + shutil.move( + os.path.join(dist_info_directory, dist_infos[0]), + metadata_directory) + shutil.rmtree(dist_info_directory, ignore_errors=True) + + return dist_infos[0] + + def _build_with_temp_dir(self, setup_command, result_extension, + result_directory, config_settings): + config_settings = self._fix_config(config_settings) + result_directory = os.path.abspath(result_directory) + + # Build in a temporary directory, then copy to the target. 
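+        # Note: the temporary build directory is created inside
+        # result_directory itself, presumably so that the os.rename at the
+        # end never has to move the built artifact across filesystems.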
+ os.makedirs(result_directory, exist_ok=True) + with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir: + sys.argv = (sys.argv[:1] + setup_command + + ['--dist-dir', tmp_dist_dir] + + config_settings["--global-option"]) + with no_install_setup_requires(): + self.run_setup() + + result_basename = _file_with_extension( + tmp_dist_dir, result_extension) + result_path = os.path.join(result_directory, result_basename) + if os.path.exists(result_path): + # os.rename will fail overwriting on non-Unix. + os.remove(result_path) + os.rename(os.path.join(tmp_dist_dir, result_basename), result_path) + + return result_basename + + def build_wheel(self, wheel_directory, config_settings=None, + metadata_directory=None): + with suppress_known_deprecation(): + return self._build_with_temp_dir(['bdist_wheel'], '.whl', + wheel_directory, config_settings) + + def build_sdist(self, sdist_directory, config_settings=None): + return self._build_with_temp_dir(['sdist', '--formats', 'gztar'], + '.tar.gz', sdist_directory, + config_settings) + + +class _BuildMetaLegacyBackend(_BuildMetaBackend): + """Compatibility backend for setuptools + + This is a version of setuptools.build_meta that endeavors + to maintain backwards + compatibility with pre-PEP 517 modes of invocation. It + exists as a temporary + bridge between the old packaging mechanism and the new + packaging mechanism, + and will eventually be removed. + """ + def run_setup(self, setup_script='setup.py'): + # In order to maintain compatibility with scripts assuming that + # the setup.py script is in a directory on the PYTHONPATH, inject + # '' into sys.path. (pypa/setuptools#1642) + sys_path = list(sys.path) # Save the original path + + script_dir = os.path.dirname(os.path.abspath(setup_script)) + if script_dir not in sys.path: + sys.path.insert(0, script_dir) + + # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to + # get the directory of the source code. They expect it to refer to the + # setup.py script. + sys_argv_0 = sys.argv[0] + sys.argv[0] = setup_script + + try: + super(_BuildMetaLegacyBackend, + self).run_setup(setup_script=setup_script) + finally: + # While PEP 517 frontends should be calling each hook in a fresh + # subprocess according to the standard (and thus it should not be + # strictly necessary to restore the old sys.path), we'll restore + # the original path so that the path manipulation does not persist + # within the hook after run_setup is called. 
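+            # The slice assignment below restores sys.path in place, so any
+            # code that kept a reference to the sys.path list object also
+            # sees the original entries again.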
+ sys.path[:] = sys_path + sys.argv[0] = sys_argv_0 + + +# The primary backend +_BACKEND = _BuildMetaBackend() + +get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel +get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist +prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel +build_wheel = _BACKEND.build_wheel +build_sdist = _BACKEND.build_sdist + + +# The legacy backend +__legacy__ = _BuildMetaLegacyBackend() diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/cli-32.exe b/env-llmeval/lib/python3.10/site-packages/setuptools/cli-32.exe new file mode 100644 index 0000000000000000000000000000000000000000..b1487b7819e7286577a043c7726fbe0ca1543083 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/setuptools/cli-32.exe differ diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/cli-arm64.exe b/env-llmeval/lib/python3.10/site-packages/setuptools/cli-arm64.exe new file mode 100644 index 0000000000000000000000000000000000000000..7a87ce48093d2c984b2ceb7b1f8e1ba6f5fc94f1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/setuptools/cli-arm64.exe differ diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/cli.exe b/env-llmeval/lib/python3.10/site-packages/setuptools/cli.exe new file mode 100644 index 0000000000000000000000000000000000000000..b1487b7819e7286577a043c7726fbe0ca1543083 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/setuptools/cli.exe differ diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/errors.py b/env-llmeval/lib/python3.10/site-packages/setuptools/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..f4d35a630a016b74be9b18a63d89a5291d2b5f65 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/errors.py @@ -0,0 +1,40 @@ +"""setuptools.errors + +Provides exceptions used by setuptools modules. +""" + +from distutils import errors as _distutils_errors +from distutils.errors import DistutilsError + + +class RemovedCommandError(DistutilsError, RuntimeError): + """Error used for commands that have been removed in setuptools. + + Since ``setuptools`` is built on ``distutils``, simply removing a command + from ``setuptools`` will make the behavior fall back to ``distutils``; this + error is raised if a command exists in ``distutils`` but has been actively + removed in ``setuptools``. 
+ """ + + +# Re-export errors from distutils to facilitate the migration to PEP632 + +ByteCompileError = _distutils_errors.DistutilsByteCompileError +CCompilerError = _distutils_errors.CCompilerError +ClassError = _distutils_errors.DistutilsClassError +CompileError = _distutils_errors.CompileError +ExecError = _distutils_errors.DistutilsExecError +FileError = _distutils_errors.DistutilsFileError +InternalError = _distutils_errors.DistutilsInternalError +LibError = _distutils_errors.LibError +LinkError = _distutils_errors.LinkError +ModuleError = _distutils_errors.DistutilsModuleError +OptionError = _distutils_errors.DistutilsOptionError +PlatformError = _distutils_errors.DistutilsPlatformError +PreprocessError = _distutils_errors.PreprocessError +SetupError = _distutils_errors.DistutilsSetupError +TemplateError = _distutils_errors.DistutilsTemplateError +UnknownFileError = _distutils_errors.UnknownFileError + +# The root error class in the hierarchy +BaseError = _distutils_errors.DistutilsError diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/extension.py b/env-llmeval/lib/python3.10/site-packages/setuptools/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..1820722a494b1744a406e364bc3dc3aefc7dd059 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/extension.py @@ -0,0 +1,55 @@ +import re +import functools +import distutils.core +import distutils.errors +import distutils.extension + +from .monkey import get_unpatched + + +def _have_cython(): + """ + Return True if Cython can be imported. + """ + cython_impl = 'Cython.Distutils.build_ext' + try: + # from (cython_impl) import build_ext + __import__(cython_impl, fromlist=['build_ext']).build_ext + return True + except Exception: + pass + return False + + +# for compatibility +have_pyrex = _have_cython + +_Extension = get_unpatched(distutils.core.Extension) + + +class Extension(_Extension): + """Extension that uses '.c' files in place of '.pyx' files""" + + def __init__(self, name, sources, *args, **kw): + # The *args is needed for compatibility as calls may use positional + # arguments. py_limited_api may be set only via keyword. + self.py_limited_api = kw.pop("py_limited_api", False) + _Extension.__init__(self, name, sources, *args, **kw) + + def _convert_pyx_sources_to_lang(self): + """ + Replace sources with .pyx extensions to sources with the target + language extension. This mechanism allows language authors to supply + pre-converted sources but to prefer the .pyx sources. + """ + if _have_cython(): + # the build has Cython, so allow it to compile the .pyx files + return + lang = self.language or '' + target_ext = '.cpp' if lang.lower() == 'c++' else '.c' + sub = functools.partial(re.sub, '.pyx$', target_ext) + self.sources = list(map(sub, self.sources)) + + +class Library(Extension): + """Just like a regular Extension, but built as a library instead""" diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/extern/__init__.py b/env-llmeval/lib/python3.10/site-packages/setuptools/extern/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..baca1afabe94f3cf7a9309d8f11258a94fb19f06 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/extern/__init__.py @@ -0,0 +1,73 @@ +import importlib.util +import sys + + +class VendorImporter: + """ + A PEP 302 meta path importer for finding optionally-vendored + or otherwise naturally-installed packages from root_name. 
+ """ + + def __init__(self, root_name, vendored_names=(), vendor_pkg=None): + self.root_name = root_name + self.vendored_names = set(vendored_names) + self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') + + @property + def search_path(self): + """ + Search first the vendor package then as a natural package. + """ + yield self.vendor_pkg + '.' + yield '' + + def _module_matches_namespace(self, fullname): + """Figure out if the target module is vendored.""" + root, base, target = fullname.partition(self.root_name + '.') + return not root and any(map(target.startswith, self.vendored_names)) + + def load_module(self, fullname): + """ + Iterate over the search path to locate and load fullname. + """ + root, base, target = fullname.partition(self.root_name + '.') + for prefix in self.search_path: + try: + extant = prefix + target + __import__(extant) + mod = sys.modules[extant] + sys.modules[fullname] = mod + return mod + except ImportError: + pass + else: + raise ImportError( + "The '{target}' package is required; " + "normally this is bundled with this package so if you get " + "this warning, consult the packager of your " + "distribution.".format(**locals()) + ) + + def create_module(self, spec): + return self.load_module(spec.name) + + def exec_module(self, module): + pass + + def find_spec(self, fullname, path=None, target=None): + """Return a module spec for vendored names.""" + return ( + importlib.util.spec_from_loader(fullname, self) + if self._module_matches_namespace(fullname) else None + ) + + def install(self): + """ + Install this importer into sys.meta_path if not already present. + """ + if self not in sys.meta_path: + sys.meta_path.append(self) + + +names = 'packaging', 'pyparsing', 'ordered_set', 'more_itertools', +VendorImporter(__name__, names, 'setuptools._vendor').install() diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/gui-32.exe b/env-llmeval/lib/python3.10/site-packages/setuptools/gui-32.exe new file mode 100644 index 0000000000000000000000000000000000000000..f8d3509653ba8f80ca7f3aa7f95616142ba83a94 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/setuptools/gui-32.exe differ diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/gui-64.exe b/env-llmeval/lib/python3.10/site-packages/setuptools/gui-64.exe new file mode 100644 index 0000000000000000000000000000000000000000..330c51a5dde15a0bb610a48cd0ca11770c914dae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/setuptools/gui-64.exe differ diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/gui.exe b/env-llmeval/lib/python3.10/site-packages/setuptools/gui.exe new file mode 100644 index 0000000000000000000000000000000000000000..f8d3509653ba8f80ca7f3aa7f95616142ba83a94 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/setuptools/gui.exe differ diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/installer.py b/env-llmeval/lib/python3.10/site-packages/setuptools/installer.py new file mode 100644 index 0000000000000000000000000000000000000000..b7096df14b4a15980ad138a3990d3e25aeb3bfe1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/installer.py @@ -0,0 +1,104 @@ +import glob +import os +import subprocess +import sys +import tempfile +import warnings +from distutils import log +from distutils.errors import DistutilsError + +import pkg_resources +from setuptools.wheel import Wheel +from ._deprecation_warning import SetuptoolsDeprecationWarning + + +def 
_fixup_find_links(find_links): + """Ensure find-links option end-up being a list of strings.""" + if isinstance(find_links, str): + return find_links.split() + assert isinstance(find_links, (tuple, list)) + return find_links + + +def fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME + """Fetch an egg needed for building. + + Use pip/wheel to fetch/build a wheel.""" + warnings.warn( + "setuptools.installer is deprecated. Requirements should " + "be satisfied by a PEP 517 installer.", + SetuptoolsDeprecationWarning, + ) + # Warn if wheel is not available + try: + pkg_resources.get_distribution('wheel') + except pkg_resources.DistributionNotFound: + dist.announce('WARNING: The wheel package is not available.', log.WARN) + # Ignore environment markers; if supplied, it is required. + req = strip_marker(req) + # Take easy_install options into account, but do not override relevant + # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll + # take precedence. + opts = dist.get_option_dict('easy_install') + if 'allow_hosts' in opts: + raise DistutilsError('the `allow-hosts` option is not supported ' + 'when using pip to install requirements.') + quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ + if 'PIP_INDEX_URL' in os.environ: + index_url = None + elif 'index_url' in opts: + index_url = opts['index_url'][1] + else: + index_url = None + find_links = ( + _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts + else [] + ) + if dist.dependency_links: + find_links.extend(dist.dependency_links) + eggs_dir = os.path.realpath(dist.get_egg_cache_dir()) + environment = pkg_resources.Environment() + for egg_dist in pkg_resources.find_distributions(eggs_dir): + if egg_dist in req and environment.can_add(egg_dist): + return egg_dist + with tempfile.TemporaryDirectory() as tmpdir: + cmd = [ + sys.executable, '-m', 'pip', + '--disable-pip-version-check', + 'wheel', '--no-deps', + '-w', tmpdir, + ] + if quiet: + cmd.append('--quiet') + if index_url is not None: + cmd.extend(('--index-url', index_url)) + for link in find_links or []: + cmd.extend(('--find-links', link)) + # If requirement is a PEP 508 direct URL, directly pass + # the URL to pip, as `req @ url` does not work on the + # command line. + cmd.append(req.url or str(req)) + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as e: + raise DistutilsError(str(e)) from e + wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0]) + dist_location = os.path.join(eggs_dir, wheel.egg_name()) + wheel.install_as_egg(dist_location) + dist_metadata = pkg_resources.PathMetadata( + dist_location, os.path.join(dist_location, 'EGG-INFO')) + dist = pkg_resources.Distribution.from_filename( + dist_location, metadata=dist_metadata) + return dist + + +def strip_marker(req): + """ + Return a new requirement without the environment marker to avoid + calling pip with something like `babel; extra == "i18n"`, which + would always be ignored. 
+ """ + # create a copy to avoid mutating the input + req = pkg_resources.Requirement.parse(str(req)) + req.marker = None + return req diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/launch.py b/env-llmeval/lib/python3.10/site-packages/setuptools/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..0208fdf33b640cd9791359d74673bb90cfb87f96 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/launch.py @@ -0,0 +1,36 @@ +""" +Launch the Python script on the command line after +setuptools is bootstrapped via import. +""" + +# Note that setuptools gets imported implicitly by the +# invocation of this script using python -m setuptools.launch + +import tokenize +import sys + + +def run(): + """ + Run the script in sys.argv[1] as if it had + been invoked naturally. + """ + __builtins__ + script_name = sys.argv[1] + namespace = dict( + __file__=script_name, + __name__='__main__', + __doc__=None, + ) + sys.argv[:] = sys.argv[1:] + + open_ = getattr(tokenize, 'open', open) + with open_(script_name) as fid: + script = fid.read() + norm_script = script.replace('\\r\\n', '\\n') + code = compile(norm_script, script_name, 'exec') + exec(code, namespace) + + +if __name__ == '__main__': + run() diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/monkey.py b/env-llmeval/lib/python3.10/site-packages/setuptools/monkey.py new file mode 100644 index 0000000000000000000000000000000000000000..fb36dc1a97a9f1f2a52c25fb6b872a7afa640be7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/monkey.py @@ -0,0 +1,177 @@ +""" +Monkey patching of distutils. +""" + +import sys +import distutils.filelist +import platform +import types +import functools +from importlib import import_module +import inspect + +import setuptools + +__all__ = [] +""" +Everything is private. Contact the project team +if you think you need this functionality. +""" + + +def _get_mro(cls): + """ + Returns the bases classes for cls sorted by the MRO. + + Works around an issue on Jython where inspect.getmro will not return all + base classes if multiple classes share the same name. Instead, this + function will return a tuple containing the class itself, and the contents + of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024. + """ + if platform.python_implementation() == "Jython": + return (cls,) + cls.__bases__ + return inspect.getmro(cls) + + +def get_unpatched(item): + lookup = ( + get_unpatched_class if isinstance(item, type) else + get_unpatched_function if isinstance(item, types.FunctionType) else + lambda item: None + ) + return lookup(item) + + +def get_unpatched_class(cls): + """Protect against re-patching the distutils if reloaded + + Also ensures that no other distutils extension monkeypatched the distutils + first. 
+ """ + external_bases = ( + cls + for cls in _get_mro(cls) + if not cls.__module__.startswith('setuptools') + ) + base = next(external_bases) + if not base.__module__.startswith('distutils'): + msg = "distutils has already been patched by %r" % cls + raise AssertionError(msg) + return base + + +def patch_all(): + # we can't patch distutils.cmd, alas + distutils.core.Command = setuptools.Command + + has_issue_12885 = sys.version_info <= (3, 5, 3) + + if has_issue_12885: + # fix findall bug in distutils (http://bugs.python.org/issue12885) + distutils.filelist.findall = setuptools.findall + + needs_warehouse = ( + sys.version_info < (2, 7, 13) + or + (3, 4) < sys.version_info < (3, 4, 6) + or + (3, 5) < sys.version_info <= (3, 5, 3) + ) + + if needs_warehouse: + warehouse = 'https://upload.pypi.org/legacy/' + distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse + + _patch_distribution_metadata() + + # Install Distribution throughout the distutils + for module in distutils.dist, distutils.core, distutils.cmd: + module.Distribution = setuptools.dist.Distribution + + # Install the patched Extension + distutils.core.Extension = setuptools.extension.Extension + distutils.extension.Extension = setuptools.extension.Extension + if 'distutils.command.build_ext' in sys.modules: + sys.modules['distutils.command.build_ext'].Extension = ( + setuptools.extension.Extension + ) + + patch_for_msvc_specialized_compiler() + + +def _patch_distribution_metadata(): + """Patch write_pkg_file and read_pkg_file for higher metadata standards""" + for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'): + new_val = getattr(setuptools.dist, attr) + setattr(distutils.dist.DistributionMetadata, attr, new_val) + + +def patch_func(replacement, target_mod, func_name): + """ + Patch func_name in target_mod with replacement + + Important - original must be resolved by name to avoid + patching an already patched function. + """ + original = getattr(target_mod, func_name) + + # set the 'unpatched' attribute on the replacement to + # point to the original. + vars(replacement).setdefault('unpatched', original) + + # replace the function in the original module + setattr(target_mod, func_name, replacement) + + +def get_unpatched_function(candidate): + return getattr(candidate, 'unpatched') + + +def patch_for_msvc_specialized_compiler(): + """ + Patch functions in distutils to use standalone Microsoft Visual C++ + compilers. + """ + # import late to avoid circular imports on Python < 3.5 + msvc = import_module('setuptools.msvc') + + if platform.system() != 'Windows': + # Compilers only available on Microsoft Windows + return + + def patch_params(mod_name, func_name): + """ + Prepare the parameters for patch_func to patch indicated function. 
+ """ + repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_' + repl_name = repl_prefix + func_name.lstrip('_') + repl = getattr(msvc, repl_name) + mod = import_module(mod_name) + if not hasattr(mod, func_name): + raise ImportError(func_name) + return repl, mod, func_name + + # Python 2.7 to 3.4 + msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler') + + # Python 3.5+ + msvc14 = functools.partial(patch_params, 'distutils._msvccompiler') + + try: + # Patch distutils.msvc9compiler + patch_func(*msvc9('find_vcvarsall')) + patch_func(*msvc9('query_vcvarsall')) + except ImportError: + pass + + try: + # Patch distutils._msvccompiler._get_vc_env + patch_func(*msvc14('_get_vc_env')) + except ImportError: + pass + + try: + # Patch distutils._msvccompiler.gen_lib_options for Numpy + patch_func(*msvc14('gen_lib_options')) + except ImportError: + pass diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/msvc.py b/env-llmeval/lib/python3.10/site-packages/setuptools/msvc.py new file mode 100644 index 0000000000000000000000000000000000000000..281ea1c2af6b0eb5f02ecc6d115f2d6884be74b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/msvc.py @@ -0,0 +1,1805 @@ +""" +Improved support for Microsoft Visual C++ compilers. + +Known supported compilers: +-------------------------- +Microsoft Visual C++ 9.0: + Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) + Microsoft Windows SDK 6.1 (x86, x64, ia64) + Microsoft Windows SDK 7.0 (x86, x64, ia64) + +Microsoft Visual C++ 10.0: + Microsoft Windows SDK 7.1 (x86, x64, ia64) + +Microsoft Visual C++ 14.X: + Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) + Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) + Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64) + +This may also support compilers shipped with compatible Visual Studio versions. +""" + +import json +from io import open +from os import listdir, pathsep +from os.path import join, isfile, isdir, dirname +import sys +import contextlib +import platform +import itertools +import subprocess +import distutils.errors +from setuptools.extern.packaging.version import LegacyVersion +from setuptools.extern.more_itertools import unique_everseen + +from .monkey import get_unpatched + +if platform.system() == 'Windows': + import winreg + from os import environ +else: + # Mock winreg and environ so the module can be imported on this platform. + + class winreg: + HKEY_USERS = None + HKEY_CURRENT_USER = None + HKEY_LOCAL_MACHINE = None + HKEY_CLASSES_ROOT = None + + environ = dict() + +_msvc9_suppress_errors = ( + # msvc9compiler isn't available on some platforms + ImportError, + + # msvc9compiler raises DistutilsPlatformError in some + # environments. See #1118. + distutils.errors.DistutilsPlatformError, +) + +try: + from distutils.msvc9compiler import Reg +except _msvc9_suppress_errors: + pass + + +def msvc9_find_vcvarsall(version): + """ + Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone + compiler build for Python + (VCForPython / Microsoft Visual C++ Compiler for Python 2.7). + + Fall back to original behavior when the standalone compiler is not + available. + + Redirect the path of "vcvarsall.bat". + + Parameters + ---------- + version: float + Required Microsoft Visual C++ version. 
+ + Return + ------ + str + vcvarsall.bat path + """ + vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f' + key = vc_base % ('', version) + try: + # Per-user installs register the compiler path here + productdir = Reg.get_value(key, "installdir") + except KeyError: + try: + # All-user installs on a 64-bit system register here + key = vc_base % ('Wow6432Node\\', version) + productdir = Reg.get_value(key, "installdir") + except KeyError: + productdir = None + + if productdir: + vcvarsall = join(productdir, "vcvarsall.bat") + if isfile(vcvarsall): + return vcvarsall + + return get_unpatched(msvc9_find_vcvarsall)(version) + + +def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs): + """ + Patched "distutils.msvc9compiler.query_vcvarsall" for support extra + Microsoft Visual C++ 9.0 and 10.0 compilers. + + Set environment without use of "vcvarsall.bat". + + Parameters + ---------- + ver: float + Required Microsoft Visual C++ version. + arch: str + Target architecture. + + Return + ------ + dict + environment + """ + # Try to get environment from vcvarsall.bat (Classical way) + try: + orig = get_unpatched(msvc9_query_vcvarsall) + return orig(ver, arch, *args, **kwargs) + except distutils.errors.DistutilsPlatformError: + # Pass error if Vcvarsall.bat is missing + pass + except ValueError: + # Pass error if environment not set after executing vcvarsall.bat + pass + + # If error, try to set environment directly + try: + return EnvironmentInfo(arch, ver).return_env() + except distutils.errors.DistutilsPlatformError as exc: + _augment_exception(exc, ver, arch) + raise + + +def _msvc14_find_vc2015(): + """Python 3.8 "distutils/_msvccompiler.py" backport""" + try: + key = winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, + r"Software\Microsoft\VisualStudio\SxS\VC7", + 0, + winreg.KEY_READ | winreg.KEY_WOW64_32KEY + ) + except OSError: + return None, None + + best_version = 0 + best_dir = None + with key: + for i in itertools.count(): + try: + v, vc_dir, vt = winreg.EnumValue(key, i) + except OSError: + break + if v and vt == winreg.REG_SZ and isdir(vc_dir): + try: + version = int(float(v)) + except (ValueError, TypeError): + continue + if version >= 14 and version > best_version: + best_version, best_dir = version, vc_dir + return best_version, best_dir + + +def _msvc14_find_vc2017(): + """Python 3.8 "distutils/_msvccompiler.py" backport + + Returns "15, path" based on the result of invoking vswhere.exe + If no install is found, returns "None, None" + + The version is returned to avoid unnecessarily changing the function + result. It may be ignored when the path is not None. + + If vswhere.exe is not available, by definition, VS 2017 is not + installed. 
+ """ + root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles") + if not root: + return None, None + + try: + path = subprocess.check_output([ + join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), + "-latest", + "-prerelease", + "-requiresAny", + "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", + "-requires", "Microsoft.VisualStudio.Workload.WDExpress", + "-property", "installationPath", + "-products", "*", + ]).decode(encoding="mbcs", errors="strict").strip() + except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): + return None, None + + path = join(path, "VC", "Auxiliary", "Build") + if isdir(path): + return 15, path + + return None, None + + +PLAT_SPEC_TO_RUNTIME = { + 'x86': 'x86', + 'x86_amd64': 'x64', + 'x86_arm': 'arm', + 'x86_arm64': 'arm64' +} + + +def _msvc14_find_vcvarsall(plat_spec): + """Python 3.8 "distutils/_msvccompiler.py" backport""" + _, best_dir = _msvc14_find_vc2017() + vcruntime = None + + if plat_spec in PLAT_SPEC_TO_RUNTIME: + vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec] + else: + vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86' + + if best_dir: + vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**", + vcruntime_plat, "Microsoft.VC14*.CRT", + "vcruntime140.dll") + try: + import glob + vcruntime = glob.glob(vcredist, recursive=True)[-1] + except (ImportError, OSError, LookupError): + vcruntime = None + + if not best_dir: + best_version, best_dir = _msvc14_find_vc2015() + if best_version: + vcruntime = join(best_dir, 'redist', vcruntime_plat, + "Microsoft.VC140.CRT", "vcruntime140.dll") + + if not best_dir: + return None, None + + vcvarsall = join(best_dir, "vcvarsall.bat") + if not isfile(vcvarsall): + return None, None + + if not vcruntime or not isfile(vcruntime): + vcruntime = None + + return vcvarsall, vcruntime + + +def _msvc14_get_vc_env(plat_spec): + """Python 3.8 "distutils/_msvccompiler.py" backport""" + if "DISTUTILS_USE_SDK" in environ: + return { + key.lower(): value + for key, value in environ.items() + } + + vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec) + if not vcvarsall: + raise distutils.errors.DistutilsPlatformError( + "Unable to find vcvarsall.bat" + ) + + try: + out = subprocess.check_output( + 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), + stderr=subprocess.STDOUT, + ).decode('utf-16le', errors='replace') + except subprocess.CalledProcessError as exc: + raise distutils.errors.DistutilsPlatformError( + "Error executing {}".format(exc.cmd) + ) from exc + + env = { + key.lower(): value + for key, _, value in + (line.partition('=') for line in out.splitlines()) + if key and value + } + + if vcruntime: + env['py_vcruntime_redist'] = vcruntime + return env + + +def msvc14_get_vc_env(plat_spec): + """ + Patched "distutils._msvccompiler._get_vc_env" for support extra + Microsoft Visual C++ 14.X compilers. + + Set environment without use of "vcvarsall.bat". + + Parameters + ---------- + plat_spec: str + Target architecture. 
+ + Return + ------ + dict + environment + """ + + # Always use backport from CPython 3.8 + try: + return _msvc14_get_vc_env(plat_spec) + except distutils.errors.DistutilsPlatformError as exc: + _augment_exception(exc, 14.0) + raise + + +def msvc14_gen_lib_options(*args, **kwargs): + """ + Patched "distutils._msvccompiler.gen_lib_options" for fix + compatibility between "numpy.distutils" and "distutils._msvccompiler" + (for Numpy < 1.11.2) + """ + if "numpy.distutils" in sys.modules: + import numpy as np + if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'): + return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) + return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs) + + +def _augment_exception(exc, version, arch=''): + """ + Add details to the exception message to help guide the user + as to what action will resolve it. + """ + # Error if MSVC++ directory not found or environment not set + message = exc.args[0] + + if "vcvarsall" in message.lower() or "visual c" in message.lower(): + # Special error message if MSVC++ not installed + tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.' + message = tmpl.format(**locals()) + msdownload = 'www.microsoft.com/download/details.aspx?id=%d' + if version == 9.0: + if arch.lower().find('ia64') > -1: + # For VC++ 9.0, if IA64 support is needed, redirect user + # to Windows SDK 7.0. + # Note: No download link available from Microsoft. + message += ' Get it with "Microsoft Windows SDK 7.0"' + else: + # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : + # This redirection link is maintained by Microsoft. + # Contact vspython@microsoft.com if it needs updating. + message += ' Get it from http://aka.ms/vcpython27' + elif version == 10.0: + # For VC++ 10.0 Redirect user to Windows SDK 7.1 + message += ' Get it with "Microsoft Windows SDK 7.1": ' + message += msdownload % 8279 + elif version >= 14.0: + # For VC++ 14.X Redirect user to latest Visual C++ Build Tools + message += (' Get it with "Microsoft C++ Build Tools": ' + r'https://visualstudio.microsoft.com' + r'/visual-cpp-build-tools/') + + exc.args = (message, ) + + +class PlatformInfo: + """ + Current and Target Architectures information. + + Parameters + ---------- + arch: str + Target architecture. + """ + current_cpu = environ.get('processor_architecture', '').lower() + + def __init__(self, arch): + self.arch = arch.lower().replace('x64', 'amd64') + + @property + def target_cpu(self): + """ + Return Target CPU architecture. + + Return + ------ + str + Target CPU + """ + return self.arch[self.arch.find('_') + 1:] + + def target_is_x86(self): + """ + Return True if target CPU is x86 32 bits.. + + Return + ------ + bool + CPU is x86 32 bits + """ + return self.target_cpu == 'x86' + + def current_is_x86(self): + """ + Return True if current CPU is x86 32 bits.. + + Return + ------ + bool + CPU is x86 32 bits + """ + return self.current_cpu == 'x86' + + def current_dir(self, hidex86=False, x64=False): + """ + Current platform specific subfolder. + + Parameters + ---------- + hidex86: bool + return '' and not '\x86' if architecture is x86. + x64: bool + return '\x64' and not '\amd64' if architecture is amd64. + + Return + ------ + str + subfolder: '\target', or '' (see hidex86 parameter) + """ + return ( + '' if (self.current_cpu == 'x86' and hidex86) else + r'\x64' if (self.current_cpu == 'amd64' and x64) else + r'\%s' % self.current_cpu + ) + + def target_dir(self, hidex86=False, x64=False): + r""" + Target platform specific subfolder. 
+ + Parameters + ---------- + hidex86: bool + return '' and not '\x86' if architecture is x86. + x64: bool + return '\x64' and not '\amd64' if architecture is amd64. + + Return + ------ + str + subfolder: '\current', or '' (see hidex86 parameter) + """ + return ( + '' if (self.target_cpu == 'x86' and hidex86) else + r'\x64' if (self.target_cpu == 'amd64' and x64) else + r'\%s' % self.target_cpu + ) + + def cross_dir(self, forcex86=False): + r""" + Cross platform specific subfolder. + + Parameters + ---------- + forcex86: bool + Use 'x86' as current architecture even if current architecture is + not x86. + + Return + ------ + str + subfolder: '' if target architecture is current architecture, + '\current_target' if not. + """ + current = 'x86' if forcex86 else self.current_cpu + return ( + '' if self.target_cpu == current else + self.target_dir().replace('\\', '\\%s_' % current) + ) + + +class RegistryInfo: + """ + Microsoft Visual Studio related registry information. + + Parameters + ---------- + platform_info: PlatformInfo + "PlatformInfo" instance. + """ + HKEYS = (winreg.HKEY_USERS, + winreg.HKEY_CURRENT_USER, + winreg.HKEY_LOCAL_MACHINE, + winreg.HKEY_CLASSES_ROOT) + + def __init__(self, platform_info): + self.pi = platform_info + + @property + def visualstudio(self): + """ + Microsoft Visual Studio root registry key. + + Return + ------ + str + Registry key + """ + return 'VisualStudio' + + @property + def sxs(self): + """ + Microsoft Visual Studio SxS registry key. + + Return + ------ + str + Registry key + """ + return join(self.visualstudio, 'SxS') + + @property + def vc(self): + """ + Microsoft Visual C++ VC7 registry key. + + Return + ------ + str + Registry key + """ + return join(self.sxs, 'VC7') + + @property + def vs(self): + """ + Microsoft Visual Studio VS7 registry key. + + Return + ------ + str + Registry key + """ + return join(self.sxs, 'VS7') + + @property + def vc_for_python(self): + """ + Microsoft Visual C++ for Python registry key. + + Return + ------ + str + Registry key + """ + return r'DevDiv\VCForPython' + + @property + def microsoft_sdk(self): + """ + Microsoft SDK registry key. + + Return + ------ + str + Registry key + """ + return 'Microsoft SDKs' + + @property + def windows_sdk(self): + """ + Microsoft Windows/Platform SDK registry key. + + Return + ------ + str + Registry key + """ + return join(self.microsoft_sdk, 'Windows') + + @property + def netfx_sdk(self): + """ + Microsoft .NET Framework SDK registry key. + + Return + ------ + str + Registry key + """ + return join(self.microsoft_sdk, 'NETFXSDK') + + @property + def windows_kits_roots(self): + """ + Microsoft Windows Kits Roots registry key. + + Return + ------ + str + Registry key + """ + return r'Windows Kits\Installed Roots' + + def microsoft(self, key, x86=False): + """ + Return key in Microsoft software registry. + + Parameters + ---------- + key: str + Registry key path where look. + x86: str + Force x86 software registry. + + Return + ------ + str + Registry key + """ + node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node' + return join('Software', node64, 'Microsoft', key) + + def lookup(self, key, name): + """ + Look for values in registry in Microsoft software registry. + + Parameters + ---------- + key: str + Registry key path where look. + name: str + Value name to find. 
+ + Return + ------ + str + value + """ + key_read = winreg.KEY_READ + openkey = winreg.OpenKey + closekey = winreg.CloseKey + ms = self.microsoft + for hkey in self.HKEYS: + bkey = None + try: + bkey = openkey(hkey, ms(key), 0, key_read) + except (OSError, IOError): + if not self.pi.current_is_x86(): + try: + bkey = openkey(hkey, ms(key, True), 0, key_read) + except (OSError, IOError): + continue + else: + continue + try: + return winreg.QueryValueEx(bkey, name)[0] + except (OSError, IOError): + pass + finally: + if bkey: + closekey(bkey) + + +class SystemInfo: + """ + Microsoft Windows and Visual Studio related system information. + + Parameters + ---------- + registry_info: RegistryInfo + "RegistryInfo" instance. + vc_ver: float + Required Microsoft Visual C++ version. + """ + + # Variables and properties in this class use originals CamelCase variables + # names from Microsoft source files for more easy comparison. + WinDir = environ.get('WinDir', '') + ProgramFiles = environ.get('ProgramFiles', '') + ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles) + + def __init__(self, registry_info, vc_ver=None): + self.ri = registry_info + self.pi = self.ri.pi + + self.known_vs_paths = self.find_programdata_vs_vers() + + # Except for VS15+, VC version is aligned with VS version + self.vs_ver = self.vc_ver = ( + vc_ver or self._find_latest_available_vs_ver()) + + def _find_latest_available_vs_ver(self): + """ + Find the latest VC version + + Return + ------ + float + version + """ + reg_vc_vers = self.find_reg_vs_vers() + + if not (reg_vc_vers or self.known_vs_paths): + raise distutils.errors.DistutilsPlatformError( + 'No Microsoft Visual C++ version found') + + vc_vers = set(reg_vc_vers) + vc_vers.update(self.known_vs_paths) + return sorted(vc_vers)[-1] + + def find_reg_vs_vers(self): + """ + Find Microsoft Visual Studio versions available in registry. + + Return + ------ + list of float + Versions + """ + ms = self.ri.microsoft + vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs) + vs_vers = [] + for hkey, key in itertools.product(self.ri.HKEYS, vckeys): + try: + bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ) + except (OSError, IOError): + continue + with bkey: + subkeys, values, _ = winreg.QueryInfoKey(bkey) + for i in range(values): + with contextlib.suppress(ValueError): + ver = float(winreg.EnumValue(bkey, i)[0]) + if ver not in vs_vers: + vs_vers.append(ver) + for i in range(subkeys): + with contextlib.suppress(ValueError): + ver = float(winreg.EnumKey(bkey, i)) + if ver not in vs_vers: + vs_vers.append(ver) + return sorted(vs_vers) + + def find_programdata_vs_vers(self): + r""" + Find Visual studio 2017+ versions from information in + "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances". + + Return + ------ + dict + float version as key, path as value. 
+ """ + vs_versions = {} + instances_dir = \ + r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances' + + try: + hashed_names = listdir(instances_dir) + + except (OSError, IOError): + # Directory not exists with all Visual Studio versions + return vs_versions + + for name in hashed_names: + try: + # Get VS installation path from "state.json" file + state_path = join(instances_dir, name, 'state.json') + with open(state_path, 'rt', encoding='utf-8') as state_file: + state = json.load(state_file) + vs_path = state['installationPath'] + + # Raises OSError if this VS installation does not contain VC + listdir(join(vs_path, r'VC\Tools\MSVC')) + + # Store version and path + vs_versions[self._as_float_version( + state['installationVersion'])] = vs_path + + except (OSError, IOError, KeyError): + # Skip if "state.json" file is missing or bad format + continue + + return vs_versions + + @staticmethod + def _as_float_version(version): + """ + Return a string version as a simplified float version (major.minor) + + Parameters + ---------- + version: str + Version. + + Return + ------ + float + version + """ + return float('.'.join(version.split('.')[:2])) + + @property + def VSInstallDir(self): + """ + Microsoft Visual Studio directory. + + Return + ------ + str + path + """ + # Default path + default = join(self.ProgramFilesx86, + 'Microsoft Visual Studio %0.1f' % self.vs_ver) + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default + + @property + def VCInstallDir(self): + """ + Microsoft Visual C++ directory. + + Return + ------ + str + path + """ + path = self._guess_vc() or self._guess_vc_legacy() + + if not isdir(path): + msg = 'Microsoft Visual C++ directory not found' + raise distutils.errors.DistutilsPlatformError(msg) + + return path + + def _guess_vc(self): + """ + Locate Visual C++ for VS2017+. + + Return + ------ + str + path + """ + if self.vs_ver <= 14.0: + return '' + + try: + # First search in known VS paths + vs_dir = self.known_vs_paths[self.vs_ver] + except KeyError: + # Else, search with path from registry + vs_dir = self.VSInstallDir + + guess_vc = join(vs_dir, r'VC\Tools\MSVC') + + # Subdir with VC exact version as name + try: + # Update the VC version with real one instead of VS version + vc_ver = listdir(guess_vc)[-1] + self.vc_ver = self._as_float_version(vc_ver) + return join(guess_vc, vc_ver) + except (OSError, IOError, IndexError): + return '' + + def _guess_vc_legacy(self): + """ + Locate Visual C++ for versions prior to 2017. + + Return + ------ + str + path + """ + default = join(self.ProgramFilesx86, + r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver) + + # Try to get "VC++ for Python" path from registry as default path + reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver) + python_vc = self.ri.lookup(reg_path, 'installdir') + default_vc = join(python_vc, 'VC') if python_vc else default + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc + + @property + def WindowsSdkVersion(self): + """ + Microsoft Windows SDK versions for specified MSVC++ version. 
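Illustrative note, not part of the vendored file: registry values and state.json report full version strings such as '15.9.28307.1064'; _as_float_version keeps only major.minor so versions can be compared numerically. A tiny sketch of that behaviour (the sample version string is made up):

def as_float_version(version):
    # mirrors SystemInfo._as_float_version: keep only 'major.minor'
    return float('.'.join(version.split('.')[:2]))

assert as_float_version('15.9.28307.1064') == 15.9
assert as_float_version('14.0') == 14.0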
+ + Return + ------ + tuple of str + versions + """ + if self.vs_ver <= 9.0: + return '7.0', '6.1', '6.0a' + elif self.vs_ver == 10.0: + return '7.1', '7.0a' + elif self.vs_ver == 11.0: + return '8.0', '8.0a' + elif self.vs_ver == 12.0: + return '8.1', '8.1a' + elif self.vs_ver >= 14.0: + return '10.0', '8.1' + + @property + def WindowsSdkLastVersion(self): + """ + Microsoft Windows SDK last version. + + Return + ------ + str + version + """ + return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib')) + + @property # noqa: C901 + def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME + """ + Microsoft Windows SDK directory. + + Return + ------ + str + path + """ + sdkdir = '' + for ver in self.WindowsSdkVersion: + # Try to get it from registry + loc = join(self.ri.windows_sdk, 'v%s' % ver) + sdkdir = self.ri.lookup(loc, 'installationfolder') + if sdkdir: + break + if not sdkdir or not isdir(sdkdir): + # Try to get "VC++ for Python" version from registry + path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) + install_base = self.ri.lookup(path, 'installdir') + if install_base: + sdkdir = join(install_base, 'WinSDK') + if not sdkdir or not isdir(sdkdir): + # If fail, use default new path + for ver in self.WindowsSdkVersion: + intver = ver[:ver.rfind('.')] + path = r'Microsoft SDKs\Windows Kits\%s' % intver + d = join(self.ProgramFiles, path) + if isdir(d): + sdkdir = d + if not sdkdir or not isdir(sdkdir): + # If fail, use default old path + for ver in self.WindowsSdkVersion: + path = r'Microsoft SDKs\Windows\v%s' % ver + d = join(self.ProgramFiles, path) + if isdir(d): + sdkdir = d + if not sdkdir: + # If fail, use Platform SDK + sdkdir = join(self.VCInstallDir, 'PlatformSDK') + return sdkdir + + @property + def WindowsSDKExecutablePath(self): + """ + Microsoft Windows SDK executable directory. + + Return + ------ + str + path + """ + # Find WinSDK NetFx Tools registry dir name + if self.vs_ver <= 11.0: + netfxver = 35 + arch = '' + else: + netfxver = 40 + hidex86 = True if self.vs_ver <= 12.0 else False + arch = self.pi.current_dir(x64=True, hidex86=hidex86) + fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) + + # list all possibles registry paths + regpaths = [] + if self.vs_ver >= 14.0: + for ver in self.NetFxSdkVersion: + regpaths += [join(self.ri.netfx_sdk, ver, fx)] + + for ver in self.WindowsSdkVersion: + regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)] + + # Return installation folder from the more recent path + for path in regpaths: + execpath = self.ri.lookup(path, 'installationfolder') + if execpath: + return execpath + + @property + def FSharpInstallDir(self): + """ + Microsoft Visual F# directory. + + Return + ------ + str + path + """ + path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver) + return self.ri.lookup(path, 'productdir') or '' + + @property + def UniversalCRTSdkDir(self): + """ + Microsoft Universal CRT SDK directory. + + Return + ------ + str + path + """ + # Set Kit Roots versions for specified MSVC++ version + vers = ('10', '81') if self.vs_ver >= 14.0 else () + + # Find path of the more recent Kit + for ver in vers: + sdkdir = self.ri.lookup(self.ri.windows_kits_roots, + 'kitsroot%s' % ver) + if sdkdir: + return sdkdir or '' + + @property + def UniversalCRTSdkLastVersion(self): + """ + Microsoft Universal C Runtime SDK last version. 
+ + Return + ------ + str + version + """ + return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib')) + + @property + def NetFxSdkVersion(self): + """ + Microsoft .NET Framework SDK versions. + + Return + ------ + tuple of str + versions + """ + # Set FxSdk versions for specified VS version + return (('4.7.2', '4.7.1', '4.7', + '4.6.2', '4.6.1', '4.6', + '4.5.2', '4.5.1', '4.5') + if self.vs_ver >= 14.0 else ()) + + @property + def NetFxSdkDir(self): + """ + Microsoft .NET Framework SDK directory. + + Return + ------ + str + path + """ + sdkdir = '' + for ver in self.NetFxSdkVersion: + loc = join(self.ri.netfx_sdk, ver) + sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') + if sdkdir: + break + return sdkdir + + @property + def FrameworkDir32(self): + """ + Microsoft .NET Framework 32bit directory. + + Return + ------ + str + path + """ + # Default path + guess_fw = join(self.WinDir, r'Microsoft.NET\Framework') + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw + + @property + def FrameworkDir64(self): + """ + Microsoft .NET Framework 64bit directory. + + Return + ------ + str + path + """ + # Default path + guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64') + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw + + @property + def FrameworkVersion32(self): + """ + Microsoft .NET Framework 32bit versions. + + Return + ------ + tuple of str + versions + """ + return self._find_dot_net_versions(32) + + @property + def FrameworkVersion64(self): + """ + Microsoft .NET Framework 64bit versions. + + Return + ------ + tuple of str + versions + """ + return self._find_dot_net_versions(64) + + def _find_dot_net_versions(self, bits): + """ + Find Microsoft .NET Framework versions. + + Parameters + ---------- + bits: int + Platform number of bits: 32 or 64. + + Return + ------ + tuple of str + versions + """ + # Find actual .NET version in registry + reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) + dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) + ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' + + # Set .NET versions for specified MSVC++ version + if self.vs_ver >= 12.0: + return ver, 'v4.0' + elif self.vs_ver >= 10.0: + return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5' + elif self.vs_ver == 9.0: + return 'v3.5', 'v2.0.50727' + elif self.vs_ver == 8.0: + return 'v3.0', 'v2.0.50727' + + @staticmethod + def _use_last_dir_name(path, prefix=''): + """ + Return name of the last dir in path or '' if no dir found. + + Parameters + ---------- + path: str + Use dirs in this path + prefix: str + Use only dirs starting by this prefix + + Return + ------ + str + name + """ + matching_dirs = ( + dir_name + for dir_name in reversed(listdir(path)) + if isdir(join(path, dir_name)) and + dir_name.startswith(prefix) + ) + return next(matching_dirs, None) or '' + + +class EnvironmentInfo: + """ + Return environment variables for specified Microsoft Visual C++ version + and platform : Lib, Include, Path and libpath. + + This function is compatible with Microsoft Visual C++ 9.0 to 14.X. + + Script created by analysing Microsoft environment configuration files like + "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... + + Parameters + ---------- + arch: str + Target architecture. + vc_ver: float + Required Microsoft Visual C++ version. If not set, autodetect the last + version. 
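Illustrative note, not part of the vendored file: _use_last_dir_name is how the "latest" SDK, CRT or .NET version gets picked, by taking the last matching subdirectory name. A self-contained sketch of the same idea; it sorts explicitly for determinism, whereas the code above relies on the order listdir() returns on Windows:

import os
import tempfile

def use_last_dir_name(path, prefix=''):
    # mirrors SystemInfo._use_last_dir_name: last matching subdir, or ''
    matching = (
        name for name in sorted(os.listdir(path), reverse=True)
        if os.path.isdir(os.path.join(path, name)) and name.startswith(prefix)
    )
    return next(matching, '')

with tempfile.TemporaryDirectory() as tmp:
    for name in ('v3.5', 'v4.0.30319', 'notes'):
        os.mkdir(os.path.join(tmp, name))
    assert use_last_dir_name(tmp, prefix='v') == 'v4.0.30319'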
+ vc_min_ver: float + Minimum Microsoft Visual C++ version. + """ + + # Variables and properties in this class use originals CamelCase variables + # names from Microsoft source files for more easy comparison. + + def __init__(self, arch, vc_ver=None, vc_min_ver=0): + self.pi = PlatformInfo(arch) + self.ri = RegistryInfo(self.pi) + self.si = SystemInfo(self.ri, vc_ver) + + if self.vc_ver < vc_min_ver: + err = 'No suitable Microsoft Visual C++ version found' + raise distutils.errors.DistutilsPlatformError(err) + + @property + def vs_ver(self): + """ + Microsoft Visual Studio. + + Return + ------ + float + version + """ + return self.si.vs_ver + + @property + def vc_ver(self): + """ + Microsoft Visual C++ version. + + Return + ------ + float + version + """ + return self.si.vc_ver + + @property + def VSTools(self): + """ + Microsoft Visual Studio Tools. + + Return + ------ + list of str + paths + """ + paths = [r'Common7\IDE', r'Common7\Tools'] + + if self.vs_ver >= 14.0: + arch_subdir = self.pi.current_dir(hidex86=True, x64=True) + paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] + paths += [r'Team Tools\Performance Tools'] + paths += [r'Team Tools\Performance Tools%s' % arch_subdir] + + return [join(self.si.VSInstallDir, path) for path in paths] + + @property + def VCIncludes(self): + """ + Microsoft Visual C++ & Microsoft Foundation Class Includes. + + Return + ------ + list of str + paths + """ + return [join(self.si.VCInstallDir, 'Include'), + join(self.si.VCInstallDir, r'ATLMFC\Include')] + + @property + def VCLibraries(self): + """ + Microsoft Visual C++ & Microsoft Foundation Class Libraries. + + Return + ------ + list of str + paths + """ + if self.vs_ver >= 15.0: + arch_subdir = self.pi.target_dir(x64=True) + else: + arch_subdir = self.pi.target_dir(hidex86=True) + paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] + + if self.vs_ver >= 14.0: + paths += [r'Lib\store%s' % arch_subdir] + + return [join(self.si.VCInstallDir, path) for path in paths] + + @property + def VCStoreRefs(self): + """ + Microsoft Visual C++ store references Libraries. + + Return + ------ + list of str + paths + """ + if self.vs_ver < 14.0: + return [] + return [join(self.si.VCInstallDir, r'Lib\store\references')] + + @property + def VCTools(self): + """ + Microsoft Visual C++ Tools. + + Return + ------ + list of str + paths + """ + si = self.si + tools = [join(si.VCInstallDir, 'VCPackages')] + + forcex86 = True if self.vs_ver <= 10.0 else False + arch_subdir = self.pi.cross_dir(forcex86) + if arch_subdir: + tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)] + + if self.vs_ver == 14.0: + path = 'Bin%s' % self.pi.current_dir(hidex86=True) + tools += [join(si.VCInstallDir, path)] + + elif self.vs_ver >= 15.0: + host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else + r'bin\HostX64%s') + tools += [join( + si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))] + + if self.pi.current_cpu != self.pi.target_cpu: + tools += [join( + si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] + + else: + tools += [join(si.VCInstallDir, 'Bin')] + + return tools + + @property + def OSLibraries(self): + """ + Microsoft Windows SDK Libraries. 
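Illustrative note, not part of the vendored file: for VS2017+ the VCTools property selects a bin\Host<host>\<target> directory. A small sketch of just that path selection, mirroring the host_dir/target_dir logic above:

def vc_tools_bin_subdir(current_cpu, target_cpu):
    # mirrors the vs_ver >= 15.0 branch of EnvironmentInfo.VCTools:
    # bin\Host<host>\<target>, e.g. r'bin\HostX64\x64'
    host = 'HostX86' if current_cpu == 'x86' else 'HostX64'
    target = r'\x64' if target_cpu == 'amd64' else '\\%s' % target_cpu
    return 'bin\\%s%s' % (host, target)

assert vc_tools_bin_subdir('amd64', 'amd64') == r'bin\HostX64\x64'
assert vc_tools_bin_subdir('amd64', 'x86') == r'bin\HostX64\x86'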
+ + Return + ------ + list of str + paths + """ + if self.vs_ver <= 10.0: + arch_subdir = self.pi.target_dir(hidex86=True, x64=True) + return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] + + else: + arch_subdir = self.pi.target_dir(x64=True) + lib = join(self.si.WindowsSdkDir, 'lib') + libver = self._sdk_subdir + return [join(lib, '%sum%s' % (libver, arch_subdir))] + + @property + def OSIncludes(self): + """ + Microsoft Windows SDK Include. + + Return + ------ + list of str + paths + """ + include = join(self.si.WindowsSdkDir, 'include') + + if self.vs_ver <= 10.0: + return [include, join(include, 'gl')] + + else: + if self.vs_ver >= 14.0: + sdkver = self._sdk_subdir + else: + sdkver = '' + return [join(include, '%sshared' % sdkver), + join(include, '%sum' % sdkver), + join(include, '%swinrt' % sdkver)] + + @property + def OSLibpath(self): + """ + Microsoft Windows SDK Libraries Paths. + + Return + ------ + list of str + paths + """ + ref = join(self.si.WindowsSdkDir, 'References') + libpath = [] + + if self.vs_ver <= 9.0: + libpath += self.OSLibraries + + if self.vs_ver >= 11.0: + libpath += [join(ref, r'CommonConfiguration\Neutral')] + + if self.vs_ver >= 14.0: + libpath += [ + ref, + join(self.si.WindowsSdkDir, 'UnionMetadata'), + join( + ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'), + join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'), + join( + ref, 'Windows.Networking.Connectivity.WwanContract', + '1.0.0.0'), + join( + self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', + '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration', + 'neutral'), + ] + return libpath + + @property + def SdkTools(self): + """ + Microsoft Windows SDK Tools. + + Return + ------ + list of str + paths + """ + return list(self._sdk_tools()) + + def _sdk_tools(self): + """ + Microsoft Windows SDK Tools paths generator. + + Return + ------ + generator of str + paths + """ + if self.vs_ver < 15.0: + bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86' + yield join(self.si.WindowsSdkDir, bin_dir) + + if not self.pi.current_is_x86(): + arch_subdir = self.pi.current_dir(x64=True) + path = 'Bin%s' % arch_subdir + yield join(self.si.WindowsSdkDir, path) + + if self.vs_ver in (10.0, 11.0): + if self.pi.target_is_x86(): + arch_subdir = '' + else: + arch_subdir = self.pi.current_dir(hidex86=True, x64=True) + path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir + yield join(self.si.WindowsSdkDir, path) + + elif self.vs_ver >= 15.0: + path = join(self.si.WindowsSdkDir, 'Bin') + arch_subdir = self.pi.current_dir(x64=True) + sdkver = self.si.WindowsSdkLastVersion + yield join(path, '%s%s' % (sdkver, arch_subdir)) + + if self.si.WindowsSDKExecutablePath: + yield self.si.WindowsSDKExecutablePath + + @property + def _sdk_subdir(self): + """ + Microsoft Windows SDK version subdir. + + Return + ------ + str + subdir + """ + ucrtver = self.si.WindowsSdkLastVersion + return ('%s\\' % ucrtver) if ucrtver else '' + + @property + def SdkSetup(self): + """ + Microsoft Windows SDK Setup. + + Return + ------ + list of str + paths + """ + if self.vs_ver > 9.0: + return [] + + return [join(self.si.WindowsSdkDir, 'Setup')] + + @property + def FxTools(self): + """ + Microsoft .NET Framework Tools. 
+ + Return + ------ + list of str + paths + """ + pi = self.pi + si = self.si + + if self.vs_ver <= 10.0: + include32 = True + include64 = not pi.target_is_x86() and not pi.current_is_x86() + else: + include32 = pi.target_is_x86() or pi.current_is_x86() + include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' + + tools = [] + if include32: + tools += [join(si.FrameworkDir32, ver) + for ver in si.FrameworkVersion32] + if include64: + tools += [join(si.FrameworkDir64, ver) + for ver in si.FrameworkVersion64] + return tools + + @property + def NetFxSDKLibraries(self): + """ + Microsoft .Net Framework SDK Libraries. + + Return + ------ + list of str + paths + """ + if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: + return [] + + arch_subdir = self.pi.target_dir(x64=True) + return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] + + @property + def NetFxSDKIncludes(self): + """ + Microsoft .Net Framework SDK Includes. + + Return + ------ + list of str + paths + """ + if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: + return [] + + return [join(self.si.NetFxSdkDir, r'include\um')] + + @property + def VsTDb(self): + """ + Microsoft Visual Studio Team System Database. + + Return + ------ + list of str + paths + """ + return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')] + + @property + def MSBuild(self): + """ + Microsoft Build Engine. + + Return + ------ + list of str + paths + """ + if self.vs_ver < 12.0: + return [] + elif self.vs_ver < 15.0: + base_path = self.si.ProgramFilesx86 + arch_subdir = self.pi.current_dir(hidex86=True) + else: + base_path = self.si.VSInstallDir + arch_subdir = '' + + path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir) + build = [join(base_path, path)] + + if self.vs_ver >= 15.0: + # Add Roslyn C# & Visual Basic Compiler + build += [join(base_path, path, 'Roslyn')] + + return build + + @property + def HTMLHelpWorkshop(self): + """ + Microsoft HTML Help Workshop. + + Return + ------ + list of str + paths + """ + if self.vs_ver < 11.0: + return [] + + return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')] + + @property + def UCRTLibraries(self): + """ + Microsoft Universal C Runtime SDK Libraries. + + Return + ------ + list of str + paths + """ + if self.vs_ver < 14.0: + return [] + + arch_subdir = self.pi.target_dir(x64=True) + lib = join(self.si.UniversalCRTSdkDir, 'lib') + ucrtver = self._ucrt_subdir + return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))] + + @property + def UCRTIncludes(self): + """ + Microsoft Universal C Runtime SDK Include. + + Return + ------ + list of str + paths + """ + if self.vs_ver < 14.0: + return [] + + include = join(self.si.UniversalCRTSdkDir, 'include') + return [join(include, '%sucrt' % self._ucrt_subdir)] + + @property + def _ucrt_subdir(self): + """ + Microsoft Universal C Runtime SDK version subdir. + + Return + ------ + str + subdir + """ + ucrtver = self.si.UniversalCRTSdkLastVersion + return ('%s\\' % ucrtver) if ucrtver else '' + + @property + def FSharp(self): + """ + Microsoft Visual F#. + + Return + ------ + list of str + paths + """ + if 11.0 > self.vs_ver > 12.0: + return [] + + return [self.si.FSharpInstallDir] + + @property + def VCRuntimeRedist(self): + """ + Microsoft Visual C++ runtime redistributable dll. 
+ + Return + ------ + str + path + """ + vcruntime = 'vcruntime%d0.dll' % self.vc_ver + arch_subdir = self.pi.target_dir(x64=True).strip('\\') + + # Installation prefixes candidates + prefixes = [] + tools_path = self.si.VCInstallDir + redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist')) + if isdir(redist_path): + # Redist version may not be exactly the same as tools + redist_path = join(redist_path, listdir(redist_path)[-1]) + prefixes += [redist_path, join(redist_path, 'onecore')] + + prefixes += [join(tools_path, 'redist')] # VS14 legacy path + + # CRT directory + crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10), + # Sometime store in directory with VS version instead of VC + 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10)) + + # vcruntime path + for prefix, crt_dir in itertools.product(prefixes, crt_dirs): + path = join(prefix, arch_subdir, crt_dir, vcruntime) + if isfile(path): + return path + + def return_env(self, exists=True): + """ + Return environment dict. + + Parameters + ---------- + exists: bool + It True, only return existing paths. + + Return + ------ + dict + environment + """ + env = dict( + include=self._build_paths('include', + [self.VCIncludes, + self.OSIncludes, + self.UCRTIncludes, + self.NetFxSDKIncludes], + exists), + lib=self._build_paths('lib', + [self.VCLibraries, + self.OSLibraries, + self.FxTools, + self.UCRTLibraries, + self.NetFxSDKLibraries], + exists), + libpath=self._build_paths('libpath', + [self.VCLibraries, + self.FxTools, + self.VCStoreRefs, + self.OSLibpath], + exists), + path=self._build_paths('path', + [self.VCTools, + self.VSTools, + self.VsTDb, + self.SdkTools, + self.SdkSetup, + self.FxTools, + self.MSBuild, + self.HTMLHelpWorkshop, + self.FSharp], + exists), + ) + if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist): + env['py_vcruntime_redist'] = self.VCRuntimeRedist + return env + + def _build_paths(self, name, spec_path_lists, exists): + """ + Given an environment variable name and specified paths, + return a pathsep-separated string of paths containing + unique, extant, directories from those paths and from + the environment variable. Raise an error if no paths + are resolved. + + Parameters + ---------- + name: str + Environment variable name + spec_path_lists: list of str + Paths + exists: bool + It True, only return existing paths. 
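Illustrative usage sketch, not part of the vendored file: return_env is what the msvc14_get_vc_env wrapper ultimately relies on. The snippet assumes a Windows host with setuptools and a Visual C++ 14+ toolchain installed, and that this module is importable as setuptools.msvc; it is a no-op elsewhere and raises DistutilsPlatformError when no suitable compiler is found:

import os
import platform

if platform.system() == 'Windows':
    # Requires an installed MSVC >= 14.0; raises DistutilsPlatformError otherwise.
    from setuptools.msvc import EnvironmentInfo
    env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
    for key in ('include', 'lib', 'libpath', 'path'):
        print(key, '->', len(env[key].split(os.pathsep)), 'entries')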
+ + Return + ------ + str + Pathsep-separated paths + """ + # flatten spec_path_lists + spec_paths = itertools.chain.from_iterable(spec_path_lists) + env_paths = environ.get(name, '').split(pathsep) + paths = itertools.chain(spec_paths, env_paths) + extant_paths = list(filter(isdir, paths)) if exists else paths + if not extant_paths: + msg = "%s environment variable is empty" % name.upper() + raise distutils.errors.DistutilsPlatformError(msg) + unique_paths = unique_everseen(extant_paths) + return pathsep.join(unique_paths) diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/namespaces.py b/env-llmeval/lib/python3.10/site-packages/setuptools/namespaces.py new file mode 100644 index 0000000000000000000000000000000000000000..44939e1c6d40539eb8173bf1527db926c5a54658 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/namespaces.py @@ -0,0 +1,107 @@ +import os +from distutils import log +import itertools + + +flatten = itertools.chain.from_iterable + + +class Installer: + + nspkg_ext = '-nspkg.pth' + + def install_namespaces(self): + nsp = self._get_all_ns_packages() + if not nsp: + return + filename, ext = os.path.splitext(self._get_target()) + filename += self.nspkg_ext + self.outputs.append(filename) + log.info("Installing %s", filename) + lines = map(self._gen_nspkg_line, nsp) + + if self.dry_run: + # always generate the lines, even in dry run + list(lines) + return + + with open(filename, 'wt') as f: + f.writelines(lines) + + def uninstall_namespaces(self): + filename, ext = os.path.splitext(self._get_target()) + filename += self.nspkg_ext + if not os.path.exists(filename): + return + log.info("Removing %s", filename) + os.remove(filename) + + def _get_target(self): + return self.target + + _nspkg_tmpl = ( + "import sys, types, os", + "has_mfs = sys.version_info > (3, 5)", + "p = os.path.join(%(root)s, *%(pth)r)", + "importlib = has_mfs and __import__('importlib.util')", + "has_mfs and __import__('importlib.machinery')", + ( + "m = has_mfs and " + "sys.modules.setdefault(%(pkg)r, " + "importlib.util.module_from_spec(" + "importlib.machinery.PathFinder.find_spec(%(pkg)r, " + "[os.path.dirname(p)])))" + ), + ( + "m = m or " + "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))" + ), + "mp = (m or []) and m.__dict__.setdefault('__path__',[])", + "(p not in mp) and mp.append(p)", + ) + "lines for the namespace installer" + + _nspkg_tmpl_multi = ( + 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', + ) + "additional line(s) when a parent package is indicated" + + def _get_root(self): + return "sys._getframe(1).f_locals['sitedir']" + + def _gen_nspkg_line(self, pkg): + pth = tuple(pkg.split('.')) + root = self._get_root() + tmpl_lines = self._nspkg_tmpl + parent, sep, child = pkg.rpartition('.') + if parent: + tmpl_lines += self._nspkg_tmpl_multi + return ';'.join(tmpl_lines) % locals() + '\n' + + def _get_all_ns_packages(self): + """Return sorted list of all package namespaces""" + pkgs = self.distribution.namespace_packages or [] + return sorted(flatten(map(self._pkg_names, pkgs))) + + @staticmethod + def _pkg_names(pkg): + """ + Given a namespace package, yield the components of that + package. 
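Illustrative note, not part of the vendored file: install_namespaces writes one ';'-joined line per namespace package into a *-nspkg.pth file. A simplified sketch of the string assembly done by _gen_nspkg_line (the template here is cut down from _nspkg_tmpl above, and 'zope' is just a sample package name):

def gen_nspkg_line(pkg):
    # simplified: real code uses the full _nspkg_tmpl tuple shown above
    pth = tuple(pkg.split('.'))
    root = "sys._getframe(1).f_locals['sitedir']"
    tmpl = (
        "import sys, types, os",
        "p = os.path.join(%(root)s, *%(pth)r)",
        "m = sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
        "m.__dict__.setdefault('__path__', []).append(p)",
    )
    return ';'.join(tmpl) % locals() + '\n'

line = gen_nspkg_line('zope')
assert line.startswith('import sys, types, os;')
assert "'zope'" in line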
+ + >>> names = Installer._pkg_names('a.b.c') + >>> set(names) == set(['a', 'a.b', 'a.b.c']) + True + """ + parts = pkg.split('.') + while parts: + yield '.'.join(parts) + parts.pop() + + +class DevelopInstaller(Installer): + def _get_root(self): + return repr(str(self.egg_path)) + + def _get_target(self): + return self.egg_link diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/package_index.py b/env-llmeval/lib/python3.10/site-packages/setuptools/package_index.py new file mode 100644 index 0000000000000000000000000000000000000000..e93fcc644645c3bf7cb100a38b0404e764635e7b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/package_index.py @@ -0,0 +1,1127 @@ +"""PyPI and direct package downloading""" +import sys +import os +import re +import io +import shutil +import socket +import base64 +import hashlib +import itertools +import warnings +import configparser +import html +import http.client +import urllib.parse +import urllib.request +import urllib.error +from functools import wraps + +import setuptools +from pkg_resources import ( + CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST, + Environment, find_distributions, safe_name, safe_version, + to_filename, Requirement, DEVELOP_DIST, EGG_DIST, parse_version, +) +from distutils import log +from distutils.errors import DistutilsError +from fnmatch import translate +from setuptools.wheel import Wheel +from setuptools.extern.more_itertools import unique_everseen + + +EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$') +HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I) +PYPI_MD5 = re.compile( + r'([^<]+)\n\s+\(md5\)' +) +URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match +EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() + +__all__ = [ + 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst', + 'interpret_distro_name', +] + +_SOCKET_TIMEOUT = 15 + +_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}" +user_agent = _tmpl.format( + py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools) + + +def parse_requirement_arg(spec): + try: + return Requirement.parse(spec) + except ValueError as e: + raise DistutilsError( + "Not a URL, existing file, or requirement spec: %r" % (spec,) + ) from e + + +def parse_bdist_wininst(name): + """Return (base,pyversion) or (None,None) for possible .exe name""" + + lower = name.lower() + base, py_ver, plat = None, None, None + + if lower.endswith('.exe'): + if lower.endswith('.win32.exe'): + base = name[:-10] + plat = 'win32' + elif lower.startswith('.win32-py', -16): + py_ver = name[-7:-4] + base = name[:-16] + plat = 'win32' + elif lower.endswith('.win-amd64.exe'): + base = name[:-14] + plat = 'win-amd64' + elif lower.startswith('.win-amd64-py', -20): + py_ver = name[-7:-4] + base = name[:-20] + plat = 'win-amd64' + return base, py_ver, plat + + +def egg_info_for_url(url): + parts = urllib.parse.urlparse(url) + scheme, server, path, parameters, query, fragment = parts + base = urllib.parse.unquote(path.split('/')[-1]) + if server == 'sourceforge.net' and base == 'download': # XXX Yuck + base = urllib.parse.unquote(path.split('/')[-2]) + if '#' in base: + base, fragment = base.split('#', 1) + return base, fragment + + +def distros_for_url(url, metadata=None): + """Yield egg or source distribution objects that might be found at a URL""" + base, fragment = egg_info_for_url(url) + for dist in distros_for_location(url, base, metadata): + yield dist + if fragment: + match = EGG_FRAGMENT.match(fragment) + if 
match: + for dist in interpret_distro_name( + url, match.group(1), metadata, precedence=CHECKOUT_DIST + ): + yield dist + + +def distros_for_location(location, basename, metadata=None): + """Yield egg or source distribution objects based on basename""" + if basename.endswith('.egg.zip'): + basename = basename[:-4] # strip the .zip + if basename.endswith('.egg') and '-' in basename: + # only one, unambiguous interpretation + return [Distribution.from_location(location, basename, metadata)] + if basename.endswith('.whl') and '-' in basename: + wheel = Wheel(basename) + if not wheel.is_compatible(): + return [] + return [Distribution( + location=location, + project_name=wheel.project_name, + version=wheel.version, + # Increase priority over eggs. + precedence=EGG_DIST + 1, + )] + if basename.endswith('.exe'): + win_base, py_ver, platform = parse_bdist_wininst(basename) + if win_base is not None: + return interpret_distro_name( + location, win_base, metadata, py_ver, BINARY_DIST, platform + ) + # Try source distro extensions (.zip, .tgz, etc.) + # + for ext in EXTENSIONS: + if basename.endswith(ext): + basename = basename[:-len(ext)] + return interpret_distro_name(location, basename, metadata) + return [] # no extension matched + + +def distros_for_filename(filename, metadata=None): + """Yield possible egg or source distribution objects based on a filename""" + return distros_for_location( + normalize_path(filename), os.path.basename(filename), metadata + ) + + +def interpret_distro_name( + location, basename, metadata, py_version=None, precedence=SOURCE_DIST, + platform=None +): + """Generate alternative interpretations of a source distro name + + Note: if `location` is a filesystem filename, you should call + ``pkg_resources.normalize_path()`` on it before passing it to this + routine! + """ + # Generate alternative interpretations of a source distro name + # Because some packages are ambiguous as to name/versions split + # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. + # So, we generate each possible interpretation (e.g. "adns, python-1.1.0" + # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice, + # the spurious interpretations should be ignored, because in the event + # there's also an "adns" package, the spurious "python-1.1.0" version will + # compare lower than any numeric version number, and is therefore unlikely + # to match a request for it. It's still a potential problem, though, and + # in the long run PyPI and the distutils should go for "safe" names and + # versions in distribution archive names (sdist and bdist). + + parts = basename.split('-') + if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]): + # it is a bdist_dumb, not an sdist -- bail out + return + + for p in range(1, len(parts) + 1): + yield Distribution( + location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), + py_version=py_version, precedence=precedence, + platform=platform + ) + + +def unique_values(func): + """ + Wrap a function returning an iterable such that the resulting iterable + only ever yields unique items. 
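Illustrative note, not part of the vendored file: interpret_distro_name has to guess where the project name ends and the version begins. A standalone sketch of the candidate splits it generates for an ambiguous basename:

def name_version_splits(basename):
    # mirrors the loop in interpret_distro_name: every possible split of
    # the basename into (project_name, version)
    parts = basename.split('-')
    for p in range(1, len(parts) + 1):
        yield '-'.join(parts[:p]), '-'.join(parts[p:])

assert list(name_version_splits('adns-python-1.1.0')) == [
    ('adns', 'python-1.1.0'),
    ('adns-python', '1.1.0'),
    ('adns-python-1.1.0', ''),
]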
+ """ + + @wraps(func) + def wrapper(*args, **kwargs): + return unique_everseen(func(*args, **kwargs)) + + return wrapper + + +REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I) +# this line is here to fix emacs' cruddy broken syntax highlighting + + +@unique_values +def find_external_links(url, page): + """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" + + for match in REL.finditer(page): + tag, rel = match.groups() + rels = set(map(str.strip, rel.lower().split(','))) + if 'homepage' in rels or 'download' in rels: + for match in HREF.finditer(tag): + yield urllib.parse.urljoin(url, htmldecode(match.group(1))) + + for tag in ("Home Page", "Download URL"): + pos = page.find(tag) + if pos != -1: + match = HREF.search(page, pos) + if match: + yield urllib.parse.urljoin(url, htmldecode(match.group(1))) + + +class ContentChecker: + """ + A null content checker that defines the interface for checking content + """ + + def feed(self, block): + """ + Feed a block of data to the hash. + """ + return + + def is_valid(self): + """ + Check the hash. Return False if validation fails. + """ + return True + + def report(self, reporter, template): + """ + Call reporter with information about the checker (hash name) + substituted into the template. + """ + return + + +class HashChecker(ContentChecker): + pattern = re.compile( + r'(?Psha1|sha224|sha384|sha256|sha512|md5)=' + r'(?P[a-f0-9]+)' + ) + + def __init__(self, hash_name, expected): + self.hash_name = hash_name + self.hash = hashlib.new(hash_name) + self.expected = expected + + @classmethod + def from_url(cls, url): + "Construct a (possibly null) ContentChecker from a URL" + fragment = urllib.parse.urlparse(url)[-1] + if not fragment: + return ContentChecker() + match = cls.pattern.search(fragment) + if not match: + return ContentChecker() + return cls(**match.groupdict()) + + def feed(self, block): + self.hash.update(block) + + def is_valid(self): + return self.hash.hexdigest() == self.expected + + def report(self, reporter, template): + msg = template % self.hash_name + return reporter(msg) + + +class PackageIndex(Environment): + """A distribution index that scans web pages for download URLs""" + + def __init__( + self, index_url="https://pypi.org/simple/", hosts=('*',), + ca_bundle=None, verify_ssl=True, *args, **kw + ): + Environment.__init__(self, *args, **kw) + self.index_url = index_url + "/" [:not index_url.endswith('/')] + self.scanned_urls = {} + self.fetched_urls = {} + self.package_pages = {} + self.allows = re.compile('|'.join(map(translate, hosts))).match + self.to_scan = [] + self.opener = urllib.request.urlopen + + def add(self, dist): + # ignore invalid versions + try: + parse_version(dist.version) + except Exception: + return + return super().add(dist) + + # FIXME: 'PackageIndex.process_url' is too complex (14) + def process_url(self, url, retrieve=False): # noqa: C901 + """Evaluate a URL as a possible download, and maybe retrieve it""" + if url in self.scanned_urls and not retrieve: + return + self.scanned_urls[url] = True + if not URL_SCHEME(url): + self.process_filename(url) + return + else: + dists = list(distros_for_url(url)) + if dists: + if not self.url_ok(url): + return + self.debug("Found link: %s", url) + + if dists or not retrieve or url in self.fetched_urls: + list(map(self.add, dists)) + return # don't need the actual page + + if not self.url_ok(url): + self.fetched_urls[url] = True + return + + self.info("Reading %s", url) + self.fetched_urls[url] = True # 
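Illustrative note, not part of the vendored file: HashChecker.from_url parses a '#md5=...' or '#sha256=...' fragment and later compares the streamed digest against it; since from_url calls cls(**match.groupdict()), the pattern's groups are named hash_name and expected. A standalone sketch of that round trip (the URL and payload are made up):

import hashlib
import re
import urllib.parse

pattern = re.compile(
    r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
    r'(?P<expected>[a-f0-9]+)'
)
data = b'example payload'
url = ('https://files.example.invalid/pkg-1.0.tar.gz#sha256='
       + hashlib.sha256(data).hexdigest())
fragment = urllib.parse.urlparse(url)[-1]             # same index as from_url above
match = pattern.search(fragment)
digest = hashlib.new(match.group('hash_name'))
digest.update(data)                                   # feed() equivalent
assert digest.hexdigest() == match.group('expected')  # is_valid() equivalent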
prevent multiple fetch attempts + tmpl = "Download error on %s: %%s -- Some packages may not be found!" + f = self.open_url(url, tmpl % url) + if f is None: + return + if isinstance(f, urllib.error.HTTPError) and f.code == 401: + self.info("Authentication error: %s" % f.msg) + self.fetched_urls[f.url] = True + if 'html' not in f.headers.get('content-type', '').lower(): + f.close() # not html, we can't process it + return + + base = f.url # handle redirects + page = f.read() + if not isinstance(page, str): + # In Python 3 and got bytes but want str. + if isinstance(f, urllib.error.HTTPError): + # Errors have no charset, assume latin1: + charset = 'latin-1' + else: + charset = f.headers.get_param('charset') or 'latin-1' + page = page.decode(charset, "ignore") + f.close() + for match in HREF.finditer(page): + link = urllib.parse.urljoin(base, htmldecode(match.group(1))) + self.process_url(link) + if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: + page = self.process_index(url, page) + + def process_filename(self, fn, nested=False): + # process filenames or directories + if not os.path.exists(fn): + self.warn("Not found: %s", fn) + return + + if os.path.isdir(fn) and not nested: + path = os.path.realpath(fn) + for item in os.listdir(path): + self.process_filename(os.path.join(path, item), True) + + dists = distros_for_filename(fn) + if dists: + self.debug("Found: %s", fn) + list(map(self.add, dists)) + + def url_ok(self, url, fatal=False): + s = URL_SCHEME(url) + is_file = s and s.group(1).lower() == 'file' + if is_file or self.allows(urllib.parse.urlparse(url)[1]): + return True + msg = ( + "\nNote: Bypassing %s (disallowed host; see " + "http://bit.ly/2hrImnY for details).\n") + if fatal: + raise DistutilsError(msg % url) + else: + self.warn(msg, url) + + def scan_egg_links(self, search_path): + dirs = filter(os.path.isdir, search_path) + egg_links = ( + (path, entry) + for path in dirs + for entry in os.listdir(path) + if entry.endswith('.egg-link') + ) + list(itertools.starmap(self.scan_egg_link, egg_links)) + + def scan_egg_link(self, path, entry): + with open(os.path.join(path, entry)) as raw_lines: + # filter non-empty lines + lines = list(filter(None, map(str.strip, raw_lines))) + + if len(lines) != 2: + # format is not recognized; punt + return + + egg_path, setup_path = lines + + for dist in find_distributions(os.path.join(path, egg_path)): + dist.location = os.path.join(path, *lines) + dist.precedence = SOURCE_DIST + self.add(dist) + + def _scan(self, link): + # Process a URL to see if it's for a package page + NO_MATCH_SENTINEL = None, None + if not link.startswith(self.index_url): + return NO_MATCH_SENTINEL + + parts = list(map( + urllib.parse.unquote, link[len(self.index_url):].split('/') + )) + if len(parts) != 2 or '#' in parts[1]: + return NO_MATCH_SENTINEL + + # it's a package page, sanitize and index it + pkg = safe_name(parts[0]) + ver = safe_version(parts[1]) + self.package_pages.setdefault(pkg.lower(), {})[link] = True + return to_filename(pkg), to_filename(ver) + + def process_index(self, url, page): + """Process the contents of a PyPI page""" + + # process an index page into the package-page index + for match in HREF.finditer(page): + try: + self._scan(urllib.parse.urljoin(url, htmldecode(match.group(1)))) + except ValueError: + pass + + pkg, ver = self._scan(url) # ensure this page is in the page index + if not pkg: + return "" # no sense double-scanning non-package pages + + # process individual package page + for new_url in 
find_external_links(url, page): + # Process the found URL + base, frag = egg_info_for_url(new_url) + if base.endswith('.py') and not frag: + if ver: + new_url += '#egg=%s-%s' % (pkg, ver) + else: + self.need_version_info(url) + self.scan_url(new_url) + + return PYPI_MD5.sub( + lambda m: '%s' % m.group(1, 3, 2), page + ) + + def need_version_info(self, url): + self.scan_all( + "Page at %s links to .py file(s) without version info; an index " + "scan is required.", url + ) + + def scan_all(self, msg=None, *args): + if self.index_url not in self.fetched_urls: + if msg: + self.warn(msg, *args) + self.info( + "Scanning index of all packages (this may take a while)" + ) + self.scan_url(self.index_url) + + def find_packages(self, requirement): + self.scan_url(self.index_url + requirement.unsafe_name + '/') + + if not self.package_pages.get(requirement.key): + # Fall back to safe version of the name + self.scan_url(self.index_url + requirement.project_name + '/') + + if not self.package_pages.get(requirement.key): + # We couldn't find the target package, so search the index page too + self.not_found_in_index(requirement) + + for url in list(self.package_pages.get(requirement.key, ())): + # scan each page that might be related to the desired package + self.scan_url(url) + + def obtain(self, requirement, installer=None): + self.prescan() + self.find_packages(requirement) + for dist in self[requirement.key]: + if dist in requirement: + return dist + self.debug("%s does not match %s", requirement, dist) + return super(PackageIndex, self).obtain(requirement, installer) + + def check_hash(self, checker, filename, tfp): + """ + checker is a ContentChecker + """ + checker.report( + self.debug, + "Validating %%s checksum for %s" % filename) + if not checker.is_valid(): + tfp.close() + os.unlink(filename) + raise DistutilsError( + "%s validation failed for %s; " + "possible download problem?" + % (checker.hash.name, os.path.basename(filename)) + ) + + def add_find_links(self, urls): + """Add `urls` to the list that will be prescanned for searches""" + for url in urls: + if ( + self.to_scan is None # if we have already "gone online" + or not URL_SCHEME(url) # or it's a local file/directory + or url.startswith('file:') + or list(distros_for_url(url)) # or a direct package link + ): + # then go ahead and process it now + self.scan_url(url) + else: + # otherwise, defer retrieval till later + self.to_scan.append(url) + + def prescan(self): + """Scan urls scheduled for prescanning (e.g. --find-links)""" + if self.to_scan: + list(map(self.scan_url, self.to_scan)) + self.to_scan = None # from now on, go ahead and process immediately + + def not_found_in_index(self, requirement): + if self[requirement.key]: # we've seen at least one distro + meth, msg = self.info, "Couldn't retrieve index page for %r" + else: # no distros seen for this name, might be misspelled + meth, msg = ( + self.warn, + "Couldn't find index page for %r (maybe misspelled?)") + meth(msg, requirement.unsafe_name) + self.scan_all() + + def download(self, spec, tmpdir): + """Locate and/or download `spec` to `tmpdir`, returning a local path + + `spec` may be a ``Requirement`` object, or a string containing a URL, + an existing local filename, or a project/version requirement spec + (i.e. the string form of a ``Requirement`` object). If it is the URL + of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one + that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is + automatically created alongside the downloaded file. 
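Illustrative note, not part of the vendored file: the unambiguous '#egg=name-version' tag mentioned above is matched by EGG_FRAGMENT, with '-' inside the name or version escaped as '_'. A short sketch of what does and does not match (sample fragments are made up):

import re

# same pattern as EGG_FRAGMENT at the top of package_index.py
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')

match = EGG_FRAGMENT.match('egg=my_project-1.0')
assert match and match.group(1) == 'my_project-1.0'
assert EGG_FRAGMENT.match('egg=bad/chars') is None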
+ + If `spec` is a ``Requirement`` object or a string containing a + project/version requirement spec, this method returns the location of + a matching distribution (possibly after downloading it to `tmpdir`). + If `spec` is a locally existing file or directory name, it is simply + returned unchanged. If `spec` is a URL, it is downloaded to a subpath + of `tmpdir`, and the local filename is returned. Various errors may be + raised if a problem occurs during downloading. + """ + if not isinstance(spec, Requirement): + scheme = URL_SCHEME(spec) + if scheme: + # It's a url, download it to tmpdir + found = self._download_url(scheme.group(1), spec, tmpdir) + base, fragment = egg_info_for_url(spec) + if base.endswith('.py'): + found = self.gen_setup(found, fragment, tmpdir) + return found + elif os.path.exists(spec): + # Existing file or directory, just return it + return spec + else: + spec = parse_requirement_arg(spec) + return getattr(self.fetch_distribution(spec, tmpdir), 'location', None) + + def fetch_distribution( # noqa: C901 # is too complex (14) # FIXME + self, requirement, tmpdir, force_scan=False, source=False, + develop_ok=False, local_index=None): + """Obtain a distribution suitable for fulfilling `requirement` + + `requirement` must be a ``pkg_resources.Requirement`` instance. + If necessary, or if the `force_scan` flag is set, the requirement is + searched for in the (online) package index as well as the locally + installed packages. If a distribution matching `requirement` is found, + the returned distribution's ``location`` is the value you would have + gotten from calling the ``download()`` method with the matching + distribution's URL or filename. If no matching distribution is found, + ``None`` is returned. + + If the `source` flag is set, only source distributions and source + checkout links will be considered. Unless the `develop_ok` flag is + set, development and system eggs (i.e., those using the ``.egg-info`` + format) will be ignored. + """ + # process a Requirement + self.info("Searching for %s", requirement) + skipped = {} + dist = None + + def find(req, env=None): + if env is None: + env = self + # Find a matching distribution; may be called more than once + + for dist in env[req.key]: + + if dist.precedence == DEVELOP_DIST and not develop_ok: + if dist not in skipped: + self.warn( + "Skipping development or system egg: %s", dist, + ) + skipped[dist] = 1 + continue + + test = ( + dist in req + and (dist.precedence <= SOURCE_DIST or not source) + ) + if test: + loc = self.download(dist.location, tmpdir) + dist.download_location = loc + if os.path.exists(dist.download_location): + return dist + + if force_scan: + self.prescan() + self.find_packages(requirement) + dist = find(requirement) + + if not dist and local_index is not None: + dist = find(requirement, local_index) + + if dist is None: + if self.to_scan is not None: + self.prescan() + dist = find(requirement) + + if dist is None and not force_scan: + self.find_packages(requirement) + dist = find(requirement) + + if dist is None: + self.warn( + "No local packages or working download links found for %s%s", + (source and "a source distribution of " or ""), + requirement, + ) + else: + self.info("Best match: %s", dist) + return dist.clone(location=dist.download_location) + + def fetch(self, requirement, tmpdir, force_scan=False, source=False): + """Obtain a file suitable for fulfilling `requirement` + + DEPRECATED; use the ``fetch_distribution()`` method now instead. 
For + backward compatibility, this routine is identical but returns the + ``location`` of the downloaded distribution instead of a distribution + object. + """ + dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) + if dist is not None: + return dist.location + return None + + def gen_setup(self, filename, fragment, tmpdir): + match = EGG_FRAGMENT.match(fragment) + dists = match and [ + d for d in + interpret_distro_name(filename, match.group(1), None) if d.version + ] or [] + + if len(dists) == 1: # unambiguous ``#egg`` fragment + basename = os.path.basename(filename) + + # Make sure the file has been downloaded to the temp dir. + if os.path.dirname(filename) != tmpdir: + dst = os.path.join(tmpdir, basename) + from setuptools.command.easy_install import samefile + if not samefile(filename, dst): + shutil.copy2(filename, dst) + filename = dst + + with open(os.path.join(tmpdir, 'setup.py'), 'w') as file: + file.write( + "from setuptools import setup\n" + "setup(name=%r, version=%r, py_modules=[%r])\n" + % ( + dists[0].project_name, dists[0].version, + os.path.splitext(basename)[0] + ) + ) + return filename + + elif match: + raise DistutilsError( + "Can't unambiguously interpret project/version identifier %r; " + "any dashes in the name or version should be escaped using " + "underscores. %r" % (fragment, dists) + ) + else: + raise DistutilsError( + "Can't process plain .py files without an '#egg=name-version'" + " suffix to enable automatic setup script generation." + ) + + dl_blocksize = 8192 + + def _download_to(self, url, filename): + self.info("Downloading %s", url) + # Download the file + fp = None + try: + checker = HashChecker.from_url(url) + fp = self.open_url(url) + if isinstance(fp, urllib.error.HTTPError): + raise DistutilsError( + "Can't download %s: %s %s" % (url, fp.code, fp.msg) + ) + headers = fp.info() + blocknum = 0 + bs = self.dl_blocksize + size = -1 + if "content-length" in headers: + # Some servers return multiple Content-Length headers :( + sizes = headers.get_all('Content-Length') + size = max(map(int, sizes)) + self.reporthook(url, filename, blocknum, bs, size) + with open(filename, 'wb') as tfp: + while True: + block = fp.read(bs) + if block: + checker.feed(block) + tfp.write(block) + blocknum += 1 + self.reporthook(url, filename, blocknum, bs, size) + else: + break + self.check_hash(checker, filename, tfp) + return headers + finally: + if fp: + fp.close() + + def reporthook(self, url, filename, blocknum, blksize, size): + pass # no-op + + # FIXME: + def open_url(self, url, warning=None): # noqa: C901 # is too complex (12) + if url.startswith('file:'): + return local_open(url) + try: + return open_with_auth(url, self.opener) + except (ValueError, http.client.InvalidURL) as v: + msg = ' '.join([str(arg) for arg in v.args]) + if warning: + self.warn(warning, msg) + else: + raise DistutilsError('%s %s' % (url, msg)) from v + except urllib.error.HTTPError as v: + return v + except urllib.error.URLError as v: + if warning: + self.warn(warning, v.reason) + else: + raise DistutilsError("Download error for %s: %s" + % (url, v.reason)) from v + except http.client.BadStatusLine as v: + if warning: + self.warn(warning, v.line) + else: + raise DistutilsError( + '%s returned a bad status line. 
The server might be ' + 'down, %s' % + (url, v.line) + ) from v + except (http.client.HTTPException, socket.error) as v: + if warning: + self.warn(warning, v) + else: + raise DistutilsError("Download error for %s: %s" + % (url, v)) from v + + def _download_url(self, scheme, url, tmpdir): + # Determine download filename + # + name, fragment = egg_info_for_url(url) + if name: + while '..' in name: + name = name.replace('..', '.').replace('\\', '_') + else: + name = "__downloaded__" # default if URL has no path contents + + if name.endswith('.egg.zip'): + name = name[:-4] # strip the extra .zip before download + + filename = os.path.join(tmpdir, name) + + # Download the file + # + if scheme == 'svn' or scheme.startswith('svn+'): + return self._download_svn(url, filename) + elif scheme == 'git' or scheme.startswith('git+'): + return self._download_git(url, filename) + elif scheme.startswith('hg+'): + return self._download_hg(url, filename) + elif scheme == 'file': + return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) + else: + self.url_ok(url, True) # raises error if not allowed + return self._attempt_download(url, filename) + + def scan_url(self, url): + self.process_url(url, True) + + def _attempt_download(self, url, filename): + headers = self._download_to(url, filename) + if 'html' in headers.get('content-type', '').lower(): + return self._download_html(url, headers, filename) + else: + return filename + + def _download_html(self, url, headers, filename): + file = open(filename) + for line in file: + if line.strip(): + # Check for a subversion index page + if re.search(r'([^- ]+ - )?Revision \d+:', line): + # it's a subversion index page: + file.close() + os.unlink(filename) + return self._download_svn(url, filename) + break # not an index page + file.close() + os.unlink(filename) + raise DistutilsError("Unexpected HTML page found at " + url) + + def _download_svn(self, url, filename): + warnings.warn("SVN download support is deprecated", UserWarning) + url = url.split('#', 1)[0] # remove any fragment for svn's sake + creds = '' + if url.lower().startswith('svn:') and '@' in url: + scheme, netloc, path, p, q, f = urllib.parse.urlparse(url) + if not netloc and path.startswith('//') and '/' in path[2:]: + netloc, path = path[2:].split('/', 1) + auth, host = _splituser(netloc) + if auth: + if ':' in auth: + user, pw = auth.split(':', 1) + creds = " --username=%s --password=%s" % (user, pw) + else: + creds = " --username=" + auth + netloc = host + parts = scheme, netloc, url, p, q, f + url = urllib.parse.urlunparse(parts) + self.info("Doing subversion checkout from %s to %s", url, filename) + os.system("svn checkout%s -q %s %s" % (creds, url, filename)) + return filename + + @staticmethod + def _vcs_split_rev_from_url(url, pop_prefix=False): + scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) + + scheme = scheme.split('+', 1)[-1] + + # Some fragment identification fails + path = path.split('#', 1)[0] + + rev = None + if '@' in path: + path, rev = path.rsplit('@', 1) + + # Also, discard fragment + url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) + + return url, rev + + def _download_git(self, url, filename): + filename = filename.split('#', 1)[0] + url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) + + self.info("Doing git clone from %s to %s", url, filename) + os.system("git clone --quiet %s %s" % (url, filename)) + + if rev is not None: + self.info("Checking out %s", rev) + os.system("git -C %s checkout --quiet %s" % ( + filename, + rev, + 
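Illustrative note, not part of the vendored file: _vcs_split_rev_from_url strips the 'git+'/'hg+' prefix and splits a trailing '@revision' before the clone commands above run. A standalone sketch with made-up URLs:

import urllib.parse

def split_rev_from_url(url):
    # mirrors PackageIndex._vcs_split_rev_from_url: drop the 'vcs+' prefix,
    # split a trailing '@rev', and discard the fragment
    scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
    scheme = scheme.split('+', 1)[-1]
    path = path.split('#', 1)[0]
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)
    return urllib.parse.urlunsplit((scheme, netloc, path, query, '')), rev

assert split_rev_from_url('git+https://example.invalid/repo.git@v1.2') == (
    'https://example.invalid/repo.git', 'v1.2')
assert split_rev_from_url('hg+https://example.invalid/repo') == (
    'https://example.invalid/repo', None)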
)) + + return filename + + def _download_hg(self, url, filename): + filename = filename.split('#', 1)[0] + url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) + + self.info("Doing hg clone from %s to %s", url, filename) + os.system("hg clone --quiet %s %s" % (url, filename)) + + if rev is not None: + self.info("Updating to %s", rev) + os.system("hg --cwd %s up -C -r %s -q" % ( + filename, + rev, + )) + + return filename + + def debug(self, msg, *args): + log.debug(msg, *args) + + def info(self, msg, *args): + log.info(msg, *args) + + def warn(self, msg, *args): + log.warn(msg, *args) + + +# This pattern matches a character entity reference (a decimal numeric +# references, a hexadecimal numeric reference, or a named reference). +entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub + + +def decode_entity(match): + what = match.group(0) + return html.unescape(what) + + +def htmldecode(text): + """ + Decode HTML entities in the given text. + + >>> htmldecode( + ... 'https://../package_name-0.1.2.tar.gz' + ... '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz') + 'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz' + """ + return entity_sub(decode_entity, text) + + +def socket_timeout(timeout=15): + def _socket_timeout(func): + def _socket_timeout(*args, **kwargs): + old_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(timeout) + try: + return func(*args, **kwargs) + finally: + socket.setdefaulttimeout(old_timeout) + + return _socket_timeout + + return _socket_timeout + + +def _encode_auth(auth): + """ + Encode auth from a URL suitable for an HTTP header. + >>> str(_encode_auth('username%3Apassword')) + 'dXNlcm5hbWU6cGFzc3dvcmQ=' + + Long auth strings should not cause a newline to be inserted. + >>> long_auth = 'username:' + 'password'*10 + >>> chr(10) in str(_encode_auth(long_auth)) + False + """ + auth_s = urllib.parse.unquote(auth) + # convert to bytes + auth_bytes = auth_s.encode() + encoded_bytes = base64.b64encode(auth_bytes) + # convert back to a string + encoded = encoded_bytes.decode() + # strip the trailing carriage return + return encoded.replace('\n', '') + + +class Credential: + """ + A username/password pair. Use like a namedtuple. + """ + + def __init__(self, username, password): + self.username = username + self.password = password + + def __iter__(self): + yield self.username + yield self.password + + def __str__(self): + return '%(username)s:%(password)s' % vars(self) + + +class PyPIConfig(configparser.RawConfigParser): + def __init__(self): + """ + Load from ~/.pypirc + """ + defaults = dict.fromkeys(['username', 'password', 'repository'], '') + configparser.RawConfigParser.__init__(self, defaults) + + rc = os.path.join(os.path.expanduser('~'), '.pypirc') + if os.path.exists(rc): + self.read(rc) + + @property + def creds_by_repository(self): + sections_with_repositories = [ + section for section in self.sections() + if self.get(section, 'repository').strip() + ] + + return dict(map(self._get_repo_cred, sections_with_repositories)) + + def _get_repo_cred(self, section): + repo = self.get(section, 'repository').strip() + return repo, Credential( + self.get(section, 'username').strip(), + self.get(section, 'password').strip(), + ) + + def find_credential(self, url): + """ + If the URL indicated appears to be a repository defined in this + config, return the credential for that repository. 
+ """ + for repository, cred in self.creds_by_repository.items(): + if url.startswith(repository): + return cred + + +def open_with_auth(url, opener=urllib.request.urlopen): + """Open a urllib2 request, handling HTTP authentication""" + + parsed = urllib.parse.urlparse(url) + scheme, netloc, path, params, query, frag = parsed + + # Double scheme does not raise on macOS as revealed by a + # failing test. We would expect "nonnumeric port". Refs #20. + if netloc.endswith(':'): + raise http.client.InvalidURL("nonnumeric port: ''") + + if scheme in ('http', 'https'): + auth, address = _splituser(netloc) + else: + auth = None + + if not auth: + cred = PyPIConfig().find_credential(url) + if cred: + auth = str(cred) + info = cred.username, url + log.info('Authenticating as %s for %s (from .pypirc)', *info) + + if auth: + auth = "Basic " + _encode_auth(auth) + parts = scheme, address, path, params, query, frag + new_url = urllib.parse.urlunparse(parts) + request = urllib.request.Request(new_url) + request.add_header("Authorization", auth) + else: + request = urllib.request.Request(url) + + request.add_header('User-Agent', user_agent) + fp = opener(request) + + if auth: + # Put authentication info back into request URL if same host, + # so that links found on the page will work + s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url) + if s2 == scheme and h2 == address: + parts = s2, netloc, path2, param2, query2, frag2 + fp.url = urllib.parse.urlunparse(parts) + + return fp + + +# copy of urllib.parse._splituser from Python 3.8 +def _splituser(host): + """splituser('user[:passwd]@host[:port]') + --> 'user[:passwd]', 'host[:port]'.""" + user, delim, host = host.rpartition('@') + return (user if delim else None), host + + +# adding a timeout to avoid freezing package_index +open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth) + + +def fix_sf_url(url): + return url # backward compatibility + + +def local_open(url): + """Read a local path, with special support for directories""" + scheme, server, path, param, query, frag = urllib.parse.urlparse(url) + filename = urllib.request.url2pathname(path) + if os.path.isfile(filename): + return urllib.request.urlopen(url) + elif path.endswith('/') and os.path.isdir(filename): + files = [] + for f in os.listdir(filename): + filepath = os.path.join(filename, f) + if f == 'index.html': + with open(filepath, 'r') as fp: + body = fp.read() + break + elif os.path.isdir(filepath): + f += '/' + files.append('<a href="{name}">{name}</a>'.format(name=f)) + else: + tmpl = ( + "<html><head><title>{url}" + "{files}") + body = tmpl.format(url=url, files='\n'.join(files)) + status, message = 200, "OK" + else: + status, message, body = 404, "Path not found", "Not found" + + headers = {'content-type': 'text/html'} + body_stream = io.StringIO(body) + return urllib.error.HTTPError(url, status, message, headers, body_stream) diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/py34compat.py b/env-llmeval/lib/python3.10/site-packages/setuptools/py34compat.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad917222a4e5bb93fe1c9e8fe1713bcab3630b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/py34compat.py @@ -0,0 +1,13 @@ +import importlib + +try: + import importlib.util +except ImportError: + pass + + +try: + module_from_spec = importlib.util.module_from_spec +except AttributeError: + def module_from_spec(spec): + return spec.loader.load_module(spec.name) diff --git 
a/env-llmeval/lib/python3.10/site-packages/setuptools/script (dev).tmpl b/env-llmeval/lib/python3.10/site-packages/setuptools/script (dev).tmpl new file mode 100644 index 0000000000000000000000000000000000000000..39a24b04888e79df51e2237577b303a2f901be63 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/script (dev).tmpl @@ -0,0 +1,6 @@ +# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r +__requires__ = %(spec)r +__import__('pkg_resources').require(%(spec)r) +__file__ = %(dev_path)r +with open(__file__) as f: + exec(compile(f.read(), __file__, 'exec')) diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/script.tmpl b/env-llmeval/lib/python3.10/site-packages/setuptools/script.tmpl new file mode 100644 index 0000000000000000000000000000000000000000..ff5efbcab3b58063dd84787181c26a95fb663d94 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/script.tmpl @@ -0,0 +1,3 @@ +# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r +__requires__ = %(spec)r +__import__('pkg_resources').run_script(%(spec)r, %(script_name)r) diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/unicode_utils.py b/env-llmeval/lib/python3.10/site-packages/setuptools/unicode_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e84e65e3e14152a2ba6e6e05d914f0e1bbef187b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/unicode_utils.py @@ -0,0 +1,42 @@ +import unicodedata +import sys + + +# HFS Plus uses decomposed UTF-8 +def decompose(path): + if isinstance(path, str): + return unicodedata.normalize('NFD', path) + try: + path = path.decode('utf-8') + path = unicodedata.normalize('NFD', path) + path = path.encode('utf-8') + except UnicodeError: + pass # Not UTF-8 + return path + + +def filesys_decode(path): + """ + Ensure that the given path is decoded, + NONE when no expected encoding works + """ + + if isinstance(path, str): + return path + + fs_enc = sys.getfilesystemencoding() or 'utf-8' + candidates = fs_enc, 'utf-8' + + for enc in candidates: + try: + return path.decode(enc) + except UnicodeDecodeError: + continue + + +def try_encode(string, enc): + "turn unicode encoding into a functional routine" + try: + return string.encode(enc) + except UnicodeEncodeError: + return None diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/version.py b/env-llmeval/lib/python3.10/site-packages/setuptools/version.py new file mode 100644 index 0000000000000000000000000000000000000000..95e1869658566aac3060562d8cd5a6b647887d1e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/version.py @@ -0,0 +1,6 @@ +import pkg_resources + +try: + __version__ = pkg_resources.get_distribution('setuptools').version +except Exception: + __version__ = 'unknown' diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools/windows_support.py b/env-llmeval/lib/python3.10/site-packages/setuptools/windows_support.py new file mode 100644 index 0000000000000000000000000000000000000000..cb977cff9545ef5d48ad7cf13f2cbe1ebc3e7cd0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/setuptools/windows_support.py @@ -0,0 +1,29 @@ +import platform +import ctypes + + +def windows_only(func): + if platform.system() != 'Windows': + return lambda *args, **kwargs: None + return func + + +@windows_only +def hide_file(path): + """ + Set the hidden attribute on a file or directory. + + From http://stackoverflow.com/questions/19622133/ + + `path` must be text. 
+ """ + __import__('ctypes.wintypes') + SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW + SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD + SetFileAttributes.restype = ctypes.wintypes.BOOL + + FILE_ATTRIBUTE_HIDDEN = 0x02 + + ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN) + if not ret: + raise ctypes.WinError() diff --git a/env-llmeval/lib/python3.10/site-packages/word2number/__init__.py b/env-llmeval/lib/python3.10/site-packages/word2number/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/word2number/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/word2number/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7d28f7f2efebae590e7e2993f01617849992c90 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/word2number/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/word2number/__pycache__/w2n.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/word2number/__pycache__/w2n.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8997c3340e6987a1fb9347cbc86e4470418a3ba6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/word2number/__pycache__/w2n.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/word2number/w2n.py b/env-llmeval/lib/python3.10/site-packages/word2number/w2n.py new file mode 100644 index 0000000000000000000000000000000000000000..69abcd95e5b987d1dd3c4de235d6619c108971df --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/word2number/w2n.py @@ -0,0 +1,217 @@ +from __future__ import print_function + + +american_number_system = { + 'zero': 0, + 'one': 1, + 'two': 2, + 'three': 3, + 'four': 4, + 'five': 5, + 'six': 6, + 'seven': 7, + 'eight': 8, + 'nine': 9, + 'ten': 10, + 'eleven': 11, + 'twelve': 12, + 'thirteen': 13, + 'fourteen': 14, + 'fifteen': 15, + 'sixteen': 16, + 'seventeen': 17, + 'eighteen': 18, + 'nineteen': 19, + 'twenty': 20, + 'thirty': 30, + 'forty': 40, + 'fifty': 50, + 'sixty': 60, + 'seventy': 70, + 'eighty': 80, + 'ninety': 90, + 'hundred': 100, + 'thousand': 1000, + 'million': 1000000, + 'billion': 1000000000, + 'point': '.' +} + +decimal_words = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine'] + +""" +#TODO +indian_number_system = { + 'zero': 0, + 'one': 1, + 'two': 2, + 'three': 3, + 'four': 4, + 'five': 5, + 'six': 6, + 'seven': 7, + 'eight': 8, + 'nine': 9, + 'ten': 10, + 'eleven': 11, + 'twelve': 12, + 'thirteen': 13, + 'fourteen': 14, + 'fifteen': 15, + 'sixteen': 16, + 'seventeen': 17, + 'eighteen': 18, + 'nineteen': 19, + 'twenty': 20, + 'thirty': 30, + 'forty': 40, + 'fifty': 50, + 'sixty': 60, + 'seventy': 70, + 'eighty': 80, + 'ninety': 90, + 'hundred': 100, + 'thousand': 1000, + 'lac': 100000, + 'lakh': 100000, + 'crore': 10000000 +} +""" + + +""" +function to form numeric multipliers for million, billion, thousand etc. 
+ +input: list of strings +return value: integer +""" + + +def number_formation(number_words): + numbers = [] + for number_word in number_words: + numbers.append(american_number_system[number_word]) + if len(numbers) == 4: + return (numbers[0] * numbers[1]) + numbers[2] + numbers[3] + elif len(numbers) == 3: + return numbers[0] * numbers[1] + numbers[2] + elif len(numbers) == 2: + if 100 in numbers: + return numbers[0] * numbers[1] + else: + return numbers[0] + numbers[1] + else: + return numbers[0] + + +""" +function to convert post decimal digit words to numerial digits +input: list of strings +output: double +""" + + +def get_decimal_sum(decimal_digit_words): + decimal_number_str = [] + for dec_word in decimal_digit_words: + if(dec_word not in decimal_words): + return 0 + else: + decimal_number_str.append(american_number_system[dec_word]) + final_decimal_string = '0.' + ''.join(map(str,decimal_number_str)) + return float(final_decimal_string) + + +""" +function to return integer for an input `number_sentence` string +input: string +output: int or double or None +""" + + +def word_to_num(number_sentence): + if type(number_sentence) is not str: + raise ValueError("Type of input is not string! Please enter a valid number word (eg. \'two million twenty three thousand and forty nine\')") + + number_sentence = number_sentence.replace('-', ' ') + number_sentence = number_sentence.lower() # converting input to lowercase + + if(number_sentence.isdigit()): # return the number if user enters a number string + return int(number_sentence) + + split_words = number_sentence.strip().split() # strip extra spaces and split sentence into words + + clean_numbers = [] + clean_decimal_numbers = [] + + # removing and, & etc. + for word in split_words: + if word in american_number_system: + clean_numbers.append(word) + + # Error message if the user enters invalid input! + if len(clean_numbers) == 0: + raise ValueError("No valid number words found! Please enter a valid number word (eg. two million twenty three thousand and forty nine)") + + # Error if user enters million,billion, thousand or decimal point twice + if clean_numbers.count('thousand') > 1 or clean_numbers.count('million') > 1 or clean_numbers.count('billion') > 1 or clean_numbers.count('point')> 1: + raise ValueError("Redundant number word! Please enter a valid number word (eg. two million twenty three thousand and forty nine)") + + # separate decimal part of number (if exists) + if clean_numbers.count('point') == 1: + clean_decimal_numbers = clean_numbers[clean_numbers.index('point')+1:] + clean_numbers = clean_numbers[:clean_numbers.index('point')] + + billion_index = clean_numbers.index('billion') if 'billion' in clean_numbers else -1 + million_index = clean_numbers.index('million') if 'million' in clean_numbers else -1 + thousand_index = clean_numbers.index('thousand') if 'thousand' in clean_numbers else -1 + + if (thousand_index > -1 and (thousand_index < million_index or thousand_index < billion_index)) or (million_index>-1 and million_index < billion_index): + raise ValueError("Malformed number! Please enter a valid number word (eg. 
two million twenty three thousand and forty nine)") + + total_sum = 0 # storing the number to be returned + + if len(clean_numbers) > 0: + # hack for now, better way TODO + if len(clean_numbers) == 1: + total_sum += american_number_system[clean_numbers[0]] + + else: + if billion_index > -1: + billion_multiplier = number_formation(clean_numbers[0:billion_index]) + total_sum += billion_multiplier * 1000000000 + + if million_index > -1: + if billion_index > -1: + million_multiplier = number_formation(clean_numbers[billion_index+1:million_index]) + else: + million_multiplier = number_formation(clean_numbers[0:million_index]) + total_sum += million_multiplier * 1000000 + + if thousand_index > -1: + if million_index > -1: + thousand_multiplier = number_formation(clean_numbers[million_index+1:thousand_index]) + elif billion_index > -1 and million_index == -1: + thousand_multiplier = number_formation(clean_numbers[billion_index+1:thousand_index]) + else: + thousand_multiplier = number_formation(clean_numbers[0:thousand_index]) + total_sum += thousand_multiplier * 1000 + + if thousand_index > -1 and thousand_index != len(clean_numbers)-1: + hundreds = number_formation(clean_numbers[thousand_index+1:]) + elif million_index > -1 and million_index != len(clean_numbers)-1: + hundreds = number_formation(clean_numbers[million_index+1:]) + elif billion_index > -1 and billion_index != len(clean_numbers)-1: + hundreds = number_formation(clean_numbers[billion_index+1:]) + elif thousand_index == -1 and million_index == -1 and billion_index == -1: + hundreds = number_formation(clean_numbers) + else: + hundreds = 0 + total_sum += hundreds + + # adding decimal part to total_sum (if exists) + if len(clean_decimal_numbers) > 0: + decimal_sum = get_decimal_sum(clean_decimal_numbers) + total_sum += decimal_sum + + return total_sum \ No newline at end of file diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..36b248ce00511fd76f358eb0f5543ca448d0e443 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2014-2020, Yue Du +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..4f7117bec2f88ca9d73ccca493b5c5bd56913863 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/METADATA @@ -0,0 +1,515 @@ +Metadata-Version: 2.1 +Name: xxhash +Version: 3.4.1 +Summary: Python binding for xxHash +Home-page: https://github.com/ifduyue/python-xxhash +Author: Yue Du +Author-email: ifduyue@gmail.com +License: BSD +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE + +python-xxhash +============= + +.. image:: https://github.com/ifduyue/python-xxhash/actions/workflows/test.yml/badge.svg + :target: https://github.com/ifduyue/python-xxhash/actions/workflows/test.yml + :alt: Github Actions Status + +.. image:: https://img.shields.io/pypi/v/xxhash.svg + :target: https://pypi.org/project/xxhash/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/xxhash.svg + :target: https://pypi.org/project/xxhash/ + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/l/xxhash.svg + :target: https://pypi.org/project/xxhash/ + :alt: License + + +.. _HMAC: http://en.wikipedia.org/wiki/Hash-based_message_authentication_code +.. _xxHash: https://github.com/Cyan4973/xxHash +.. _Cyan4973: https://github.com/Cyan4973 + + +xxhash is a Python binding for the xxHash_ library by `Yann Collet`__. + +__ Cyan4973_ + +Installation +------------ + +.. code-block:: bash + + $ pip install xxhash + +You can also install using conda: + +.. code-block:: bash + + $ conda install -c conda-forge python-xxhash + + +Installing From Source +~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + $ pip install --no-binary xxhash xxhash + +Prerequisites +++++++++++++++ + +On Debian/Ubuntu: + +.. code-block:: bash + + $ apt-get install python-dev gcc + +On CentOS/Fedora: + +.. code-block:: bash + + $ yum install python-devel gcc redhat-rpm-config + +Linking to libxxhash.so +~~~~~~~~~~~~~~~~~~~~~~~~ + +By default python-xxhash will use bundled xxHash, +we can change this by specifying ENV var ``XXHASH_LINK_SO``: + +.. 
code-block:: bash + + $ XXHASH_LINK_SO=1 pip install --no-binary xxhash xxhash + +Usage +-------- + +Module version and its backend xxHash library version can be retrieved using +the module properties ``VERSION`` AND ``XXHASH_VERSION`` respectively. + +.. code-block:: python + + >>> import xxhash + >>> xxhash.VERSION + '2.0.0' + >>> xxhash.XXHASH_VERSION + '0.8.0' + +This module is hashlib-compliant, which means you can use it in the same way as ``hashlib.md5``. + + | update() -- update the current digest with an additional string + | digest() -- return the current digest value + | hexdigest() -- return the current digest as a string of hexadecimal digits + | intdigest() -- return the current digest as an integer + | copy() -- return a copy of the current xxhash object + | reset() -- reset state + +md5 digest returns bytes, but the original xxh32 and xxh64 C APIs return integers. +While this module is made hashlib-compliant, ``intdigest()`` is also provided to +get the integer digest. + +Constructors for hash algorithms provided by this module are ``xxh32()`` and ``xxh64()``. + +For example, to obtain the digest of the byte string ``b'Nobody inspects the spammish repetition'``: + +.. code-block:: python + + >>> import xxhash + >>> x = xxhash.xxh32() + >>> x.update(b'Nobody inspects') + >>> x.update(b' the spammish repetition') + >>> x.digest() + b'\xe2);/' + >>> x.digest_size + 4 + >>> x.block_size + 16 + +More condensed: + +.. code-block:: python + + >>> xxhash.xxh32(b'Nobody inspects the spammish repetition').hexdigest() + 'e2293b2f' + >>> xxhash.xxh32(b'Nobody inspects the spammish repetition').digest() == x.digest() + True + +An optional seed (default is 0) can be used to alter the result predictably: + +.. code-block:: python + + >>> import xxhash + >>> xxhash.xxh64('xxhash').hexdigest() + '32dd38952c4bc720' + >>> xxhash.xxh64('xxhash', seed=20141025).hexdigest() + 'b559b98d844e0635' + >>> x = xxhash.xxh64(seed=20141025) + >>> x.update('xxhash') + >>> x.hexdigest() + 'b559b98d844e0635' + >>> x.intdigest() + 13067679811253438005 + +Be careful that xxh32 takes an unsigned 32-bit integer as seed, while xxh64 +takes an unsigned 64-bit integer. Although unsigned integer overflow is +defined behavior, it's better not to make it happen: + +.. code-block:: python + + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=0).hexdigest() + 'f7a35af8' + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=2**32).hexdigest() + 'f7a35af8' + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=1).hexdigest() + 'd8d4b4ba' + >>> xxhash.xxh32('I want an unsigned 32-bit seed!', seed=2**32+1).hexdigest() + 'd8d4b4ba' + >>> + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=0).hexdigest() + 'd4cb0a70a2b8c7c1' + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=2**64).hexdigest() + 'd4cb0a70a2b8c7c1' + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=1).hexdigest() + 'ce5087f12470d961' + >>> xxhash.xxh64('I want an unsigned 64-bit seed!', seed=2**64+1).hexdigest() + 'ce5087f12470d961' + + +``digest()`` returns bytes of the **big-endian** representation of the integer +digest: + +.. 
code-block:: python + + >>> import xxhash + >>> h = xxhash.xxh64() + >>> h.digest() + b'\xefF\xdb7Q\xd8\xe9\x99' + >>> h.intdigest().to_bytes(8, 'big') + b'\xefF\xdb7Q\xd8\xe9\x99' + >>> h.hexdigest() + 'ef46db3751d8e999' + >>> format(h.intdigest(), '016x') + 'ef46db3751d8e999' + >>> h.intdigest() + 17241709254077376921 + >>> int(h.hexdigest(), 16) + 17241709254077376921 + +Besides xxh32/xxh64 mentioned above, oneshot functions are also provided, +so we can avoid allocating XXH32/64 state on heap: + + | xxh32_digest(bytes, seed=0) + | xxh32_intdigest(bytes, seed=0) + | xxh32_hexdigest(bytes, seed=0) + | xxh64_digest(bytes, seed=0) + | xxh64_intdigest(bytes, seed=0) + | xxh64_hexdigest(bytes, seed=0) + +.. code-block:: python + + >>> import xxhash + >>> xxhash.xxh64('a').digest() == xxhash.xxh64_digest('a') + True + >>> xxhash.xxh64('a').intdigest() == xxhash.xxh64_intdigest('a') + True + >>> xxhash.xxh64('a').hexdigest() == xxhash.xxh64_hexdigest('a') + True + >>> xxhash.xxh64_hexdigest('xxhash', seed=20141025) + 'b559b98d844e0635' + >>> xxhash.xxh64_intdigest('xxhash', seed=20141025) + 13067679811253438005L + >>> xxhash.xxh64_digest('xxhash', seed=20141025) + '\xb5Y\xb9\x8d\x84N\x065' + +.. code-block:: python + + In [1]: import xxhash + + In [2]: %timeit xxhash.xxh64_hexdigest('xxhash') + 268 ns ± 24.1 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) + + In [3]: %timeit xxhash.xxh64('xxhash').hexdigest() + 416 ns ± 17.3 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) + + +XXH3 hashes are available since v2.0.0 (xxHash v0.8.0), they are: + +Streaming classes: + + | xxh3_64 + | xxh3_128 + +Oneshot functions: + + | xxh3_64_digest(bytes, seed=0) + | xxh3_64_intdigest(bytes, seed=0) + | xxh3_64_hexdigest(bytes, seed=0) + | xxh3_128_digest(bytes, seed=0) + | xxh3_128_intdigest(bytes, seed=0) + | xxh3_128_hexdigest(bytes, seed=0) + +And aliases: + + | xxh128 = xxh3_128 + | xxh128_digest = xxh3_128_digest + | xxh128_intdigest = xxh3_128_intdigest + | xxh128_hexdigest = xxh3_128_hexdigest + +Caveats +------- + +SEED OVERFLOW +~~~~~~~~~~~~~~ + +xxh32 takes an unsigned 32-bit integer as seed, and xxh64 takes +an unsigned 64-bit integer as seed. Make sure that the seed is greater than +or equal to ``0``. + +ENDIANNESS +~~~~~~~~~~~ + +As of python-xxhash 0.3.0, ``digest()`` returns bytes of the +**big-endian** representation of the integer digest. It used +to be little-endian. + +DONT USE XXHASH IN HMAC +~~~~~~~~~~~~~~~~~~~~~~~ +Though you can use xxhash as an HMAC_ hash function, but it's +highly recommended not to. + +xxhash is **NOT** a cryptographic hash function, it is a +non-cryptographic hash algorithm aimed at speed and quality. +Do not put xxhash in any position where cryptographic hash +functions are required. + + +Copyright and License +--------------------- + +Copyright (c) 2014-2020 Yue Du - https://github.com/ifduyue + +Licensed under `BSD 2-Clause License `_ + +CHANGELOG +----------- + +v3.4.1 2023-10-05 +~~~~~~~~~~~~~~~~~ + +- Remove setuptools_scm + + +v3.4.0 2023-10-05 +~~~~~~~~~~~~~~~~~ + +- Build wheels for Python 3.12 + +v3.3.0 2023-07-29 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.8.2 +- Drop support for Python 3.6 + +v3.2.0 2022-12-28 +~~~~~~~~~~~~~~~~~ + +This is the last version to support Python 3.6 + +- Build Python 3.11 wheels. +- Remove setup.py test_suites, call unittest directly + +v3.1.0 2022-10-19 +~~~~~~~~~~~~~~~~~ + +- Type annotations. +- Enabled muslinux wheels building. 
+ +v3.0.0 2022-02-25 +~~~~~~~~~~~~~~~~~ + +- New set `algorithms_available` lists all implemented algorithms in `xxhash` + package. +- Upgrade xxHash to v0.8.1. +- Drop support for EOL Python versions, require python >= 3.6 from now on. +- Migrate to github actions and build arm64 wheels for macOS. +- Always release GIL. + + +v2.0.2 2021-04-15 +~~~~~~~~~~~~~~~~~ + +- Fix Travis CI OSX dpl python2.7 get-pip.py error + +v2.0.1 2021-04-15 +~~~~~~~~~~~~~~~~~ + +- Only to trigger Python 3.9 wheels building. + +v2.0.0 2020-08-03 +~~~~~~~~~~~~~~~~~ + +- **Require xxHash version >= v0.8.0** +- Upgrade xxHash to v0.8.0 +- XXH3 hashes: `xxh3_64`, `xxh3_128`, and their oneshot functions + +v1.4.4 2020-06-20 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.7.3 +- Stop using PEP393 deprecated APIs +- Use XXH(32|64)_canonicalFromHash to replace u2bytes and ull2bytes + +v1.4.3 2019-11-12 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.7.2 +- Python 3.8 wheels + +v1.4.2 2019-10-13 +~~~~~~~~~~~~~~~~~ + +- Fixed: setup.py fails when reading README.rst and the default encoding is not UTF-8 + +v1.4.1 2019-08-27 +~~~~~~~~~~~~~~~~~ + +- Fixed: xxh3.h in missing from source tarball + +v1.4.0 2019-08-25 +~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.7.1 + +v1.3.0 2018-10-21 +~~~~~~~~~~~~~~~~~ + +- Wheels are now built automatically +- Split CFFI variant into a separate package `ifduyue/python-xxhash-cffi `_ + +v1.2.0 2018-07-13 +~~~~~~~~~~~~~~~~~ + +- Add oneshot functions xxh{32,64}_{,int,hex}digest + +v1.1.0 2018-07-05 +~~~~~~~~~~~~~~~~~ + +- Allow input larger than 2GB +- Release the GIL on sufficiently large input +- Drop support for Python 3.2 + +v1.0.1 2017-03-02 +~~~~~~~~~~~~~~~~~~ + +- Free state actively, instead of delegating it to ffi.gc + +v1.0.0 2017-02-10 +~~~~~~~~~~~~~~~~~~ + +- Fixed copy() segfault +- Added CFFI variant + +v0.6.3 2017-02-10 +~~~~~~~~~~~~~~~~~~ + +- Fixed copy() segfault + +v0.6.2 2017-02-10 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.6.2 + +v0.6.1 2016-06-26 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.6.1 + +v0.5.0 2016-03-02 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to v0.5.0 + +v0.4.3 2015-08-21 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to r42 + +v0.4.1 2015-08-16 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to r41 + +v0.4.0 2015-08-05 +~~~~~~~~~~~~~~~~~~ + +- Added method reset +- Upgrade xxHash to r40 + +v0.3.2 2015-01-27 +~~~~~~~~~~~~~~~~~~ + +- Fixed some typos in docstrings + +v0.3.1 2015-01-24 +~~~~~~~~~~~~~~~~~~ + +- Upgrade xxHash to r39 + +v0.3.0 2014-11-11 +~~~~~~~~~~~~~~~~~~ + +- Change digest() from little-endian representation to big-endian representation of the integer digest. + This change breaks compatibility (digest() results are different). + +v0.2.0 2014-10-25 +~~~~~~~~~~~~~~~~~~ + +- Make this package hashlib-compliant + +v0.1.3 2014-10-23 +~~~~~~~~~~~~~~~~~~ + +- Update xxHash to r37 + +v0.1.2 2014-10-19 +~~~~~~~~~~~~~~~~~~ + +- Improve: Check XXHnn_init() return value. +- Update xxHash to r36 + +v0.1.1 2014-08-07 +~~~~~~~~~~~~~~~~~~ + +- Improve: Can now be built with Visual C++ Compiler. + +v0.1.0 2014-08-05 +~~~~~~~~~~~~~~~~~~ + +- New: XXH32 and XXH64 type, which support partially update. 
+- Fix: build under Python 3.4 + +v0.0.2 2014-08-03 +~~~~~~~~~~~~~~~~~~ + +- NEW: Support Python 3 + +v0.0.1 2014-07-30 +~~~~~~~~~~~~~~~~~~ + +- NEW: xxh32 and xxh64 diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a3a11b791266211c8784c79155f79176c1c1db12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/RECORD @@ -0,0 +1,13 @@ +xxhash-3.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +xxhash-3.4.1.dist-info/LICENSE,sha256=-OnvAMeL5NLaUmssN-QJnJIMf-C52UO_Da6y78MKOls,1313 +xxhash-3.4.1.dist-info/METADATA,sha256=00hjepcOk7xbVEn_p1iOwQDjmH7vVGpPsbPT5_V-SuI,12407 +xxhash-3.4.1.dist-info/RECORD,, +xxhash-3.4.1.dist-info/WHEEL,sha256=cD39NF6a3hkhaWoPQJng7gnGZRIfQsUCtwcedITCPtg,152 +xxhash-3.4.1.dist-info/top_level.txt,sha256=1PPSBP-gnjG59E5bigzMTzmT6BVWjHwnpzMiisPWZ5I,15 +xxhash/__init__.py,sha256=mPEdihxDMU0rjLWum3FrU9Ua2jQ-rzfewYgIg-J-Jlc,1147 +xxhash/__init__.pyi,sha256=Te-hUGiCW_4Y65lyrGCOgeQrL35XTFM-qunbz0R9MiE,1786 +xxhash/__pycache__/__init__.cpython-310.pyc,, +xxhash/__pycache__/version.cpython-310.pyc,, +xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so,sha256=-ivlxG-o9sMQAHnKz9tauU4XikfH8HzheTOL3Zp6QLw,830856 +xxhash/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xxhash/version.py,sha256=h7POl0WWl8CsxtaUevRn7QF75kWctjTiIWdvJzM4yeA,44 diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7b52c3f3e667e03ca4b2a8b53a94655a796beafc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..46a6ce2f1ac8b49207db6601355076641e92f163 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/xxhash-3.4.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_xxhash +xxhash diff --git a/env-llmeval/lib/python3.10/site-packages/zstandard/__init__.py b/env-llmeval/lib/python3.10/site-packages/zstandard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8588729004b2c22b2e2150679d63120e122579a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/zstandard/__init__.py @@ -0,0 +1,210 @@ +# Copyright (c) 2017-present, Gregory Szorc +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +"""Python interface to the Zstandard (zstd) compression library.""" + +from __future__ import absolute_import, unicode_literals + +# This module serves 2 roles: +# +# 1) Export the C or CFFI "backend" through a central module. +# 2) Implement additional functionality built on top of C or CFFI backend. + +import builtins +import io +import os +import platform + +from typing import ByteString + +# Some Python implementations don't support C extensions. That's why we have +# a CFFI implementation in the first place. 
The code here import one of our +# "backends" then re-exports the symbols from this module. For convenience, +# we support falling back to the CFFI backend if the C extension can't be +# imported. But for performance reasons, we only do this on unknown Python +# implementation. Notably, for CPython we require the C extension by default. +# Because someone will inevitably want special behavior, the behavior is +# configurable via an environment variable. A potentially better way to handle +# this is to import a special ``__importpolicy__`` module or something +# defining a variable and `setup.py` could write the file with whatever +# policy was specified at build time. Until someone needs it, we go with +# the hacky but simple environment variable approach. +_module_policy = os.environ.get("PYTHON_ZSTANDARD_IMPORT_POLICY", "default") + +if _module_policy == "default": + if platform.python_implementation() in ("CPython",): + from .backend_c import * # type: ignore + + backend = "cext" + elif platform.python_implementation() in ("PyPy",): + from .backend_cffi import * # type: ignore + + backend = "cffi" + else: + try: + from .backend_c import * + + backend = "cext" + except ImportError: + from .backend_cffi import * + + backend = "cffi" +elif _module_policy == "cffi_fallback": + try: + from .backend_c import * + + backend = "cext" + except ImportError: + from .backend_cffi import * + + backend = "cffi" +elif _module_policy == "rust": + from .backend_rust import * # type: ignore + + backend = "rust" +elif _module_policy == "cext": + from .backend_c import * + + backend = "cext" +elif _module_policy == "cffi": + from .backend_cffi import * + + backend = "cffi" +else: + raise ImportError( + "unknown module import policy: %s; use default, cffi_fallback, " + "cext, or cffi" % _module_policy + ) + +# Keep this in sync with python-zstandard.h, rust-ext/src/lib.rs, and debian/changelog. +__version__ = "0.22.0" + +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_WRITE = 2 + + +def open( + filename, + mode="rb", + cctx=None, + dctx=None, + encoding=None, + errors=None, + newline=None, + closefd=None, +): + """Create a file object with zstd (de)compression. + + The object returned from this function will be a + :py:class:`ZstdDecompressionReader` if opened for reading in binary mode, + a :py:class:`ZstdCompressionWriter` if opened for writing in binary mode, + or an ``io.TextIOWrapper`` if opened for reading or writing in text mode. + + :param filename: + ``bytes``, ``str``, or ``os.PathLike`` defining a file to open or a + file object (with a ``read()`` or ``write()`` method). + :param mode: + ``str`` File open mode. Accepts any of the open modes recognized by + ``open()``. + :param cctx: + ``ZstdCompressor`` to use for compression. If not specified and file + is opened for writing, the default ``ZstdCompressor`` will be used. + :param dctx: + ``ZstdDecompressor`` to use for decompression. If not specified and file + is opened for reading, the default ``ZstdDecompressor`` will be used. + :param encoding: + ``str`` that defines text encoding to use when file is opened in text + mode. + :param errors: + ``str`` defining text encoding error handling mode. + :param newline: + ``str`` defining newline to use in text mode. + :param closefd: + ``bool`` whether to close the file when the returned object is closed. + Only used if a file object is passed. If a filename is specified, the + opened file is always closed when the returned object is closed. 
+ """ + normalized_mode = mode.replace("t", "") + + if normalized_mode in ("r", "rb"): + dctx = dctx or ZstdDecompressor() + open_mode = "r" + raw_open_mode = "rb" + elif normalized_mode in ("w", "wb", "a", "ab", "x", "xb"): + cctx = cctx or ZstdCompressor() + open_mode = "w" + raw_open_mode = normalized_mode + if not raw_open_mode.endswith("b"): + raw_open_mode = raw_open_mode + "b" + else: + raise ValueError("Invalid mode: {!r}".format(mode)) + + if hasattr(os, "PathLike"): + types = (str, bytes, os.PathLike) + else: + types = (str, bytes) + + if isinstance(filename, types): # type: ignore + inner_fh = builtins.open(filename, raw_open_mode) + closefd = True + elif hasattr(filename, "read") or hasattr(filename, "write"): + inner_fh = filename + closefd = bool(closefd) + else: + raise TypeError( + "filename must be a str, bytes, file or PathLike object" + ) + + if open_mode == "r": + fh = dctx.stream_reader(inner_fh, closefd=closefd) + elif open_mode == "w": + fh = cctx.stream_writer(inner_fh, closefd=closefd) + else: + raise RuntimeError("logic error in zstandard.open() handling open mode") + + if "b" not in normalized_mode: + return io.TextIOWrapper( + fh, encoding=encoding, errors=errors, newline=newline + ) + else: + return fh + + +def compress(data: ByteString, level: int = 3) -> bytes: + """Compress source data using the zstd compression format. + + This performs one-shot compression using basic/default compression + settings. + + This method is provided for convenience and is equivalent to calling + ``ZstdCompressor(level=level).compress(data)``. + + If you find yourself calling this function in a tight loop, + performance will be greater if you construct a single ``ZstdCompressor`` + and repeatedly call ``compress()`` on it. + """ + cctx = ZstdCompressor(level=level) + + return cctx.compress(data) + + +def decompress(data: ByteString, max_output_size: int = 0) -> bytes: + """Decompress a zstd frame into its original data. + + This performs one-shot decompression using basic/default compression + settings. + + This method is provided for convenience and is equivalent to calling + ``ZstdDecompressor().decompress(data, max_output_size=max_output_size)``. + + If you find yourself calling this function in a tight loop, performance + will be greater if you construct a single ``ZstdDecompressor`` and + repeatedly call ``decompress()`` on it. + """ + dctx = ZstdDecompressor() + + return dctx.decompress(data, max_output_size=max_output_size) diff --git a/env-llmeval/lib/python3.10/site-packages/zstandard/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/zstandard/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c95a73e89b9a3bbcf740cc5daf63d16a92472130 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/zstandard/__init__.pyi @@ -0,0 +1,480 @@ +# Copyright (c) 2016-present, Gregory Szorc +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. 
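+
+# Usage sketch for the convenience API defined in __init__.py above. This is
+# an illustration only (kept in comments rather than executable stub code) and
+# relies solely on names this package exports: compress, decompress,
+# ZstdCompressor, ZstdDecompressor.
+#
+#     import zstandard
+#
+#     # One-shot helpers are fine for occasional calls.
+#     frame = zstandard.compress(b"hello zstd", level=3)
+#     assert zstandard.decompress(frame) == b"hello zstd"
+#
+#     # In a tight loop, build the (de)compressor once and reuse it,
+#     # as the compress()/decompress() docstrings recommend.
+#     cctx = zstandard.ZstdCompressor(level=3)
+#     dctx = zstandard.ZstdDecompressor()
+#     frames = [cctx.compress(chunk) for chunk in (b"a", b"b", b"c")]
+#     assert [dctx.decompress(f) for f in frames] == [b"a", b"b", b"c"]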
+ +import os + +from typing import ( + BinaryIO, + ByteString, + Generator, + IO, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) + +FLUSH_BLOCK: int +FLUSH_FRAME: int + +COMPRESSOBJ_FLUSH_FINISH: int +COMPRESSOBJ_FLUSH_BLOCK: int + +CONTENTSIZE_UNKNOWN: int +CONTENTSIZE_ERROR: int + +MAX_COMPRESSION_LEVEL: int + +COMPRESSION_RECOMMENDED_INPUT_SIZE: int +COMPRESSION_RECOMMENDED_OUTPUT_SIZE: int + +DECOMPRESSION_RECOMMENDED_INPUT_SIZE: int +DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE: int + +BLOCKSIZELOG_MAX: int +BLOCKSIZE_MAX: int + +WINDOWLOG_MIN: int +WINDOWLOG_MAX: int + +CHAINLOG_MIN: int +CHAINLOG_MAX: int +HASHLOG_MIN: int +HASHLOG_MAX: int +MINMATCH_MIN: int +MINMATCH_MAX: int +SEARCHLOG_MIN: int +SEARCHLOG_MAX: int +SEARCHLENGTH_MIN: int +SEARCHLENGTH_MAX: int +TARGETLENGTH_MIN: int +TARGETLENGTH_MAX: int +LDM_MINMATCH_MIN: int +LDM_MINMATCH_MAX: int +LDM_BUCKETSIZELOG_MAX: int + +STRATEGY_FAST: int +STRATEGY_DFAST: int +STRATEGY_GREEDY: int +STRATEGY_LAZY: int +STRATEGY_LAZY2: int +STRATEGY_BTLAZY2: int +STRATEGY_BTOPT: int +STRATEGY_BTULTRA: int +STRATEGY_BTULTRA2: int + +DICT_TYPE_AUTO: int +DICT_TYPE_RAWCONTENT: int +DICT_TYPE_FULLDICT: int + +FORMAT_ZSTD1: int +FORMAT_ZSTD1_MAGICLESS: int + +ZSTD_VERSION: Tuple[int, int, int] +FRAME_HEADER: bytes +MAGIC_NUMBER: int + +backend: str +backend_features: Set[str] +__version__: str + +class ZstdError(Exception): ... + +class BufferSegment(object): + offset: int + def __len__(self) -> int: ... + def tobytes(self) -> bytes: ... + +class BufferSegments(object): + def __len__(self) -> int: ... + def __getitem__(self, i: int) -> BufferSegment: ... + +class BufferWithSegments(object): + size: int + def __init__(self, data: ByteString, segments: ByteString): ... + def __len__(self) -> int: ... + def __getitem__(self, i: int) -> BufferSegment: ... + def segments(self): ... + def tobytes(self) -> bytes: ... + +class BufferWithSegmentsCollection(object): + def __init__(self, *args): ... + def __len__(self) -> int: ... + def __getitem__(self, i: int) -> BufferSegment: ... + def size(self) -> int: ... + +class ZstdCompressionParameters(object): + @staticmethod + def from_level( + level: int, source_size: int = ..., dict_size: int = ..., **kwargs + ) -> "ZstdCompressionParameters": ... + def __init__( + self, + format: int = ..., + compression_level: int = ..., + window_log: int = ..., + hash_log: int = ..., + chain_log: int = ..., + search_log: int = ..., + min_match: int = ..., + target_length: int = ..., + strategy: int = ..., + write_content_size: int = ..., + write_checksum: int = ..., + write_dict_id: int = ..., + job_size: int = ..., + overlap_log: int = ..., + force_max_window: int = ..., + enable_ldm: int = ..., + ldm_hash_log: int = ..., + ldm_min_match: int = ..., + ldm_bucket_size_log: int = ..., + ldm_hash_rate_log: int = ..., + threads: int = ..., + ): ... + @property + def format(self) -> int: ... + @property + def compression_level(self) -> int: ... + @property + def window_log(self) -> int: ... + @property + def hash_log(self) -> int: ... + @property + def chain_log(self) -> int: ... + @property + def search_log(self) -> int: ... + @property + def min_match(self) -> int: ... + @property + def target_length(self) -> int: ... + @property + def strategy(self) -> int: ... + @property + def write_content_size(self) -> int: ... + @property + def write_checksum(self) -> int: ... + @property + def write_dict_id(self) -> int: ... + @property + def job_size(self) -> int: ... + @property + def overlap_log(self) -> int: ... 
+ @property + def force_max_window(self) -> int: ... + @property + def enable_ldm(self) -> int: ... + @property + def ldm_hash_log(self) -> int: ... + @property + def ldm_min_match(self) -> int: ... + @property + def ldm_bucket_size_log(self) -> int: ... + @property + def ldm_hash_rate_log(self) -> int: ... + @property + def threads(self) -> int: ... + def estimated_compression_context_size(self) -> int: ... + +class CompressionParameters(ZstdCompressionParameters): ... + +class ZstdCompressionDict(object): + k: int + d: int + def __init__( + self, + data: ByteString, + dict_type: int = ..., + k: int = ..., + d: int = ..., + ): ... + def __len__(self) -> int: ... + def dict_id(self) -> int: ... + def as_bytes(self) -> bytes: ... + def precompute_compress( + self, + level: int = ..., + compression_params: ZstdCompressionParameters = ..., + ): ... + +class ZstdCompressionObj(object): + def compress(self, data: ByteString) -> bytes: ... + def flush(self, flush_mode: int = ...) -> bytes: ... + +class ZstdCompressionChunker(object): + def compress(self, data: ByteString): ... + def flush(self): ... + def finish(self): ... + +class ZstdCompressionReader(BinaryIO): + def __enter__(self) -> "ZstdCompressionReader": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def readable(self) -> bool: ... + def writable(self) -> bool: ... + def seekable(self) -> bool: ... + def readline(self, limit: int = ...) -> bytes: ... + def readlines(self, hint: int = ...) -> List[bytes]: ... + def write(self, data: ByteString): ... + def writelines(self, data: Iterable[bytes]): ... + def isatty(self) -> bool: ... + def flush(self): ... + def close(self): ... + @property + def closed(self) -> bool: ... + def tell(self) -> int: ... + def readall(self) -> bytes: ... + def __iter__(self): ... + def __next__(self): ... + def next(self): ... + def read(self, size: int = ...) -> bytes: ... + def read1(self, size: int = ...) -> bytes: ... + def readinto(self, b) -> int: ... + def readinto1(self, b) -> int: ... + +class ZstdCompressionWriter(BinaryIO): + def __enter__(self) -> "ZstdCompressionWriter": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def memory_size(self) -> int: ... + def fileno(self) -> int: ... + def close(self): ... + @property + def closed(self) -> bool: ... + def isatty(self) -> bool: ... + def readable(self) -> bool: ... + def readline(self, size: int = ...) -> bytes: ... + def readlines(self, hint: int = ...) -> List[bytes]: ... + def seek(self, offset: int, whence: int = ...): ... + def seekable(self) -> bool: ... + def truncate(self, size: int = ...): ... + def writable(self) -> bool: ... + def writelines(self, lines: Iterable[bytes]): ... + def read(self, size: int = ...) -> bytes: ... + def readall(self) -> bytes: ... + def readinto(self, b): ... + def write(self, data: ByteString) -> int: ... + def flush(self, flush_mode: int = ...) -> int: ... + def tell(self) -> int: ... + +class ZstdCompressor(object): + def __init__( + self, + level: int = ..., + dict_data: Optional[ZstdCompressionDict] = ..., + compression_params: Optional[ZstdCompressionParameters] = ..., + write_checksum: Optional[bool] = ..., + write_content_size: Optional[bool] = ..., + write_dict_id: Optional[bool] = ..., + threads: int = ..., + ): ... + def memory_size(self) -> int: ... + def compress(self, data: ByteString) -> bytes: ... + def compressobj(self, size: int = ...) -> ZstdCompressionObj: ... + def chunker( + self, size: int = ..., chunk_size: int = ... + ) -> ZstdCompressionChunker: ... 
+ def copy_stream( + self, + ifh: IO[bytes], + ofh: IO[bytes], + size: int = ..., + read_size: int = ..., + write_size: int = ..., + ) -> Tuple[int, int]: ... + def stream_reader( + self, + source: Union[IO[bytes], ByteString], + size: int = ..., + read_size: int = ..., + *, + closefd: bool = ..., + ) -> ZstdCompressionReader: ... + def stream_writer( + self, + writer: IO[bytes], + size: int = ..., + write_size: int = ..., + write_return_read: bool = ..., + *, + closefd: bool = ..., + ) -> ZstdCompressionWriter: ... + def read_to_iter( + self, + reader: Union[IO[bytes], ByteString], + size: int = ..., + read_size: int = ..., + write_size: int = ..., + ) -> Generator[bytes, None, None]: ... + def frame_progression(self) -> Tuple[int, int, int]: ... + def multi_compress_to_buffer( + self, + data: Union[ + BufferWithSegments, + BufferWithSegmentsCollection, + List[ByteString], + ], + threads: int = ..., + ) -> BufferWithSegmentsCollection: ... + +class ZstdDecompressionObj(object): + def decompress(self, data: ByteString) -> bytes: ... + def flush(self, length: int = ...) -> bytes: ... + @property + def unused_data(self) -> bytes: ... + @property + def unconsumed_tail(self) -> bytes: ... + @property + def eof(self) -> bool: ... + +class ZstdDecompressionReader(BinaryIO): + def __enter__(self) -> "ZstdDecompressionReader": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def readable(self) -> bool: ... + def writable(self) -> bool: ... + def seekable(self) -> bool: ... + def readline(self, size: int = ...): ... + def readlines(self, hint: int = ...): ... + def write(self, data: ByteString): ... + def writelines(self, lines: Iterable[bytes]): ... + def isatty(self) -> bool: ... + def flush(self): ... + def close(self): ... + @property + def closed(self) -> bool: ... + def tell(self) -> int: ... + def readall(self) -> bytes: ... + def __iter__(self): ... + def __next__(self): ... + def next(self): ... + def read(self, size: int = ...) -> bytes: ... + def readinto(self, b) -> int: ... + def read1(self, size: int = ...) -> bytes: ... + def readinto1(self, b) -> int: ... + def seek(self, pos: int, whence: int = ...) -> int: ... + +class ZstdDecompressionWriter(BinaryIO): + def __enter__(self) -> "ZstdDecompressionWriter": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def memory_size(self) -> int: ... + def close(self): ... + @property + def closed(self) -> bool: ... + def fileno(self) -> int: ... + def flush(self): ... + def isatty(self) -> bool: ... + def readable(self) -> bool: ... + def readline(self, size: int = ...): ... + def readlines(self, hint: int = ...): ... + def seek(self, offset: int, whence: int = ...): ... + def seekable(self) -> bool: ... + def tell(self): ... + def truncate(self, size: int = ...): ... + def writable(self) -> bool: ... + def writelines(self, lines: Iterable[bytes]): ... + def read(self, size: int = ...): ... + def readall(self): ... + def readinto(self, b): ... + def write(self, data: ByteString) -> int: ... + +class ZstdDecompressor(object): + def __init__( + self, + dict_data: Optional[ZstdCompressionDict] = ..., + max_window_size: int = ..., + format: int = ..., + ): ... + def memory_size(self) -> int: ... + def decompress( + self, + data: ByteString, + max_output_size: int = ..., + read_across_frames: bool = ..., + allow_extra_data: bool = ..., + ) -> bytes: ... 
+ def stream_reader( + self, + source: Union[IO[bytes], ByteString], + read_size: int = ..., + read_across_frames: bool = ..., + *, + closefd=False, + ) -> ZstdDecompressionReader: ... + def decompressobj( + self, write_size: int = ..., read_across_frames: bool = False + ) -> ZstdDecompressionObj: ... + def read_to_iter( + self, + reader: Union[IO[bytes], ByteString], + read_size: int = ..., + write_size: int = ..., + skip_bytes: int = ..., + ) -> Generator[bytes, None, None]: ... + def stream_writer( + self, + writer: IO[bytes], + write_size: int = ..., + write_return_read: bool = ..., + *, + closefd: bool = ..., + ) -> ZstdDecompressionWriter: ... + def copy_stream( + self, + ifh: IO[bytes], + ofh: IO[bytes], + read_size: int = ..., + write_size: int = ..., + ) -> Tuple[int, int]: ... + def decompress_content_dict_chain( + self, frames: list[ByteString] + ) -> bytes: ... + def multi_decompress_to_buffer( + self, + frames: Union[ + BufferWithSegments, + BufferWithSegmentsCollection, + List[ByteString], + ], + decompressed_sizes: ByteString = ..., + threads: int = ..., + ) -> BufferWithSegmentsCollection: ... + +class FrameParameters(object): + content_size: int + window_size: int + dict_id: int + has_checksum: bool + +def estimate_decompression_context_size() -> int: ... +def frame_content_size(data: ByteString) -> int: ... +def frame_header_size(data: ByteString) -> int: ... +def get_frame_parameters(data: ByteString) -> FrameParameters: ... +def train_dictionary( + dict_size: int, + samples: list[ByteString], + k: int = ..., + d: int = ..., + f: int = ..., + split_point: float = ..., + accel: int = ..., + notifications: int = ..., + dict_id: int = ..., + level: int = ..., + steps: int = ..., + threads: int = ..., +) -> ZstdCompressionDict: ... +def open( + filename: Union[bytes, str, os.PathLike, BinaryIO], + mode: str = ..., + cctx: Optional[ZstdCompressor] = ..., + dctx: Optional[ZstdDecompressor] = ..., + encoding: Optional[str] = ..., + errors: Optional[str] = ..., + newline: Optional[str] = ..., + closefd: bool = ..., +): ... +def compress(data: ByteString, level: int = ...) -> bytes: ... +def decompress(data: ByteString, max_output_size: int = ...) -> bytes: ... diff --git a/env-llmeval/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ea909591162cfc4de74ea6b5e3a6c2150b3fca3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f736c2da0799eef4cf7b2c0d75a19cb52f91fc21 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/zstandard/backend_cffi.py b/env-llmeval/lib/python3.10/site-packages/zstandard/backend_cffi.py new file mode 100644 index 0000000000000000000000000000000000000000..7137542f189cf842006b5da461583f0486a94493 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/zstandard/backend_cffi.py @@ -0,0 +1,4477 @@ +# Copyright (c) 2016-present, Gregory Szorc +# All rights reserved. 
+# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +"""Python interface to the Zstandard (zstd) compression library.""" + +from __future__ import absolute_import, unicode_literals + +# This should match what the C extension exports. +__all__ = [ + "BufferSegment", + "BufferSegments", + "BufferWithSegments", + "BufferWithSegmentsCollection", + "ZstdCompressionChunker", + "ZstdCompressionDict", + "ZstdCompressionObj", + "ZstdCompressionParameters", + "ZstdCompressionReader", + "ZstdCompressionWriter", + "ZstdCompressor", + "ZstdDecompressionObj", + "ZstdDecompressionReader", + "ZstdDecompressionWriter", + "ZstdDecompressor", + "ZstdError", + "FrameParameters", + "backend_features", + "estimate_decompression_context_size", + "frame_content_size", + "frame_header_size", + "get_frame_parameters", + "train_dictionary", + # Constants. + "FLUSH_BLOCK", + "FLUSH_FRAME", + "COMPRESSOBJ_FLUSH_FINISH", + "COMPRESSOBJ_FLUSH_BLOCK", + "ZSTD_VERSION", + "FRAME_HEADER", + "CONTENTSIZE_UNKNOWN", + "CONTENTSIZE_ERROR", + "MAX_COMPRESSION_LEVEL", + "COMPRESSION_RECOMMENDED_INPUT_SIZE", + "COMPRESSION_RECOMMENDED_OUTPUT_SIZE", + "DECOMPRESSION_RECOMMENDED_INPUT_SIZE", + "DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE", + "MAGIC_NUMBER", + "BLOCKSIZELOG_MAX", + "BLOCKSIZE_MAX", + "WINDOWLOG_MIN", + "WINDOWLOG_MAX", + "CHAINLOG_MIN", + "CHAINLOG_MAX", + "HASHLOG_MIN", + "HASHLOG_MAX", + "MINMATCH_MIN", + "MINMATCH_MAX", + "SEARCHLOG_MIN", + "SEARCHLOG_MAX", + "SEARCHLENGTH_MIN", + "SEARCHLENGTH_MAX", + "TARGETLENGTH_MIN", + "TARGETLENGTH_MAX", + "LDM_MINMATCH_MIN", + "LDM_MINMATCH_MAX", + "LDM_BUCKETSIZELOG_MAX", + "STRATEGY_FAST", + "STRATEGY_DFAST", + "STRATEGY_GREEDY", + "STRATEGY_LAZY", + "STRATEGY_LAZY2", + "STRATEGY_BTLAZY2", + "STRATEGY_BTOPT", + "STRATEGY_BTULTRA", + "STRATEGY_BTULTRA2", + "DICT_TYPE_AUTO", + "DICT_TYPE_RAWCONTENT", + "DICT_TYPE_FULLDICT", + "FORMAT_ZSTD1", + "FORMAT_ZSTD1_MAGICLESS", +] + +import io +import os + +from ._cffi import ( # type: ignore + ffi, + lib, +) + + +backend_features = set() # type: ignore + +COMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_CStreamInSize() +COMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_CStreamOutSize() +DECOMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_DStreamInSize() +DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_DStreamOutSize() + +new_nonzero = ffi.new_allocator(should_clear_after_alloc=False) + + +MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel() +MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER +FRAME_HEADER = b"\x28\xb5\x2f\xfd" +CONTENTSIZE_UNKNOWN = lib.ZSTD_CONTENTSIZE_UNKNOWN +CONTENTSIZE_ERROR = lib.ZSTD_CONTENTSIZE_ERROR +ZSTD_VERSION = ( + lib.ZSTD_VERSION_MAJOR, + lib.ZSTD_VERSION_MINOR, + lib.ZSTD_VERSION_RELEASE, +) + +BLOCKSIZELOG_MAX = lib.ZSTD_BLOCKSIZELOG_MAX +BLOCKSIZE_MAX = lib.ZSTD_BLOCKSIZE_MAX +WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN +WINDOWLOG_MAX = lib.ZSTD_WINDOWLOG_MAX +CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN +CHAINLOG_MAX = lib.ZSTD_CHAINLOG_MAX +HASHLOG_MIN = lib.ZSTD_HASHLOG_MIN +HASHLOG_MAX = lib.ZSTD_HASHLOG_MAX +MINMATCH_MIN = lib.ZSTD_MINMATCH_MIN +MINMATCH_MAX = lib.ZSTD_MINMATCH_MAX +SEARCHLOG_MIN = lib.ZSTD_SEARCHLOG_MIN +SEARCHLOG_MAX = lib.ZSTD_SEARCHLOG_MAX +SEARCHLENGTH_MIN = lib.ZSTD_MINMATCH_MIN +SEARCHLENGTH_MAX = lib.ZSTD_MINMATCH_MAX +TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN +TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX +LDM_MINMATCH_MIN = lib.ZSTD_LDM_MINMATCH_MIN +LDM_MINMATCH_MAX = lib.ZSTD_LDM_MINMATCH_MAX +LDM_BUCKETSIZELOG_MAX = 
lib.ZSTD_LDM_BUCKETSIZELOG_MAX + +STRATEGY_FAST = lib.ZSTD_fast +STRATEGY_DFAST = lib.ZSTD_dfast +STRATEGY_GREEDY = lib.ZSTD_greedy +STRATEGY_LAZY = lib.ZSTD_lazy +STRATEGY_LAZY2 = lib.ZSTD_lazy2 +STRATEGY_BTLAZY2 = lib.ZSTD_btlazy2 +STRATEGY_BTOPT = lib.ZSTD_btopt +STRATEGY_BTULTRA = lib.ZSTD_btultra +STRATEGY_BTULTRA2 = lib.ZSTD_btultra2 + +DICT_TYPE_AUTO = lib.ZSTD_dct_auto +DICT_TYPE_RAWCONTENT = lib.ZSTD_dct_rawContent +DICT_TYPE_FULLDICT = lib.ZSTD_dct_fullDict + +FORMAT_ZSTD1 = lib.ZSTD_f_zstd1 +FORMAT_ZSTD1_MAGICLESS = lib.ZSTD_f_zstd1_magicless + +FLUSH_BLOCK = 0 +FLUSH_FRAME = 1 + +COMPRESSOBJ_FLUSH_FINISH = 0 +COMPRESSOBJ_FLUSH_BLOCK = 1 + + +def _cpu_count(): + # os.cpu_count() was introducd in Python 3.4. + try: + return os.cpu_count() or 0 + except AttributeError: + pass + + # Linux. + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except (AttributeError, ValueError): + pass + + # TODO implement on other platforms. + return 0 + + +class BufferSegment: + """Represents a segment within a ``BufferWithSegments``. + + This type is essentially a reference to N bytes within a + ``BufferWithSegments``. + + The object conforms to the buffer protocol. + """ + + @property + def offset(self): + """The byte offset of this segment within its parent buffer.""" + raise NotImplementedError() + + def __len__(self): + """Obtain the length of the segment, in bytes.""" + raise NotImplementedError() + + def tobytes(self): + """Obtain bytes copy of this segment.""" + raise NotImplementedError() + + +class BufferSegments: + """Represents an array of ``(offset, length)`` integers. + + This type is effectively an index used by :py:class:`BufferWithSegments`. + + The array members are 64-bit unsigned integers using host/native bit order. + + Instances conform to the buffer protocol. + """ + + +class BufferWithSegments: + """A memory buffer containing N discrete items of known lengths. + + This type is essentially a fixed size memory address and an array + of 2-tuples of ``(offset, length)`` 64-bit unsigned native-endian + integers defining the byte offset and length of each segment within + the buffer. + + Instances behave like containers. + + Instances also conform to the buffer protocol. So a reference to the + backing bytes can be obtained via ``memoryview(o)``. A *copy* of the + backing bytes can be obtained via ``.tobytes()``. + + This type exists to facilitate operations against N>1 items without + the overhead of Python object creation and management. Used with + APIs like :py:meth:`ZstdDecompressor.multi_decompress_to_buffer`, it + is possible to decompress many objects in parallel without the GIL + held, leading to even better performance. + """ + + @property + def size(self): + """Total sizein bytes of the backing buffer.""" + raise NotImplementedError() + + def __len__(self): + raise NotImplementedError() + + def __getitem__(self, i): + """Obtains a segment within the buffer. + + The returned object references memory within this buffer. + + :param i: + Integer index of segment to retrieve. + :return: + :py:class:`BufferSegment` + """ + raise NotImplementedError() + + def segments(self): + """Obtain the array of ``(offset, length)`` segments in the buffer. + + :return: + :py:class:`BufferSegments` + """ + raise NotImplementedError() + + def tobytes(self): + """Obtain bytes copy of this instance.""" + raise NotImplementedError() + + +class BufferWithSegmentsCollection: + """A virtual spanning view over multiple BufferWithSegments. 
+ + Instances are constructed from 1 or more :py:class:`BufferWithSegments` + instances. The resulting object behaves like an ordered sequence whose + members are the segments within each ``BufferWithSegments``. + + If the object is composed of 2 ``BufferWithSegments`` instances with the + first having 2 segments and the second have 3 segments, then ``b[0]`` + and ``b[1]`` access segments in the first object and ``b[2]``, ``b[3]``, + and ``b[4]`` access segments from the second. + """ + + def __len__(self): + """The number of segments within all ``BufferWithSegments``.""" + raise NotImplementedError() + + def __getitem__(self, i): + """Obtain the ``BufferSegment`` at an offset.""" + raise NotImplementedError() + + +class ZstdError(Exception): + pass + + +def _zstd_error(zresult): + # Resolves to bytes on Python 2 and 3. We use the string for formatting + # into error messages, which will be literal unicode. So convert it to + # unicode. + return ffi.string(lib.ZSTD_getErrorName(zresult)).decode("utf-8") + + +def _make_cctx_params(params): + res = lib.ZSTD_createCCtxParams() + if res == ffi.NULL: + raise MemoryError() + + res = ffi.gc(res, lib.ZSTD_freeCCtxParams) + + attrs = [ + (lib.ZSTD_c_format, params.format), + (lib.ZSTD_c_compressionLevel, params.compression_level), + (lib.ZSTD_c_windowLog, params.window_log), + (lib.ZSTD_c_hashLog, params.hash_log), + (lib.ZSTD_c_chainLog, params.chain_log), + (lib.ZSTD_c_searchLog, params.search_log), + (lib.ZSTD_c_minMatch, params.min_match), + (lib.ZSTD_c_targetLength, params.target_length), + (lib.ZSTD_c_strategy, params.strategy), + (lib.ZSTD_c_contentSizeFlag, params.write_content_size), + (lib.ZSTD_c_checksumFlag, params.write_checksum), + (lib.ZSTD_c_dictIDFlag, params.write_dict_id), + (lib.ZSTD_c_nbWorkers, params.threads), + (lib.ZSTD_c_jobSize, params.job_size), + (lib.ZSTD_c_overlapLog, params.overlap_log), + (lib.ZSTD_c_forceMaxWindow, params.force_max_window), + (lib.ZSTD_c_enableLongDistanceMatching, params.enable_ldm), + (lib.ZSTD_c_ldmHashLog, params.ldm_hash_log), + (lib.ZSTD_c_ldmMinMatch, params.ldm_min_match), + (lib.ZSTD_c_ldmBucketSizeLog, params.ldm_bucket_size_log), + (lib.ZSTD_c_ldmHashRateLog, params.ldm_hash_rate_log), + ] + + for param, value in attrs: + _set_compression_parameter(res, param, value) + + return res + + +class ZstdCompressionParameters(object): + """Low-level zstd compression parameters. + + This type represents a collection of parameters to control how zstd + compression is performed. + + Instances can be constructed from raw parameters or derived from a + base set of defaults specified from a compression level (recommended) + via :py:meth:`ZstdCompressionParameters.from_level`. + + >>> # Derive compression settings for compression level 7. + >>> params = zstandard.ZstdCompressionParameters.from_level(7) + + >>> # With an input size of 1MB + >>> params = zstandard.ZstdCompressionParameters.from_level(7, source_size=1048576) + + Using ``from_level()``, it is also possible to override individual compression + parameters or to define additional settings that aren't automatically derived. 
+ e.g.: + + >>> params = zstandard.ZstdCompressionParameters.from_level(4, window_log=10) + >>> params = zstandard.ZstdCompressionParameters.from_level(5, threads=4) + + Or you can define low-level compression settings directly: + + >>> params = zstandard.ZstdCompressionParameters(window_log=12, enable_ldm=True) + + Once a ``ZstdCompressionParameters`` instance is obtained, it can be used to + configure a compressor: + + >>> cctx = zstandard.ZstdCompressor(compression_params=params) + + Some of these are very low-level settings. It may help to consult the official + zstandard documentation for their behavior. Look for the ``ZSTD_p_*`` constants + in ``zstd.h`` (https://github.com/facebook/zstd/blob/dev/lib/zstd.h). + """ + + @staticmethod + def from_level(level, source_size=0, dict_size=0, **kwargs): + """Create compression parameters from a compression level. + + :param level: + Integer compression level. + :param source_size: + Integer size in bytes of source to be compressed. + :param dict_size: + Integer size in bytes of compression dictionary to use. + :return: + :py:class:`ZstdCompressionParameters` + """ + params = lib.ZSTD_getCParams(level, source_size, dict_size) + + args = { + "window_log": "windowLog", + "chain_log": "chainLog", + "hash_log": "hashLog", + "search_log": "searchLog", + "min_match": "minMatch", + "target_length": "targetLength", + "strategy": "strategy", + } + + for arg, attr in args.items(): + if arg not in kwargs: + kwargs[arg] = getattr(params, attr) + + return ZstdCompressionParameters(**kwargs) + + def __init__( + self, + format=0, + compression_level=0, + window_log=0, + hash_log=0, + chain_log=0, + search_log=0, + min_match=0, + target_length=0, + strategy=-1, + write_content_size=1, + write_checksum=0, + write_dict_id=0, + job_size=0, + overlap_log=-1, + force_max_window=0, + enable_ldm=0, + ldm_hash_log=0, + ldm_min_match=0, + ldm_bucket_size_log=0, + ldm_hash_rate_log=-1, + threads=0, + ): + params = lib.ZSTD_createCCtxParams() + if params == ffi.NULL: + raise MemoryError() + + params = ffi.gc(params, lib.ZSTD_freeCCtxParams) + + self._params = params + + if threads < 0: + threads = _cpu_count() + + # We need to set ZSTD_c_nbWorkers before ZSTD_c_jobSize and ZSTD_c_overlapLog + # because setting ZSTD_c_nbWorkers resets the other parameters. 
+ _set_compression_parameter(params, lib.ZSTD_c_nbWorkers, threads) + + _set_compression_parameter(params, lib.ZSTD_c_format, format) + _set_compression_parameter( + params, lib.ZSTD_c_compressionLevel, compression_level + ) + _set_compression_parameter(params, lib.ZSTD_c_windowLog, window_log) + _set_compression_parameter(params, lib.ZSTD_c_hashLog, hash_log) + _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log) + _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log) + _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match) + _set_compression_parameter( + params, lib.ZSTD_c_targetLength, target_length + ) + + if strategy == -1: + strategy = 0 + + _set_compression_parameter(params, lib.ZSTD_c_strategy, strategy) + _set_compression_parameter( + params, lib.ZSTD_c_contentSizeFlag, write_content_size + ) + _set_compression_parameter( + params, lib.ZSTD_c_checksumFlag, write_checksum + ) + _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id) + _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size) + + if overlap_log == -1: + overlap_log = 0 + + _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log) + _set_compression_parameter( + params, lib.ZSTD_c_forceMaxWindow, force_max_window + ) + _set_compression_parameter( + params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm + ) + _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log) + _set_compression_parameter( + params, lib.ZSTD_c_ldmMinMatch, ldm_min_match + ) + _set_compression_parameter( + params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log + ) + + if ldm_hash_rate_log == -1: + ldm_hash_rate_log = 0 + + _set_compression_parameter( + params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log + ) + + @property + def format(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_format) + + @property + def compression_level(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_compressionLevel + ) + + @property + def window_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_windowLog) + + @property + def hash_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_hashLog) + + @property + def chain_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_chainLog) + + @property + def search_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_searchLog) + + @property + def min_match(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_minMatch) + + @property + def target_length(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_targetLength) + + @property + def strategy(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_strategy) + + @property + def write_content_size(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_contentSizeFlag + ) + + @property + def write_checksum(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_checksumFlag) + + @property + def write_dict_id(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_dictIDFlag) + + @property + def job_size(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_jobSize) + + @property + def overlap_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_overlapLog) + + @property + def force_max_window(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_forceMaxWindow + ) + + @property + def enable_ldm(self): + return 
_get_compression_parameter( + self._params, lib.ZSTD_c_enableLongDistanceMatching + ) + + @property + def ldm_hash_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashLog) + + @property + def ldm_min_match(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_ldmMinMatch) + + @property + def ldm_bucket_size_log(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_ldmBucketSizeLog + ) + + @property + def ldm_hash_rate_log(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_ldmHashRateLog + ) + + @property + def threads(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_nbWorkers) + + def estimated_compression_context_size(self): + """Estimated size in bytes needed to compress with these parameters.""" + return lib.ZSTD_estimateCCtxSize_usingCCtxParams(self._params) + + +def estimate_decompression_context_size(): + """Estimate the memory size requirements for a decompressor instance. + + :return: + Integer number of bytes. + """ + return lib.ZSTD_estimateDCtxSize() + + +def _set_compression_parameter(params, param, value): + zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to set compression context parameter: %s" + % _zstd_error(zresult) + ) + + +def _get_compression_parameter(params, param): + result = ffi.new("int *") + + zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to get compression context parameter: %s" + % _zstd_error(zresult) + ) + + return result[0] + + +class ZstdCompressionWriter(object): + """Writable compressing stream wrapper. + + ``ZstdCompressionWriter`` is a write-only stream interface for writing + compressed data to another stream. + + This type conforms to the ``io.RawIOBase`` interface and should be usable + by any type that operates against a *file-object* (``typing.BinaryIO`` + in Python type hinting speak). Only methods that involve writing will do + useful things. + + As data is written to this stream (e.g. via ``write()``), that data + is sent to the compressor. As compressed data becomes available from + the compressor, it is sent to the underlying stream by calling its + ``write()`` method. + + Both ``write()`` and ``flush()`` return the number of bytes written to the + object's ``write()``. In many cases, small inputs do not accumulate enough + data to cause a write and ``write()`` will return ``0``. + + Calling ``close()`` will mark the stream as closed and subsequent I/O + operations will raise ``ValueError`` (per the documented behavior of + ``io.RawIOBase``). ``close()`` will also call ``close()`` on the underlying + stream if such a method exists and the instance was constructed with + ``closefd=True`` + + Instances are obtained by calling :py:meth:`ZstdCompressor.stream_writer`. + + Typically usage is as follows: + + >>> cctx = zstandard.ZstdCompressor(level=10) + >>> compressor = cctx.stream_writer(fh) + >>> compressor.write(b"chunk 0\\n") + >>> compressor.write(b"chunk 1\\n") + >>> compressor.flush() + >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\n`` at this point. + >>> # Receiver is also expecting more data in the zstd *frame*. + >>> + >>> compressor.write(b"chunk 2\\n") + >>> compressor.flush(zstandard.FLUSH_FRAME) + >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\nchunk 2``. + >>> # Receiver is expecting no more data, as the zstd frame is closed. 
+    >>> # Any future calls to ``write()`` at this point will construct a new
+    >>> # zstd frame.
+
+    Instances can be used as context managers. Exiting the context manager is
+    the equivalent of calling ``close()``, which is equivalent to calling
+    ``flush(zstandard.FLUSH_FRAME)``:
+
+    >>> cctx = zstandard.ZstdCompressor(level=10)
+    >>> with cctx.stream_writer(fh) as compressor:
+    ...     compressor.write(b'chunk 0')
+    ...     compressor.write(b'chunk 1')
+    ...     ...
+
+    .. important::
+
+       If ``flush(FLUSH_FRAME)`` is not called, emitted data doesn't
+       constitute a full zstd *frame* and consumers of this data may complain
+       about malformed input. It is recommended to use instances as a context
+       manager to ensure *frames* are properly finished.
+
+    If the size of the data being fed to this streaming compressor is known,
+    you can declare it before compression begins:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh, size=data_len) as compressor:
+    ...     compressor.write(chunk0)
+    ...     compressor.write(chunk1)
+    ...     ...
+
+    Declaring the size of the source data allows compression parameters to
+    be tuned. And if ``write_content_size`` is used, it also results in the
+    content size being written into the frame header of the output data.
+
+    The size of chunks being ``write()`` to the destination can be specified:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh, write_size=32768) as compressor:
+    ...     ...
+
+    To see how much memory is being used by the streaming compressor:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh) as compressor:
+    ...     ...
+    ...     byte_size = compressor.memory_size()
+
+    The total number of bytes written so far is exposed via ``tell()``:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh) as compressor:
+    ...     ...
+    ...     total_written = compressor.tell()
+
+    ``stream_writer()`` accepts a ``write_return_read`` boolean argument to
+    control the return value of ``write()``. When ``False`` (the default),
+    ``write()`` returns the number of bytes that were ``write()``'en to the
+    underlying object. When ``True``, ``write()`` returns the number of bytes
+    read from the input that were subsequently written to the compressor.
+    ``True`` is the *proper* behavior for ``write()`` as specified by the
+    ``io.RawIOBase`` interface and will become the default value in a future
+    release.
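+
+    As a minimal sketch (``fh`` stands in for any writable binary stream),
+    the value returned by ``write()`` with ``write_return_read=True`` counts
+    consumed input bytes rather than bytes forwarded downstream:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> with cctx.stream_writer(fh, write_return_read=True) as compressor:
+    ...     consumed = compressor.write(b"data to compress")
+    ...     # ``consumed`` is how much input was accepted, not how many
+    ...     # compressed bytes reached ``fh``.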
+ """ + + def __init__( + self, + compressor, + writer, + source_size, + write_size, + write_return_read, + closefd=True, + ): + self._compressor = compressor + self._writer = writer + self._write_size = write_size + self._write_return_read = bool(write_return_read) + self._closefd = bool(closefd) + self._entered = False + self._closing = False + self._closed = False + self._bytes_compressed = 0 + + self._dst_buffer = ffi.new("char[]", write_size) + self._out_buffer = ffi.new("ZSTD_outBuffer *") + self._out_buffer.dst = self._dst_buffer + self._out_buffer.size = len(self._dst_buffer) + self._out_buffer.pos = 0 + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + def __enter__(self): + if self._closed: + raise ValueError("stream is closed") + + if self._entered: + raise ZstdError("cannot __enter__ multiple times") + + self._entered = True + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + self.close() + self._compressor = None + + return False + + def __iter__(self): + raise io.UnsupportedOperation() + + def __next__(self): + raise io.UnsupportedOperation() + + def memory_size(self): + return lib.ZSTD_sizeof_CCtx(self._compressor._cctx) + + def fileno(self): + f = getattr(self._writer, "fileno", None) + if f: + return f() + else: + raise OSError("fileno not available on underlying writer") + + def close(self): + if self._closed: + return + + try: + self._closing = True + self.flush(FLUSH_FRAME) + finally: + self._closing = False + self._closed = True + + # Call close() on underlying stream as well. + f = getattr(self._writer, "close", None) + if self._closefd and f: + f() + + @property + def closed(self): + return self._closed + + def isatty(self): + return False + + def readable(self): + return False + + def readline(self, size=-1): + raise io.UnsupportedOperation() + + def readlines(self, hint=-1): + raise io.UnsupportedOperation() + + def seek(self, offset, whence=None): + raise io.UnsupportedOperation() + + def seekable(self): + return False + + def truncate(self, size=None): + raise io.UnsupportedOperation() + + def writable(self): + return True + + def writelines(self, lines): + raise NotImplementedError("writelines() is not yet implemented") + + def read(self, size=-1): + raise io.UnsupportedOperation() + + def readall(self): + raise io.UnsupportedOperation() + + def readinto(self, b): + raise io.UnsupportedOperation() + + def write(self, data): + """Send data to the compressor and possibly to the inner stream.""" + if self._closed: + raise ValueError("stream is closed") + + total_write = 0 + + data_buffer = ffi.from_buffer(data) + + in_buffer = ffi.new("ZSTD_inBuffer *") + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + out_buffer = self._out_buffer + out_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, + out_buffer, + in_buffer, + lib.ZSTD_e_continue, + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + self._writer.write( + ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + ) + total_write += out_buffer.pos + self._bytes_compressed += out_buffer.pos + out_buffer.pos = 0 + + if self._write_return_read: + return in_buffer.pos + else: + return total_write + + def flush(self, flush_mode=FLUSH_BLOCK): + """Evict data 
from the compressor's internal state and write it to the inner stream.
+
+        Calling this method may result in 0 or more ``write()`` calls to the
+        inner stream.
+
+        This method will also call ``flush()`` on the inner stream, if such a
+        method exists.
+
+        :param flush_mode:
+            How to flush the zstd compressor.
+
+            ``zstandard.FLUSH_BLOCK`` will flush data already sent to the
+            compressor but not emitted to the inner stream. The stream is still
+            writable after calling this. This is the default behavior.
+
+            See documentation for other ``zstandard.FLUSH_*`` constants for more
+            flushing options.
+        :return:
+            Integer number of bytes written to the inner stream.
+        """
+
+        if flush_mode == FLUSH_BLOCK:
+            flush = lib.ZSTD_e_flush
+        elif flush_mode == FLUSH_FRAME:
+            flush = lib.ZSTD_e_end
+        else:
+            raise ValueError("unknown flush_mode: %r" % flush_mode)
+
+        if self._closed:
+            raise ValueError("stream is closed")
+
+        total_write = 0
+
+        out_buffer = self._out_buffer
+        out_buffer.pos = 0
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        in_buffer.src = ffi.NULL
+        in_buffer.size = 0
+        in_buffer.pos = 0
+
+        while True:
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, out_buffer, in_buffer, flush
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
+
+            if out_buffer.pos:
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
+                total_write += out_buffer.pos
+                self._bytes_compressed += out_buffer.pos
+                out_buffer.pos = 0
+
+            if not zresult:
+                break
+
+        f = getattr(self._writer, "flush", None)
+        if f and not self._closing:
+            f()
+
+        return total_write
+
+    def tell(self):
+        return self._bytes_compressed
+
+
+class ZstdCompressionObj(object):
+    """A compressor conforming to the API in Python's standard library.
+
+    This type implements an API similar to compression types in Python's
+    standard library such as ``zlib.compressobj`` and ``bz2.BZ2Compressor``.
+    This enables existing code targeting the standard library API to swap
+    in this type to achieve zstd compression.
+
+    .. important::
+
+       The design of this API is not ideal for optimal performance.
+
+       The reason performance is not optimal is because the API is limited to
+       returning a single buffer holding compressed data. When compressing
+       data, we don't know how much data will be emitted. So in order to
+       capture all this data in a single buffer, we need to perform buffer
+       reallocations and/or extra memory copies. This can add significant
+       overhead depending on the size or nature of the compressed data and
+       how often your application calls this type.
+
+       If performance is critical, consider an API like
+       :py:meth:`ZstdCompressor.stream_reader`,
+       :py:meth:`ZstdCompressor.stream_writer`,
+       :py:meth:`ZstdCompressor.chunker`, or
+       :py:meth:`ZstdCompressor.read_to_iter`, which result in less overhead
+       managing buffers.
+
+    Instances are obtained by calling :py:meth:`ZstdCompressor.compressobj`.
+
+    Here is how this API should be used:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> cobj = cctx.compressobj()
+    >>> data = cobj.compress(b"raw input 0")
+    >>> data = cobj.compress(b"raw input 1")
+    >>> data = cobj.flush()
+
+    Or to flush blocks:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> cobj = cctx.compressobj()
+    >>> data = cobj.compress(b"chunk in first block")
+    >>> data = cobj.flush(zstandard.COMPRESSOBJ_FLUSH_BLOCK)
+    >>> data = cobj.compress(b"chunk in second block")
+    >>> data = cobj.flush()
+
+    For best performance results, keep input chunks under 256KB. This avoids
+    extra allocations for a large output object.
+
+    It is possible to declare the input size of the data that will be fed
+    into the compressor:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> cobj = cctx.compressobj(size=6)
+    >>> data = cobj.compress(b"foobar")
+    >>> data = cobj.flush()
+    """
+
+    def compress(self, data):
+        """Send data to the compressor.
+
+        This method receives bytes to feed to the compressor and returns
+        bytes constituting zstd compressed data.
+
+        The zstd compressor accumulates bytes and the returned bytes may be
+        substantially smaller or larger than the size of the input data on
+        any given call. The returned value may be the empty byte string
+        (``b""``).
+
+        :param data:
+            Data to write to the compressor.
+        :return:
+            Compressed data.
+        """
+        if self._finished:
+            raise ZstdError("cannot call compress() after compressor finished")
+
+        data_buffer = ffi.from_buffer(data)
+        source = ffi.new("ZSTD_inBuffer *")
+        source.src = data_buffer
+        source.size = len(data_buffer)
+        source.pos = 0
+
+        chunks = []
+
+        while source.pos < len(data):
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+        return b"".join(chunks)
+
+    def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
+        """Emit data accumulated in the compressor that hasn't been outputted yet.
+
+        The ``flush_mode`` argument controls how to end the stream.
+
+        ``zstandard.COMPRESSOBJ_FLUSH_FINISH`` (the default) ends the
+        compression stream and finishes a zstd frame. Once this type of flush
+        is performed, ``compress()`` and ``flush()`` can no longer be called.
+        This type of flush **must** be called to end the compression context. If
+        not called, the emitted data may be incomplete and may not be readable
+        by a decompressor.
+
+        ``zstandard.COMPRESSOBJ_FLUSH_BLOCK`` will flush a zstd block. This
+        ensures that all data fed to this instance will have been emitted and
+        can be decoded by a decompressor. Flushes of this type can be performed
+        multiple times. The next call to ``compress()`` will begin a new zstd
+        block.
+
+        :param flush_mode:
+            How to flush the zstd compressor.
+        :return:
+            Compressed data.
+        """
+        if flush_mode not in (
+            COMPRESSOBJ_FLUSH_FINISH,
+            COMPRESSOBJ_FLUSH_BLOCK,
+        ):
+            raise ValueError("flush mode not recognized")
+
+        if self._finished:
+            raise ZstdError("compressor object already finished")
+
+        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
+            z_flush_mode = lib.ZSTD_e_flush
+        elif flush_mode == COMPRESSOBJ_FLUSH_FINISH:
+            z_flush_mode = lib.ZSTD_e_end
+            self._finished = True
+        else:
+            raise ZstdError("unhandled flush mode")
+
+        assert self._out.pos == 0
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        in_buffer.src = ffi.NULL
+        in_buffer.size = 0
+        in_buffer.pos = 0
+
+        chunks = []
+
+        while True:
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, self._out, in_buffer, z_flush_mode
+            )
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError(
+                    "error ending compression stream: %s" % _zstd_error(zresult)
+                )
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+            if not zresult:
+                break
+
+        return b"".join(chunks)
+
+
+class ZstdCompressionChunker(object):
+    """Compress data to uniformly sized chunks.
+ + This type allows you to iteratively feed chunks of data into a compressor + and produce output chunks of uniform size. + + ``compress()``, ``flush()``, and ``finish()`` all return an iterator of + ``bytes`` instances holding compressed data. The iterator may be empty. + Callers MUST iterate through all elements of the returned iterator before + performing another operation on the object or else the compressor's + internal state may become confused. This can result in an exception being + raised or malformed data being emitted. + + All chunks emitted by ``compress()`` will have a length of the configured + chunk size. + + ``flush()`` and ``finish()`` may return a final chunk smaller than + the configured chunk size. + + Instances are obtained by calling :py:meth:`ZstdCompressor.chunker`. + + Here is how the API should be used: + + >>> cctx = zstandard.ZstdCompressor() + >>> chunker = cctx.chunker(chunk_size=32768) + >>> + >>> with open(path, 'rb') as fh: + ... while True: + ... in_chunk = fh.read(32768) + ... if not in_chunk: + ... break + ... + ... for out_chunk in chunker.compress(in_chunk): + ... # Do something with output chunk of size 32768. + ... + ... for out_chunk in chunker.finish(): + ... # Do something with output chunks that finalize the zstd frame. + + This compressor type is often a better alternative to + :py:class:`ZstdCompressor.compressobj` because it has better performance + properties. + + ``compressobj()`` will emit output data as it is available. This results + in a *stream* of output chunks of varying sizes. The consistency of the + output chunk size with ``chunker()`` is more appropriate for many usages, + such as sending compressed data to a socket. + + ``compressobj()`` may also perform extra memory reallocations in order + to dynamically adjust the sizes of the output chunks. Since ``chunker()`` + output chunks are all the same size (except for flushed or final chunks), + there is less memory allocation/copying overhead. + """ + + def __init__(self, compressor, chunk_size): + self._compressor = compressor + self._out = ffi.new("ZSTD_outBuffer *") + self._dst_buffer = ffi.new("char[]", chunk_size) + self._out.dst = self._dst_buffer + self._out.size = chunk_size + self._out.pos = 0 + + self._in = ffi.new("ZSTD_inBuffer *") + self._in.src = ffi.NULL + self._in.size = 0 + self._in.pos = 0 + self._finished = False + + def compress(self, data): + """Feed new input data into the compressor. + + :param data: + Data to feed to compressor. + :return: + Iterator of ``bytes`` representing chunks of compressed data. + """ + if self._finished: + raise ZstdError("cannot call compress() after compression finished") + + if self._in.src != ffi.NULL: + raise ZstdError( + "cannot perform operation before consuming output " + "from previous operation" + ) + + data_buffer = ffi.from_buffer(data) + + if not len(data_buffer): + return + + self._in.src = data_buffer + self._in.size = len(data_buffer) + self._in.pos = 0 + + while self._in.pos < self._in.size: + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, self._out, self._in, lib.ZSTD_e_continue + ) + + if self._in.pos == self._in.size: + self._in.src = ffi.NULL + self._in.size = 0 + self._in.pos = 0 + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if self._out.pos == self._out.size: + yield ffi.buffer(self._out.dst, self._out.pos)[:] + self._out.pos = 0 + + def flush(self): + """Flushes all data currently in the compressor. 
+ + :return: + Iterator of ``bytes`` of compressed data. + """ + if self._finished: + raise ZstdError("cannot call flush() after compression finished") + + if self._in.src != ffi.NULL: + raise ZstdError( + "cannot call flush() before consuming output from " + "previous operation" + ) + + while True: + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if self._out.pos: + yield ffi.buffer(self._out.dst, self._out.pos)[:] + self._out.pos = 0 + + if not zresult: + return + + def finish(self): + """Signals the end of input data. + + No new data can be compressed after this method is called. + + This method will flush buffered data and finish the zstd frame. + + :return: + Iterator of ``bytes`` of compressed data. + """ + if self._finished: + raise ZstdError("cannot call finish() after compression finished") + + if self._in.src != ffi.NULL: + raise ZstdError( + "cannot call finish() before consuming output from " + "previous operation" + ) + + while True: + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if self._out.pos: + yield ffi.buffer(self._out.dst, self._out.pos)[:] + self._out.pos = 0 + + if not zresult: + self._finished = True + return + + +class ZstdCompressionReader(object): + """Readable compressing stream wrapper. + + ``ZstdCompressionReader`` is a read-only stream interface for obtaining + compressed data from a source. + + This type conforms to the ``io.RawIOBase`` interface and should be usable + by any type that operates against a *file-object* (``typing.BinaryIO`` + in Python type hinting speak). + + Instances are neither writable nor seekable (even if the underlying + source is seekable). ``readline()`` and ``readlines()`` are not implemented + because they don't make sense for compressed data. ``tell()`` returns the + number of compressed bytes emitted so far. + + Instances are obtained by calling :py:meth:`ZstdCompressor.stream_reader`. + + In this example, we open a file for reading and then wrap that file + handle with a stream from which compressed data can be ``read()``. + + >>> with open(path, 'rb') as fh: + ... cctx = zstandard.ZstdCompressor() + ... reader = cctx.stream_reader(fh) + ... while True: + ... chunk = reader.read(16384) + ... if not chunk: + ... break + ... + ... # Do something with compressed chunk. + + Instances can also be used as context managers: + + >>> with open(path, 'rb') as fh: + ... cctx = zstandard.ZstdCompressor() + ... with cctx.stream_reader(fh) as reader: + ... while True: + ... chunk = reader.read(16384) + ... if not chunk: + ... break + ... + ... # Do something with compressed chunk. + + When the context manager exits or ``close()`` is called, the stream is + closed, underlying resources are released, and future operations against + the compression stream will fail. + + ``stream_reader()`` accepts a ``size`` argument specifying how large the + input stream is. This is used to adjust compression parameters so they are + tailored to the source size. e.g. + + >>> with open(path, 'rb') as fh: + ... cctx = zstandard.ZstdCompressor() + ... with cctx.stream_reader(fh, size=os.stat(path).st_size) as reader: + ... ... 
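+
+    The source does not have to be a stream. As a rough sketch (the inline
+    bytes literal is purely illustrative), any object conforming to the
+    buffer protocol can be wrapped and drained in a single call:
+
+    >>> cctx = zstandard.ZstdCompressor()
+    >>> reader = cctx.stream_reader(b"data to compress")
+    >>> compressed = reader.readall()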
+ + If the ``source`` is a stream, you can specify how large ``read()`` + requests to that stream should be via the ``read_size`` argument. + It defaults to ``zstandard.COMPRESSION_RECOMMENDED_INPUT_SIZE``. e.g. + + >>> with open(path, 'rb') as fh: + ... cctx = zstandard.ZstdCompressor() + ... # Will perform fh.read(8192) when obtaining data to feed into the + ... # compressor. + ... with cctx.stream_reader(fh, read_size=8192) as reader: + ... ... + """ + + def __init__(self, compressor, source, read_size, closefd=True): + self._compressor = compressor + self._source = source + self._read_size = read_size + self._closefd = closefd + self._entered = False + self._closed = False + self._bytes_compressed = 0 + self._finished_input = False + self._finished_output = False + + self._in_buffer = ffi.new("ZSTD_inBuffer *") + # Holds a ref so backing bytes in self._in_buffer stay alive. + self._source_buffer = None + + def __enter__(self): + if self._entered: + raise ValueError("cannot __enter__ multiple times") + + if self._closed: + raise ValueError("stream is closed") + + self._entered = True + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + self._compressor = None + self.close() + self._source = None + + return False + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return False + + def readline(self): + raise io.UnsupportedOperation() + + def readlines(self): + raise io.UnsupportedOperation() + + def write(self, data): + raise OSError("stream is not writable") + + def writelines(self, ignored): + raise OSError("stream is not writable") + + def isatty(self): + return False + + def flush(self): + return None + + def close(self): + if self._closed: + return + + self._closed = True + + f = getattr(self._source, "close", None) + if self._closefd and f: + f() + + @property + def closed(self): + return self._closed + + def tell(self): + return self._bytes_compressed + + def readall(self): + chunks = [] + + while True: + chunk = self.read(1048576) + if not chunk: + break + + chunks.append(chunk) + + return b"".join(chunks) + + def __iter__(self): + raise io.UnsupportedOperation() + + def __next__(self): + raise io.UnsupportedOperation() + + next = __next__ + + def _read_input(self): + if self._finished_input: + return + + if hasattr(self._source, "read"): + data = self._source.read(self._read_size) + + if not data: + self._finished_input = True + return + + self._source_buffer = ffi.from_buffer(data) + self._in_buffer.src = self._source_buffer + self._in_buffer.size = len(self._source_buffer) + self._in_buffer.pos = 0 + else: + self._source_buffer = ffi.from_buffer(self._source) + self._in_buffer.src = self._source_buffer + self._in_buffer.size = len(self._source_buffer) + self._in_buffer.pos = 0 + + def _compress_into_buffer(self, out_buffer): + if self._in_buffer.pos >= self._in_buffer.size: + return + + old_pos = out_buffer.pos + + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, + out_buffer, + self._in_buffer, + lib.ZSTD_e_continue, + ) + + self._bytes_compressed += out_buffer.pos - old_pos + + if self._in_buffer.pos == self._in_buffer.size: + self._in_buffer.src = ffi.NULL + self._in_buffer.pos = 0 + self._in_buffer.size = 0 + self._source_buffer = None + + if not hasattr(self._source, "read"): + self._finished_input = True + + if lib.ZSTD_isError(zresult): + raise ZstdError("zstd compress error: %s", _zstd_error(zresult)) + + return out_buffer.pos and out_buffer.pos == out_buffer.size + 
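+    # Note: read() below first drains any input already buffered in
+    # self._in_buffer, then keeps pulling from the source until the
+    # caller-sized output buffer fills or the source is exhausted, and only
+    # then finishes the zstd frame.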
+ def read(self, size=-1): + if self._closed: + raise ValueError("stream is closed") + + if size < -1: + raise ValueError("cannot read negative amounts less than -1") + + if size == -1: + return self.readall() + + if self._finished_output or size == 0: + return b"" + + # Need a dedicated ref to dest buffer otherwise it gets collected. + dst_buffer = ffi.new("char[]", size) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dst_buffer + out_buffer.size = size + out_buffer.pos = 0 + + if self._compress_into_buffer(out_buffer): + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + while not self._finished_input: + self._read_input() + + if self._compress_into_buffer(out_buffer): + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + # EOF + old_pos = out_buffer.pos + + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end + ) + + self._bytes_compressed += out_buffer.pos - old_pos + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s", _zstd_error(zresult) + ) + + if zresult == 0: + self._finished_output = True + + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + def read1(self, size=-1): + if self._closed: + raise ValueError("stream is closed") + + if size < -1: + raise ValueError("cannot read negative amounts less than -1") + + if self._finished_output or size == 0: + return b"" + + # -1 returns arbitrary number of bytes. + if size == -1: + size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE + + dst_buffer = ffi.new("char[]", size) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dst_buffer + out_buffer.size = size + out_buffer.pos = 0 + + # read1() dictates that we can perform at most 1 call to the + # underlying stream to get input. However, we can't satisfy this + # restriction with compression because not all input generates output. + # It is possible to perform a block flush in order to ensure output. + # But this may not be desirable behavior. So we allow multiple read() + # to the underlying stream. But unlike read(), we stop once we have + # any output. + + self._compress_into_buffer(out_buffer) + if out_buffer.pos: + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + while not self._finished_input: + self._read_input() + + # If we've filled the output buffer, return immediately. + if self._compress_into_buffer(out_buffer): + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + # If we've populated the output buffer and we're not at EOF, + # also return, as we've satisfied the read1() limits. + if out_buffer.pos and not self._finished_input: + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + # Else if we're at EOS and we have room left in the buffer, + # fall through to below and try to add more data to the output. + + # EOF. + old_pos = out_buffer.pos + + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end + ) + + self._bytes_compressed += out_buffer.pos - old_pos + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s" % _zstd_error(zresult) + ) + + if zresult == 0: + self._finished_output = True + + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + def readinto(self, b): + if self._closed: + raise ValueError("stream is closed") + + if self._finished_output: + return 0 + + # TODO use writable=True once we require CFFI >= 1.12. 
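+        # Presumably the zero-length ffi.memmove() below also serves as a
+        # writability check: cffi requires the destination of memmove() to be
+        # a writable buffer, so a read-only ``b`` is rejected up front.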
+ dest_buffer = ffi.from_buffer(b) + ffi.memmove(b, b"", 0) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + if self._compress_into_buffer(out_buffer): + return out_buffer.pos + + while not self._finished_input: + self._read_input() + if self._compress_into_buffer(out_buffer): + return out_buffer.pos + + # EOF. + old_pos = out_buffer.pos + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end + ) + + self._bytes_compressed += out_buffer.pos - old_pos + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s", _zstd_error(zresult) + ) + + if zresult == 0: + self._finished_output = True + + return out_buffer.pos + + def readinto1(self, b): + if self._closed: + raise ValueError("stream is closed") + + if self._finished_output: + return 0 + + # TODO use writable=True once we require CFFI >= 1.12. + dest_buffer = ffi.from_buffer(b) + ffi.memmove(b, b"", 0) + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + self._compress_into_buffer(out_buffer) + if out_buffer.pos: + return out_buffer.pos + + while not self._finished_input: + self._read_input() + + if self._compress_into_buffer(out_buffer): + return out_buffer.pos + + if out_buffer.pos and not self._finished_input: + return out_buffer.pos + + # EOF. + old_pos = out_buffer.pos + + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end + ) + + self._bytes_compressed += out_buffer.pos - old_pos + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s" % _zstd_error(zresult) + ) + + if zresult == 0: + self._finished_output = True + + return out_buffer.pos + + +class ZstdCompressor(object): + """ + Create an object used to perform Zstandard compression. + + Each instance is essentially a wrapper around a ``ZSTD_CCtx`` from + zstd's C API. + + An instance can compress data various ways. Instances can be used + multiple times. Each compression operation will use the compression + parameters defined at construction time. + + .. note: + + When using a compression dictionary and multiple compression + operations are performed, the ``ZstdCompressionParameters`` derived + from an integer compression ``level`` and the first compressed data's + size will be reused for all subsequent operations. This may not be + desirable if source data sizes vary significantly. + + ``compression_params`` is mutually exclusive with ``level``, + ``write_checksum``, ``write_content_size``, ``write_dict_id``, and + ``threads``. + + Assume that each ``ZstdCompressor`` instance can only handle a single + logical compression operation at the same time. i.e. if you call a method + like ``stream_reader()`` to obtain multiple objects derived from the same + ``ZstdCompressor`` instance and attempt to use them simultaneously, errors + will likely occur. + + If you need to perform multiple logical compression operations and you + can't guarantee those operations are temporally non-overlapping, you need + to obtain multiple ``ZstdCompressor`` instances. + + Unless specified otherwise, assume that no two methods of + ``ZstdCompressor`` instances can be called from multiple Python + threads simultaneously. In other words, assume instances are not thread safe + unless stated otherwise. + + :param level: + Integer compression level. 
Valid values are all negative integers + through 22. Lower values generally yield faster operations with lower + compression ratios. Higher values are generally slower but compress + better. The default is 3, which is what the ``zstd`` CLI uses. Negative + levels effectively engage ``--fast`` mode from the ``zstd`` CLI. + :param dict_data: + A ``ZstdCompressionDict`` to be used to compress with dictionary + data. + :param compression_params: + A ``ZstdCompressionParameters`` instance defining low-level compression + parameters. If defined, this will overwrite the ``level`` argument. + :param write_checksum: + If True, a 4 byte content checksum will be written with the compressed + data, allowing the decompressor to perform content verification. + :param write_content_size: + If True (the default), the decompressed content size will be included + in the header of the compressed data. This data will only be written if + the compressor knows the size of the input data. + :param write_dict_id: + Determines whether the dictionary ID will be written into the compressed + data. Defaults to True. Only adds content to the compressed data if + a dictionary is being used. + :param threads: + Number of threads to use to compress data concurrently. When set, + compression operations are performed on multiple threads. The default + value (0) disables multi-threaded compression. A value of ``-1`` means + to set the number of threads to the number of detected logical CPUs. + """ + + def __init__( + self, + level=3, + dict_data=None, + compression_params=None, + write_checksum=None, + write_content_size=None, + write_dict_id=None, + threads=0, + ): + if level > lib.ZSTD_maxCLevel(): + raise ValueError( + "level must be less than %d" % lib.ZSTD_maxCLevel() + ) + + if threads < 0: + threads = _cpu_count() + + if compression_params and write_checksum is not None: + raise ValueError( + "cannot define compression_params and " "write_checksum" + ) + + if compression_params and write_content_size is not None: + raise ValueError( + "cannot define compression_params and " "write_content_size" + ) + + if compression_params and write_dict_id is not None: + raise ValueError( + "cannot define compression_params and " "write_dict_id" + ) + + if compression_params and threads: + raise ValueError("cannot define compression_params and threads") + + if compression_params: + self._params = _make_cctx_params(compression_params) + else: + if write_dict_id is None: + write_dict_id = True + + params = lib.ZSTD_createCCtxParams() + if params == ffi.NULL: + raise MemoryError() + + self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams) + + _set_compression_parameter( + self._params, lib.ZSTD_c_compressionLevel, level + ) + + _set_compression_parameter( + self._params, + lib.ZSTD_c_contentSizeFlag, + write_content_size if write_content_size is not None else 1, + ) + + _set_compression_parameter( + self._params, + lib.ZSTD_c_checksumFlag, + 1 if write_checksum else 0, + ) + + _set_compression_parameter( + self._params, lib.ZSTD_c_dictIDFlag, 1 if write_dict_id else 0 + ) + + if threads: + _set_compression_parameter( + self._params, lib.ZSTD_c_nbWorkers, threads + ) + + cctx = lib.ZSTD_createCCtx() + if cctx == ffi.NULL: + raise MemoryError() + + self._cctx = cctx + self._dict_data = dict_data + + # We defer setting up garbage collection until after calling + # _setup_cctx() to ensure the memory size estimate is more accurate. 
+ try: + self._setup_cctx() + finally: + self._cctx = ffi.gc( + cctx, lib.ZSTD_freeCCtx, size=lib.ZSTD_sizeof_CCtx(cctx) + ) + + def _setup_cctx(self): + zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams( + self._cctx, self._params + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not set compression parameters: %s" + % _zstd_error(zresult) + ) + + dict_data = self._dict_data + + if dict_data: + if dict_data._cdict: + zresult = lib.ZSTD_CCtx_refCDict(self._cctx, dict_data._cdict) + else: + zresult = lib.ZSTD_CCtx_loadDictionary_advanced( + self._cctx, + dict_data.as_bytes(), + len(dict_data), + lib.ZSTD_dlm_byRef, + dict_data._dict_type, + ) + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not load compression dictionary: %s" + % _zstd_error(zresult) + ) + + def memory_size(self): + """Obtain the memory usage of this compressor, in bytes. + + >>> cctx = zstandard.ZstdCompressor() + >>> memory = cctx.memory_size() + """ + return lib.ZSTD_sizeof_CCtx(self._cctx) + + def compress(self, data): + """ + Compress data in a single operation. + + This is the simplest mechanism to perform compression: simply pass in a + value and get a compressed value back. It is almost the most prone to + abuse. + + The input and output values must fit in memory, so passing in very large + values can result in excessive memory usage. For this reason, one of the + streaming based APIs is preferred for larger values. + + :param data: + Source data to compress + :return: + Compressed data + + >>> cctx = zstandard.ZstdCompressor() + >>> compressed = cctx.compress(b"data to compress") + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + data_buffer = ffi.from_buffer(data) + + dest_size = lib.ZSTD_compressBound(len(data_buffer)) + out = new_nonzero("char[]", dest_size) + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + out_buffer = ffi.new("ZSTD_outBuffer *") + in_buffer = ffi.new("ZSTD_inBuffer *") + + out_buffer.dst = out + out_buffer.size = dest_size + out_buffer.pos = 0 + + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end + ) + + if lib.ZSTD_isError(zresult): + raise ZstdError("cannot compress: %s" % _zstd_error(zresult)) + elif zresult: + raise ZstdError("unexpected partial frame flush") + + return ffi.buffer(out, out_buffer.pos)[:] + + def compressobj(self, size=-1): + """ + Obtain a compressor exposing the Python standard library compression API. + + See :py:class:`ZstdCompressionObj` for the full documentation. + + :param size: + Size in bytes of data that will be compressed. 
+ :return: + :py:class:`ZstdCompressionObj` + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + cobj = ZstdCompressionObj() + cobj._out = ffi.new("ZSTD_outBuffer *") + cobj._dst_buffer = ffi.new( + "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE + ) + cobj._out.dst = cobj._dst_buffer + cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE + cobj._out.pos = 0 + cobj._compressor = self + cobj._finished = False + + return cobj + + def chunker(self, size=-1, chunk_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE): + """ + Create an object for iterative compressing to same-sized chunks. + + This API is similar to :py:meth:`ZstdCompressor.compressobj` but has + better performance properties. + + :param size: + Size in bytes of data that will be compressed. + :param chunk_size: + Size of compressed chunks. + :return: + :py:class:`ZstdCompressionChunker` + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + return ZstdCompressionChunker(self, chunk_size=chunk_size) + + def copy_stream( + self, + ifh, + ofh, + size=-1, + read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE, + ): + """ + Copy data between 2 streams while compressing it. + + Data will be read from ``ifh``, compressed, and written to ``ofh``. + ``ifh`` must have a ``read(size)`` method. ``ofh`` must have a + ``write(data)`` + method. + + >>> cctx = zstandard.ZstdCompressor() + >>> with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh: + ... cctx.copy_stream(ifh, ofh) + + It is also possible to declare the size of the source stream: + + >>> cctx = zstandard.ZstdCompressor() + >>> cctx.copy_stream(ifh, ofh, size=len_of_input) + + You can also specify how large the chunks that are ``read()`` + and ``write()`` from and to the streams: + + >>> cctx = zstandard.ZstdCompressor() + >>> cctx.copy_stream(ifh, ofh, read_size=32768, write_size=16384) + + The stream copier returns a 2-tuple of bytes read and written: + + >>> cctx = zstandard.ZstdCompressor() + >>> read_count, write_count = cctx.copy_stream(ifh, ofh) + + :param ifh: + Source stream to read from + :param ofh: + Destination stream to write to + :param size: + Size in bytes of the source stream. If defined, compression + parameters will be tuned for this size. + :param read_size: + Chunk sizes that source stream should be ``read()`` from. + :param write_size: + Chunk sizes that destination stream should be ``write()`` to. + :return: + 2-tuple of ints of bytes read and written, respectively. 
+ """ + + if not hasattr(ifh, "read"): + raise ValueError("first argument must have a read() method") + if not hasattr(ofh, "write"): + raise ValueError("second argument must have a write() method") + + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size + out_buffer.pos = 0 + + total_read, total_write = 0, 0 + + while True: + data = ifh.read(read_size) + if not data: + break + + data_buffer = ffi.from_buffer(data) + total_read += len(data_buffer) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + total_write += out_buffer.pos + out_buffer.pos = 0 + + # We've finished reading. Flush the compressor. + while True: + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + total_write += out_buffer.pos + out_buffer.pos = 0 + + if zresult == 0: + break + + return total_read, total_write + + def stream_reader( + self, + source, + size=-1, + read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE, + closefd=True, + ): + """ + Wrap a readable source with a stream that can read compressed data. + + This will produce an object conforming to the ``io.RawIOBase`` + interface which can be ``read()`` from to retrieve compressed data + from a source. + + The source object can be any object with a ``read(size)`` method + or an object that conforms to the buffer protocol. + + See :py:class:`ZstdCompressionReader` for type documentation and usage + examples. + + :param source: + Object to read source data from + :param size: + Size in bytes of source object. + :param read_size: + How many bytes to request when ``read()``'ing from the source. + :param closefd: + Whether to close the source stream when the returned stream is + closed. + :return: + :py:class:`ZstdCompressionReader` + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + try: + size = len(source) + except Exception: + pass + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + return ZstdCompressionReader(self, source, read_size, closefd=closefd) + + def stream_writer( + self, + writer, + size=-1, + write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE, + write_return_read=True, + closefd=True, + ): + """ + Create a stream that will write compressed data into another stream. + + The argument to ``stream_writer()`` must have a ``write(data)`` method. + As compressed data is available, ``write()`` will be called with the + compressed data as its argument. 
Many common Python types implement + ``write()``, including open file handles and ``io.BytesIO``. + + See :py:class:`ZstdCompressionWriter` for more documentation, including + usage examples. + + :param writer: + Stream to write compressed data to. + :param size: + Size in bytes of data to be compressed. If set, it will be used + to influence compression parameter tuning and could result in the + size being written into the header of the compressed data. + :param write_size: + How much data to ``write()`` to ``writer`` at a time. + :param write_return_read: + Whether ``write()`` should return the number of bytes that were + consumed from the input. + :param closefd: + Whether to ``close`` the ``writer`` when this stream is closed. + :return: + :py:class:`ZstdCompressionWriter` + """ + if not hasattr(writer, "write"): + raise ValueError("must pass an object with a write() method") + + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + return ZstdCompressionWriter( + self, writer, size, write_size, write_return_read, closefd=closefd + ) + + def read_to_iter( + self, + reader, + size=-1, + read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE, + ): + """ + Read uncompressed data from a reader and return an iterator + + Returns an iterator of compressed data produced from reading from + ``reader``. + + This method provides a mechanism to stream compressed data out of a + source as an iterator of data chunks. + + Uncompressed data will be obtained from ``reader`` by calling the + ``read(size)`` method of it or by reading a slice (if ``reader`` + conforms to the *buffer protocol*). The source data will be streamed + into a compressor. As compressed data is available, it will be exposed + to the iterator. + + Data is read from the source in chunks of ``read_size``. Compressed + chunks are at most ``write_size`` bytes. Both values default to the + zstd input and and output defaults, respectively. + + If reading from the source via ``read()``, ``read()`` will be called + until it raises or returns an empty bytes (``b""``). It is perfectly + valid for the source to deliver fewer bytes than were what requested + by ``read(size)``. + + The caller is partially in control of how fast data is fed into the + compressor by how it consumes the returned iterator. The compressor + will not consume from the reader unless the caller consumes from the + iterator. + + >>> cctx = zstandard.ZstdCompressor() + >>> for chunk in cctx.read_to_iter(fh): + ... # Do something with emitted data. + + ``read_to_iter()`` accepts a ``size`` argument declaring the size of + the input stream: + + >>> cctx = zstandard.ZstdCompressor() + >>> for chunk in cctx.read_to_iter(fh, size=some_int): + >>> pass + + You can also control the size that data is ``read()`` from the source + and the ideal size of output chunks: + + >>> cctx = zstandard.ZstdCompressor() + >>> for chunk in cctx.read_to_iter(fh, read_size=16384, write_size=8192): + >>> pass + + ``read_to_iter()`` does not give direct control over the sizes of chunks + fed into the compressor. Instead, chunk sizes will be whatever the object + being read from delivers. These will often be of a uniform size. + + :param reader: + Stream providing data to be compressed. + :param size: + Size in bytes of input data. + :param read_size: + Controls how many bytes are ``read()`` from the source. + :param write_size: + Controls the output size of emitted chunks. 
+ :return: + Iterator of ``bytes``. + """ + + if hasattr(reader, "read"): + have_read = True + elif hasattr(reader, "__getitem__"): + have_read = False + buffer_offset = 0 + size = len(reader) + else: + raise ValueError( + "must pass an object with a read() method or " + "conforms to buffer protocol" + ) + + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + in_buffer.src = ffi.NULL + in_buffer.size = 0 + in_buffer.pos = 0 + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size + out_buffer.pos = 0 + + while True: + # We should never have output data sitting around after a previous + # iteration. + assert out_buffer.pos == 0 + + # Collect input data. + if have_read: + read_result = reader.read(read_size) + else: + remaining = len(reader) - buffer_offset + slice_size = min(remaining, read_size) + read_result = reader[buffer_offset : buffer_offset + slice_size] + buffer_offset += slice_size + + # No new input data. Break out of the read loop. + if not read_result: + break + + # Feed all read data into the compressor and emit output until + # exhausted. + read_buffer = ffi.from_buffer(read_result) + in_buffer.src = read_buffer + in_buffer.size = len(read_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + assert out_buffer.pos == 0 + + # And repeat the loop to collect more data. + continue + + # If we get here, input is exhausted. End the stream and emit what + # remains. + while True: + assert out_buffer.pos == 0 + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + if zresult == 0: + break + + def multi_compress_to_buffer(self, data, threads=-1): + """ + Compress multiple pieces of data as a single function call. + + (Experimental. Not yet supported by CFFI backend.) + + This function is optimized to perform multiple compression operations + as as possible with as little overhead as possible. + + Data to be compressed can be passed as a ``BufferWithSegmentsCollection``, + a ``BufferWithSegments``, or a list containing byte like objects. Each + element of the container will be compressed individually using the + configured parameters on the ``ZstdCompressor`` instance. + + The ``threads`` argument controls how many threads to use for + compression. The default is ``0`` which means to use a single thread. + Negative values use the number of logical CPUs in the machine. + + The function returns a ``BufferWithSegmentsCollection``. This type + represents N discrete memory allocations, each holding 1 or more + compressed frames. + + Output data is written to shared memory buffers. 
This means that unlike + regular Python objects, a reference to *any* object within the collection + keeps the shared buffer and therefore memory backing it alive. This can + have undesirable effects on process memory usage. + + The API and behavior of this function is experimental and will likely + change. Known deficiencies include: + + * If asked to use multiple threads, it will always spawn that many + threads, even if the input is too small to use them. It should + automatically lower the thread count when the extra threads would + just add overhead. + * The buffer allocation strategy is fixed. There is room to make it + dynamic, perhaps even to allow one output buffer per input, + facilitating a variation of the API to return a list without the + adverse effects of shared memory buffers. + + :param data: + Source to read discrete pieces of data to compress. + + Can be a ``BufferWithSegmentsCollection``, a ``BufferWithSegments``, + or a ``list[bytes]``. + :return: + BufferWithSegmentsCollection holding compressed data. + """ + raise NotImplementedError() + + def frame_progression(self): + """ + Return information on how much work the compressor has done. + + Returns a 3-tuple of (ingested, consumed, produced). + + >>> cctx = zstandard.ZstdCompressor() + >>> (ingested, consumed, produced) = cctx.frame_progression() + """ + progression = lib.ZSTD_getFrameProgression(self._cctx) + + return progression.ingested, progression.consumed, progression.produced + + +class FrameParameters(object): + """Information about a zstd frame. + + Instances have the following attributes: + + ``content_size`` + Integer size of original, uncompressed content. This will be ``0`` if the + original content size isn't written to the frame (controlled with the + ``write_content_size`` argument to ``ZstdCompressor``) or if the input + content size was ``0``. + + ``window_size`` + Integer size of maximum back-reference distance in compressed data. + + ``dict_id`` + Integer of dictionary ID used for compression. ``0`` if no dictionary + ID was used or if the dictionary ID was ``0``. + + ``has_checksum`` + Bool indicating whether a 4 byte content checksum is stored at the end + of the frame. + """ + + def __init__(self, fparams): + self.content_size = fparams.frameContentSize + self.window_size = fparams.windowSize + self.dict_id = fparams.dictID + self.has_checksum = bool(fparams.checksumFlag) + + +def frame_content_size(data): + """Obtain the decompressed size of a frame. + + The returned value is usually accurate. But strictly speaking it should + not be trusted. + + :return: + ``-1`` if size unknown and a non-negative integer otherwise. + """ + data_buffer = ffi.from_buffer(data) + + size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer)) + + if size == lib.ZSTD_CONTENTSIZE_ERROR: + raise ZstdError("error when determining content size") + elif size == lib.ZSTD_CONTENTSIZE_UNKNOWN: + return -1 + else: + return size + + +def frame_header_size(data): + """Obtain the size of a frame header. + + :return: + Integer size in bytes. + """ + data_buffer = ffi.from_buffer(data) + + zresult = lib.ZSTD_frameHeaderSize(data_buffer, len(data_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not determine frame header size: %s" % _zstd_error(zresult) + ) + + return zresult + + +def get_frame_parameters(data): + """ + Parse a zstd frame header into frame parameters. + + Depending on which fields are present in the frame and their values, the + length of the frame parameters varies. 
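Taken together, the frame inspection helpers defined above let callers peek at a frame before deciding how to decompress it. A small sketch; the payload is built inline so only the documented module-level functions are assumed.

import zstandard

payload = zstandard.ZstdCompressor().compress(b"hello world")

print(zstandard.frame_header_size(payload))   # size of the frame header in bytes
print(zstandard.frame_content_size(payload))  # 11, since the content size was written

params = zstandard.get_frame_parameters(payload)
print(params.content_size, params.window_size, params.dict_id, params.has_checksum)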
If insufficient bytes are passed + in to fully parse the frame parameters, ``ZstdError`` is raised. To ensure + frame parameters can be parsed, pass in at least 18 bytes. + + :param data: + Data from which to read frame parameters. + :return: + :py:class:`FrameParameters` + """ + params = ffi.new("ZSTD_frameHeader *") + + data_buffer = ffi.from_buffer(data) + zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "cannot get frame parameters: %s" % _zstd_error(zresult) + ) + + if zresult: + raise ZstdError( + "not enough data for frame parameters; need %d bytes" % zresult + ) + + return FrameParameters(params[0]) + + +class ZstdCompressionDict(object): + """Represents a computed compression dictionary. + + Instances are obtained by calling :py:func:`train_dictionary` or by + passing bytes obtained from another source into the constructor. + + Instances can be constructed from bytes: + + >>> dict_data = zstandard.ZstdCompressionDict(data) + + It is possible to construct a dictionary from *any* data. If the data + doesn't begin with a magic header, it will be treated as a *prefix* + dictionary. *Prefix* dictionaries allow compression operations to + reference raw data within the dictionary. + + It is possible to force the use of *prefix* dictionaries or to require + a dictionary header: + + >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_RAWCONTENT) + >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_FULLDICT) + + You can see how many bytes are in the dictionary by calling ``len()``: + + >>> dict_data = zstandard.train_dictionary(size, samples) + >>> dict_size = len(dict_data) # will not be larger than ``size`` + + Once you have a dictionary, you can pass it to the objects performing + compression and decompression: + + >>> dict_data = zstandard.train_dictionary(131072, samples) + >>> cctx = zstandard.ZstdCompressor(dict_data=dict_data) + >>> for source_data in input_data: + ... compressed = cctx.compress(source_data) + ... # Do something with compressed data. + ... + >>> dctx = zstandard.ZstdDecompressor(dict_data=dict_data) + >>> for compressed_data in input_data: + ... buffer = io.BytesIO() + ... with dctx.stream_writer(buffer) as decompressor: + ... decompressor.write(compressed_data) + ... # Do something with raw data in ``buffer``. + + Dictionaries have unique integer IDs. You can retrieve this ID via: + + >>> dict_id = zstandard.dictionary_id(dict_data) + + You can obtain the raw data in the dict (useful for persisting and constructing + a ``ZstdCompressionDict`` later) via ``as_bytes()``: + + >>> dict_data = zstandard.train_dictionary(size, samples) + >>> raw_data = dict_data.as_bytes() + + By default, when a ``ZstdCompressionDict`` is *attached* to a + ``ZstdCompressor``, each ``ZstdCompressor`` performs work to prepare the + dictionary for use. This is fine if only 1 compression operation is being + performed or if the ``ZstdCompressor`` is being reused for multiple operations. + But if multiple ``ZstdCompressor`` instances are being used with the dictionary, + this can add overhead. + + It is possible to *precompute* the dictionary so it can readily be consumed + by multiple ``ZstdCompressor`` instances: + + >>> d = zstandard.ZstdCompressionDict(data) + >>> # Precompute for compression level 3. + >>> d.precompute_compress(level=3) + >>> # Precompute with specific compression parameters. + >>> params = zstandard.ZstdCompressionParameters(...) 
+ >>> d.precompute_compress(compression_params=params) + + .. note:: + + When a dictionary is precomputed, the compression parameters used to + precompute the dictionary overwrite some of the compression parameters + specified to ``ZstdCompressor``. + + :param data: + Dictionary data. + :param dict_type: + Type of dictionary. One of the ``DICT_TYPE_*`` constants. + """ + + def __init__(self, data, dict_type=DICT_TYPE_AUTO, k=0, d=0): + assert isinstance(data, bytes) + self._data = data + self.k = k + self.d = d + + if dict_type not in ( + DICT_TYPE_AUTO, + DICT_TYPE_RAWCONTENT, + DICT_TYPE_FULLDICT, + ): + raise ValueError( + "invalid dictionary load mode: %d; must use " + "DICT_TYPE_* constants" + ) + + self._dict_type = dict_type + self._cdict = None + + def __len__(self): + return len(self._data) + + def dict_id(self): + """Obtain the integer ID of the dictionary.""" + return int(lib.ZDICT_getDictID(self._data, len(self._data))) + + def as_bytes(self): + """Obtain the ``bytes`` representation of the dictionary.""" + return self._data + + def precompute_compress(self, level=0, compression_params=None): + """Precompute a dictionary os it can be used by multiple compressors. + + Calling this method on an instance that will be used by multiple + :py:class:`ZstdCompressor` instances will improve performance. + """ + if level and compression_params: + raise ValueError( + "must only specify one of level or " "compression_params" + ) + + if not level and not compression_params: + raise ValueError("must specify one of level or compression_params") + + if level: + cparams = lib.ZSTD_getCParams(level, 0, len(self._data)) + else: + cparams = ffi.new("ZSTD_compressionParameters") + cparams.chainLog = compression_params.chain_log + cparams.hashLog = compression_params.hash_log + cparams.minMatch = compression_params.min_match + cparams.searchLog = compression_params.search_log + cparams.strategy = compression_params.strategy + cparams.targetLength = compression_params.target_length + cparams.windowLog = compression_params.window_log + + cdict = lib.ZSTD_createCDict_advanced( + self._data, + len(self._data), + lib.ZSTD_dlm_byRef, + self._dict_type, + cparams, + lib.ZSTD_defaultCMem, + ) + if cdict == ffi.NULL: + raise ZstdError("unable to precompute dictionary") + + self._cdict = ffi.gc( + cdict, lib.ZSTD_freeCDict, size=lib.ZSTD_sizeof_CDict(cdict) + ) + + @property + def _ddict(self): + ddict = lib.ZSTD_createDDict_advanced( + self._data, + len(self._data), + lib.ZSTD_dlm_byRef, + self._dict_type, + lib.ZSTD_defaultCMem, + ) + + if ddict == ffi.NULL: + raise ZstdError("could not create decompression dict") + + ddict = ffi.gc( + ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict) + ) + self.__dict__["_ddict"] = ddict + + return ddict + + +def train_dictionary( + dict_size, + samples, + k=0, + d=0, + f=0, + split_point=0.0, + accel=0, + notifications=0, + dict_id=0, + level=0, + steps=0, + threads=0, +): + """Train a dictionary from sample data using the COVER algorithm. + + A compression dictionary of size ``dict_size`` will be created from the + iterable of ``samples``. The raw dictionary bytes will be returned. + + The dictionary training mechanism is known as *cover*. More details about it + are available in the paper *Effective Construction of Relative Lempel-Ziv + Dictionaries* (authors: Liao, Petri, Moffat, Wirth). + + The cover algorithm takes parameters ``k`` and ``d``. These are the + *segment size* and *dmer size*, respectively. 
The returned dictionary + instance created by this function has ``k`` and ``d`` attributes + containing the values for these parameters. If a ``ZstdCompressionDict`` + is constructed from raw bytes data (a content-only dictionary), the + ``k`` and ``d`` attributes will be ``0``. + + The segment and dmer size parameters to the cover algorithm can either be + specified manually or ``train_dictionary()`` can try multiple values + and pick the best one, where *best* means the smallest compressed data size. + This later mode is called *optimization* mode. + + Under the hood, this function always calls + ``ZDICT_optimizeTrainFromBuffer_fastCover()``. See the corresponding C library + documentation for more. + + If neither ``steps`` nor ``threads`` is defined, defaults for ``d``, ``steps``, + and ``level`` will be used that are equivalent with what + ``ZDICT_trainFromBuffer()`` would use. + + + :param dict_size: + Target size in bytes of the dictionary to generate. + :param samples: + A list of bytes holding samples the dictionary will be trained from. + :param k: + Segment size : constraint: 0 < k : Reasonable range [16, 2048+] + :param d: + dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] + :param f: + log of size of frequency array : constraint: 0 < f <= 31 : 1 means + default(20) + :param split_point: + Percentage of samples used for training: Only used for optimization. + The first # samples * ``split_point`` samples will be used to training. + The last # samples * (1 - split_point) samples will be used for testing. + 0 means default (0.75), 1.0 when all samples are used for both training + and testing. + :param accel: + Acceleration level: constraint: 0 < accel <= 10. Higher means faster + and less accurate, 0 means default(1). + :param dict_id: + Integer dictionary ID for the produced dictionary. Default is 0, which uses + a random value. + :param steps: + Number of steps through ``k`` values to perform when trying parameter + variations. + :param threads: + Number of threads to use when trying parameter variations. Default is 0, + which means to use a single thread. A negative value can be specified to + use as many threads as there are detected logical CPUs. + :param level: + Integer target compression level when trying parameter variations. + :param notifications: + Controls writing of informational messages to ``stderr``. ``0`` (the + default) means to write nothing. ``1`` writes errors. ``2`` writes + progression info. ``3`` writes more details. And ``4`` writes all info. 
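A minimal end-to-end sketch of dictionary training as described above. The sample corpus here is synthetic and purely illustrative; real training wants many realistic samples, and training can fail if the corpus is too small relative to ``dict_size``.

import zstandard

# Hypothetical, highly repetitive sample corpus (~2000 records).
samples = [
    (b"user=%d;plan=enterprise;region=us-east-1;status=active;" % i) * 4
    for i in range(2000)
]

dict_data = zstandard.train_dictionary(16384, samples, level=3)
print(dict_data.dict_id(), len(dict_data), dict_data.k, dict_data.d)

cctx = zstandard.ZstdCompressor(dict_data=dict_data)
compressed = cctx.compress(samples[0])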
+ """ + + if not isinstance(samples, list): + raise TypeError("samples must be a list") + + if threads < 0: + threads = _cpu_count() + + if not steps and not threads: + d = d or 8 + steps = steps or 4 + level = level or 3 + + total_size = sum(map(len, samples)) + + samples_buffer = new_nonzero("char[]", total_size) + sample_sizes = new_nonzero("size_t[]", len(samples)) + + offset = 0 + for i, sample in enumerate(samples): + if not isinstance(sample, bytes): + raise ValueError("samples must be bytes") + + l = len(sample) + ffi.memmove(samples_buffer + offset, sample, l) + offset += l + sample_sizes[i] = l + + dict_data = new_nonzero("char[]", dict_size) + + dparams = ffi.new("ZDICT_fastCover_params_t *")[0] + dparams.k = k + dparams.d = d + dparams.f = f + dparams.steps = steps + dparams.nbThreads = threads + dparams.splitPoint = split_point + dparams.accel = accel + dparams.zParams.notificationLevel = notifications + dparams.zParams.dictID = dict_id + dparams.zParams.compressionLevel = level + + zresult = lib.ZDICT_optimizeTrainFromBuffer_fastCover( + ffi.addressof(dict_data), + dict_size, + ffi.addressof(samples_buffer), + ffi.addressof(sample_sizes, 0), + len(samples), + ffi.addressof(dparams), + ) + + if lib.ZDICT_isError(zresult): + msg = ffi.string(lib.ZDICT_getErrorName(zresult)).decode("utf-8") + raise ZstdError("cannot train dict: %s" % msg) + + return ZstdCompressionDict( + ffi.buffer(dict_data, zresult)[:], + dict_type=DICT_TYPE_FULLDICT, + k=dparams.k, + d=dparams.d, + ) + + +class ZstdDecompressionObj(object): + """A standard library API compatible decompressor. + + This type implements a compressor that conforms to the API by other + decompressors in Python's standard library. e.g. ``zlib.decompressobj`` + or ``bz2.BZ2Decompressor``. This allows callers to use zstd compression + while conforming to a similar API. + + Compressed data chunks are fed into ``decompress(data)`` and + uncompressed output (or an empty bytes) is returned. Output from + subsequent calls needs to be concatenated to reassemble the full + decompressed byte sequence. + + If ``read_across_frames=False``, each instance is single use: once an + input frame is decoded, ``decompress()`` will raise an exception. If + ``read_across_frames=True``, instances can decode multiple frames. + + >>> dctx = zstandard.ZstdDecompressor() + >>> dobj = dctx.decompressobj() + >>> data = dobj.decompress(compressed_chunk_0) + >>> data = dobj.decompress(compressed_chunk_1) + + By default, calls to ``decompress()`` write output data in chunks of size + ``DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE``. These chunks are concatenated + before being returned to the caller. It is possible to define the size of + these temporary chunks by passing ``write_size`` to ``decompressobj()``: + + >>> dctx = zstandard.ZstdDecompressor() + >>> dobj = dctx.decompressobj(write_size=1048576) + + .. note:: + + Because calls to ``decompress()`` may need to perform multiple + memory (re)allocations, this streaming decompression API isn't as + efficient as other APIs. + """ + + def __init__(self, decompressor, write_size, read_across_frames): + self._decompressor = decompressor + self._write_size = write_size + self._finished = False + self._read_across_frames = read_across_frames + self._unused_input = b"" + + def decompress(self, data): + """Send compressed data to the decompressor and obtain decompressed data. + + :param data: + Data to feed into the decompressor. + :return: + Decompressed bytes. 
+ """ + if self._finished: + raise ZstdError("cannot use a decompressobj multiple times") + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + data_buffer = ffi.from_buffer(data) + + if len(data_buffer) == 0: + return b"" + + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + dst_buffer = ffi.new("char[]", self._write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + chunks = [] + + while True: + zresult = lib.ZSTD_decompressStream( + self._decompressor._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompressor error: %s" % _zstd_error(zresult) + ) + + # Always record any output from decompressor. + if out_buffer.pos: + chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) + + # 0 is only seen when a frame is fully decoded *and* fully flushed. + # Behavior depends on whether we're in single or multiple frame + # mode. + if zresult == 0 and not self._read_across_frames: + # Mark the instance as done and make any unconsumed input available + # for retrieval. + self._finished = True + self._decompressor = None + self._unused_input = data[in_buffer.pos : in_buffer.size] + break + elif zresult == 0 and self._read_across_frames: + # We're at the end of a fully flushed frame and we can read more. + # Try to read more if there's any more input. + if in_buffer.pos == in_buffer.size: + break + else: + out_buffer.pos = 0 + + # We're not at the end of the frame *or* we're not fully flushed. + + # The decompressor will write out all the bytes it can to the output + # buffer. So if the output buffer is partially filled and the input + # is exhausted, there's nothing more to write. So we've done all we + # can. + elif ( + in_buffer.pos == in_buffer.size + and out_buffer.pos < out_buffer.size + ): + break + else: + out_buffer.pos = 0 + + return b"".join(chunks) + + def flush(self, length=0): + """Effectively a no-op. + + Implemented for compatibility with the standard library APIs. + + Safe to call at any time. + + :return: + Empty bytes. + """ + return b"" + + @property + def unused_data(self): + """Bytes past the end of compressed data. + + If ``decompress()`` is fed additional data beyond the end of a zstd + frame, this value will be non-empty once ``decompress()`` fully decodes + the input frame. + """ + return self._unused_input + + @property + def unconsumed_tail(self): + """Data that has not yet been fed into the decompressor.""" + return b"" + + @property + def eof(self): + """Whether the end of the compressed data stream has been reached.""" + return self._finished + + +class ZstdDecompressionReader(object): + """Read only decompressor that pull uncompressed data from another stream. + + This type provides a read-only stream interface for performing transparent + decompression from another stream or data source. It conforms to the + ``io.RawIOBase`` interface. Only methods relevant to reading are + implemented. + + >>> with open(path, 'rb') as fh: + >>> dctx = zstandard.ZstdDecompressor() + >>> reader = dctx.stream_reader(fh) + >>> while True: + ... chunk = reader.read(16384) + ... if not chunk: + ... break + ... # Do something with decompressed chunk. + + The stream can also be used as a context manager: + + >>> with open(path, 'rb') as fh: + ... dctx = zstandard.ZstdDecompressor() + ... with dctx.stream_reader(fh) as reader: + ... ... 
+ + When used as a context manager, the stream is closed and the underlying + resources are released when the context manager exits. Future operations + against the stream will fail. + + The ``source`` argument to ``stream_reader()`` can be any object with a + ``read(size)`` method or any object implementing the *buffer protocol*. + + If the ``source`` is a stream, you can specify how large ``read()`` requests + to that stream should be via the ``read_size`` argument. It defaults to + ``zstandard.DECOMPRESSION_RECOMMENDED_INPUT_SIZE``.: + + >>> with open(path, 'rb') as fh: + ... dctx = zstandard.ZstdDecompressor() + ... # Will perform fh.read(8192) when obtaining data for the decompressor. + ... with dctx.stream_reader(fh, read_size=8192) as reader: + ... ... + + Instances are *partially* seekable. Absolute and relative positions + (``SEEK_SET`` and ``SEEK_CUR``) forward of the current position are + allowed. Offsets behind the current read position and offsets relative + to the end of stream are not allowed and will raise ``ValueError`` + if attempted. + + ``tell()`` returns the number of decompressed bytes read so far. + + Not all I/O methods are implemented. Notably missing is support for + ``readline()``, ``readlines()``, and linewise iteration support. This is + because streams operate on binary data - not text data. If you want to + convert decompressed output to text, you can chain an ``io.TextIOWrapper`` + to the stream: + + >>> with open(path, 'rb') as fh: + ... dctx = zstandard.ZstdDecompressor() + ... stream_reader = dctx.stream_reader(fh) + ... text_stream = io.TextIOWrapper(stream_reader, encoding='utf-8') + ... for line in text_stream: + ... ... + """ + + def __init__( + self, + decompressor, + source, + read_size, + read_across_frames, + closefd=True, + ): + self._decompressor = decompressor + self._source = source + self._read_size = read_size + self._read_across_frames = bool(read_across_frames) + self._closefd = bool(closefd) + self._entered = False + self._closed = False + self._bytes_decompressed = 0 + self._finished_input = False + self._finished_output = False + self._in_buffer = ffi.new("ZSTD_inBuffer *") + # Holds a ref to self._in_buffer.src. 
+ self._source_buffer = None + + def __enter__(self): + if self._entered: + raise ValueError("cannot __enter__ multiple times") + + if self._closed: + raise ValueError("stream is closed") + + self._entered = True + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + self._decompressor = None + self.close() + self._source = None + + return False + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return False + + def readline(self, size=-1): + raise io.UnsupportedOperation() + + def readlines(self, hint=-1): + raise io.UnsupportedOperation() + + def write(self, data): + raise io.UnsupportedOperation() + + def writelines(self, lines): + raise io.UnsupportedOperation() + + def isatty(self): + return False + + def flush(self): + return None + + def close(self): + if self._closed: + return None + + self._closed = True + + f = getattr(self._source, "close", None) + if self._closefd and f: + f() + + @property + def closed(self): + return self._closed + + def tell(self): + return self._bytes_decompressed + + def readall(self): + chunks = [] + + while True: + chunk = self.read(1048576) + if not chunk: + break + + chunks.append(chunk) + + return b"".join(chunks) + + def __iter__(self): + raise io.UnsupportedOperation() + + def __next__(self): + raise io.UnsupportedOperation() + + next = __next__ + + def _read_input(self): + # We have data left over in the input buffer. Use it. + if self._in_buffer.pos < self._in_buffer.size: + return + + # All input data exhausted. Nothing to do. + if self._finished_input: + return + + # Else populate the input buffer from our source. + if hasattr(self._source, "read"): + data = self._source.read(self._read_size) + + if not data: + self._finished_input = True + return + + self._source_buffer = ffi.from_buffer(data) + self._in_buffer.src = self._source_buffer + self._in_buffer.size = len(self._source_buffer) + self._in_buffer.pos = 0 + else: + self._source_buffer = ffi.from_buffer(self._source) + self._in_buffer.src = self._source_buffer + self._in_buffer.size = len(self._source_buffer) + self._in_buffer.pos = 0 + + def _decompress_into_buffer(self, out_buffer): + """Decompress available input into an output buffer. + + Returns True if data in output buffer should be emitted. + """ + zresult = lib.ZSTD_decompressStream( + self._decompressor._dctx, out_buffer, self._in_buffer + ) + + if self._in_buffer.pos == self._in_buffer.size: + self._in_buffer.src = ffi.NULL + self._in_buffer.pos = 0 + self._in_buffer.size = 0 + self._source_buffer = None + + if not hasattr(self._source, "read"): + self._finished_input = True + + if lib.ZSTD_isError(zresult): + raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult)) + + # Emit data if there is data AND either: + # a) output buffer is full (read amount is satisfied) + # b) we're at end of a frame and not in frame spanning mode + return out_buffer.pos and ( + out_buffer.pos == out_buffer.size + or zresult == 0 + and not self._read_across_frames + ) + + def read(self, size=-1): + if self._closed: + raise ValueError("stream is closed") + + if size < -1: + raise ValueError("cannot read negative amounts less than -1") + + if size == -1: + # This is recursive. But it gets the job done. + return self.readall() + + if self._finished_output or size == 0: + return b"" + + # We /could/ call into readinto() here. But that introduces more + # overhead. 
+ dst_buffer = ffi.new("char[]", size) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dst_buffer + out_buffer.size = size + out_buffer.pos = 0 + + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + while not self._finished_input: + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + def readinto(self, b): + if self._closed: + raise ValueError("stream is closed") + + if self._finished_output: + return 0 + + # TODO use writable=True once we require CFFI >= 1.12. + dest_buffer = ffi.from_buffer(b) + ffi.memmove(b, b"", 0) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + while not self._finished_input: + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + def read1(self, size=-1): + if self._closed: + raise ValueError("stream is closed") + + if size < -1: + raise ValueError("cannot read negative amounts less than -1") + + if self._finished_output or size == 0: + return b"" + + # -1 returns arbitrary number of bytes. + if size == -1: + size = DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE + + dst_buffer = ffi.new("char[]", size) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dst_buffer + out_buffer.size = size + out_buffer.pos = 0 + + # read1() dictates that we can perform at most 1 call to underlying + # stream to get input. However, we can't satisfy this restriction with + # decompression because not all input generates output. So we allow + # multiple read(). But unlike read(), we stop once we have any output. + while not self._finished_input: + self._read_input() + self._decompress_into_buffer(out_buffer) + + if out_buffer.pos: + break + + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + def readinto1(self, b): + if self._closed: + raise ValueError("stream is closed") + + if self._finished_output: + return 0 + + # TODO use writable=True once we require CFFI >= 1.12. 
+ dest_buffer = ffi.from_buffer(b) + ffi.memmove(b, b"", 0) + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + while not self._finished_input and not self._finished_output: + self._read_input() + self._decompress_into_buffer(out_buffer) + + if out_buffer.pos: + break + + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + def seek(self, pos, whence=os.SEEK_SET): + if self._closed: + raise ValueError("stream is closed") + + read_amount = 0 + + if whence == os.SEEK_SET: + if pos < 0: + raise OSError("cannot seek to negative position with SEEK_SET") + + if pos < self._bytes_decompressed: + raise OSError( + "cannot seek zstd decompression stream " "backwards" + ) + + read_amount = pos - self._bytes_decompressed + + elif whence == os.SEEK_CUR: + if pos < 0: + raise OSError( + "cannot seek zstd decompression stream " "backwards" + ) + + read_amount = pos + elif whence == os.SEEK_END: + raise OSError( + "zstd decompression streams cannot be seeked " "with SEEK_END" + ) + + while read_amount: + result = self.read( + min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE) + ) + + if not result: + break + + read_amount -= len(result) + + return self._bytes_decompressed + + +class ZstdDecompressionWriter(object): + """ + Write-only stream wrapper that performs decompression. + + This type provides a writable stream that performs decompression and writes + decompressed data to another stream. + + This type implements the ``io.RawIOBase`` interface. Only methods that + involve writing will do useful things. + + Behavior is similar to :py:meth:`ZstdCompressor.stream_writer`: compressed + data is sent to the decompressor by calling ``write(data)`` and decompressed + output is written to the inner stream by calling its ``write(data)`` + method: + + >>> dctx = zstandard.ZstdDecompressor() + >>> decompressor = dctx.stream_writer(fh) + >>> # Will call fh.write() with uncompressed data. + >>> decompressor.write(compressed_data) + + Instances can be used as context managers. However, context managers add no + extra special behavior other than automatically calling ``close()`` when + they exit. + + Calling ``close()`` will mark the stream as closed and subsequent I/O + operations will raise ``ValueError`` (per the documented behavior of + ``io.RawIOBase``). ``close()`` will also call ``close()`` on the + underlying stream if such a method exists and the instance was created with + ``closefd=True``. + + The size of chunks to ``write()`` to the destination can be specified: + + >>> dctx = zstandard.ZstdDecompressor() + >>> with dctx.stream_writer(fh, write_size=16384) as decompressor: + >>> pass + + You can see how much memory is being used by the decompressor: + + >>> dctx = zstandard.ZstdDecompressor() + >>> with dctx.stream_writer(fh) as decompressor: + >>> byte_size = decompressor.memory_size() + + ``stream_writer()`` accepts a ``write_return_read`` boolean argument to control + the return value of ``write()``. When ``True`` (the default)``, ``write()`` + returns the number of bytes that were read from the input. When ``False``, + ``write()`` returns the number of bytes that were ``write()`` to the inner + stream. 
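Putting the description above into a runnable sketch: compressed bytes are pushed through the writer and the decompressed output lands in an ``io.BytesIO``; ``closefd=False`` keeps that buffer readable after the context manager exits.

import io
import zstandard

compressed = zstandard.ZstdCompressor().compress(b"hello world")

out = io.BytesIO()
dctx = zstandard.ZstdDecompressor()
with dctx.stream_writer(out, closefd=False) as decompressor:
    # With write_return_read=True (the default), this is bytes of input consumed.
    consumed = decompressor.write(compressed)

assert out.getvalue() == b"hello world"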
+ """ + + def __init__( + self, + decompressor, + writer, + write_size, + write_return_read, + closefd=True, + ): + decompressor._ensure_dctx() + + self._decompressor = decompressor + self._writer = writer + self._write_size = write_size + self._write_return_read = bool(write_return_read) + self._closefd = bool(closefd) + self._entered = False + self._closing = False + self._closed = False + + def __enter__(self): + if self._closed: + raise ValueError("stream is closed") + + if self._entered: + raise ZstdError("cannot __enter__ multiple times") + + self._entered = True + + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + self.close() + + return False + + def __iter__(self): + raise io.UnsupportedOperation() + + def __next__(self): + raise io.UnsupportedOperation() + + def memory_size(self): + return lib.ZSTD_sizeof_DCtx(self._decompressor._dctx) + + def close(self): + if self._closed: + return + + try: + self._closing = True + self.flush() + finally: + self._closing = False + self._closed = True + + f = getattr(self._writer, "close", None) + if self._closefd and f: + f() + + @property + def closed(self): + return self._closed + + def fileno(self): + f = getattr(self._writer, "fileno", None) + if f: + return f() + else: + raise OSError("fileno not available on underlying writer") + + def flush(self): + if self._closed: + raise ValueError("stream is closed") + + f = getattr(self._writer, "flush", None) + if f and not self._closing: + return f() + + def isatty(self): + return False + + def readable(self): + return False + + def readline(self, size=-1): + raise io.UnsupportedOperation() + + def readlines(self, hint=-1): + raise io.UnsupportedOperation() + + def seek(self, offset, whence=None): + raise io.UnsupportedOperation() + + def seekable(self): + return False + + def tell(self): + raise io.UnsupportedOperation() + + def truncate(self, size=None): + raise io.UnsupportedOperation() + + def writable(self): + return True + + def writelines(self, lines): + raise io.UnsupportedOperation() + + def read(self, size=-1): + raise io.UnsupportedOperation() + + def readall(self): + raise io.UnsupportedOperation() + + def readinto(self, b): + raise io.UnsupportedOperation() + + def write(self, data): + if self._closed: + raise ValueError("stream is closed") + + total_write = 0 + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + data_buffer = ffi.from_buffer(data) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + dst_buffer = ffi.new("char[]", self._write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + dctx = self._decompressor._dctx + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + self._writer.write( + ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + ) + total_write += out_buffer.pos + out_buffer.pos = 0 + + if self._write_return_read: + return in_buffer.pos + else: + return total_write + + +class ZstdDecompressor(object): + """ + Context for performing zstandard decompression. + + Each instance is essentially a wrapper around a ``ZSTD_DCtx`` from zstd's + C API. + + An instance can compress data various ways. Instances can be used multiple + times. 
+ + The interface of this class is very similar to + :py:class:`zstandard.ZstdCompressor` (by design). + + Assume that each ``ZstdDecompressor`` instance can only handle a single + logical compression operation at the same time. i.e. if you call a method + like ``decompressobj()`` to obtain multiple objects derived from the same + ``ZstdDecompressor`` instance and attempt to use them simultaneously, errors + will likely occur. + + If you need to perform multiple logical decompression operations and you + can't guarantee those operations are temporally non-overlapping, you need + to obtain multiple ``ZstdDecompressor`` instances. + + Unless specified otherwise, assume that no two methods of + ``ZstdDecompressor`` instances can be called from multiple Python + threads simultaneously. In other words, assume instances are not thread safe + unless stated otherwise. + + :param dict_data: + Compression dictionary to use. + :param max_window_size: + Sets an upper limit on the window size for decompression operations in + kibibytes. This setting can be used to prevent large memory allocations + for inputs using large compression windows. + :param format: + Set the format of data for the decoder. + + By default this is ``zstandard.FORMAT_ZSTD1``. It can be set to + ``zstandard.FORMAT_ZSTD1_MAGICLESS`` to allow decoding frames without + the 4 byte magic header. Not all decompression APIs support this mode. + """ + + def __init__(self, dict_data=None, max_window_size=0, format=FORMAT_ZSTD1): + self._dict_data = dict_data + self._max_window_size = max_window_size + self._format = format + + dctx = lib.ZSTD_createDCtx() + if dctx == ffi.NULL: + raise MemoryError() + + self._dctx = dctx + + # Defer setting up garbage collection until full state is loaded so + # the memory size is more accurate. + try: + self._ensure_dctx() + finally: + self._dctx = ffi.gc( + dctx, lib.ZSTD_freeDCtx, size=lib.ZSTD_sizeof_DCtx(dctx) + ) + + def memory_size(self): + """Size of decompression context, in bytes. + + >>> dctx = zstandard.ZstdDecompressor() + >>> size = dctx.memory_size() + """ + return lib.ZSTD_sizeof_DCtx(self._dctx) + + def decompress( + self, + data, + max_output_size=0, + read_across_frames=False, + allow_extra_data=True, + ): + """ + Decompress data in a single operation. + + This method will decompress the input data in a single operation and + return the decompressed data. + + The input bytes are expected to contain at least 1 full Zstandard frame + (something compressed with :py:meth:`ZstdCompressor.compress` or + similar). If the input does not contain a full frame, an exception will + be raised. + + ``read_across_frames`` controls whether to read multiple zstandard + frames in the input. When False, decompression stops after reading the + first frame. This feature is not yet implemented but the argument is + provided for forward API compatibility when the default is changed to + True in a future release. For now, if you need to decompress multiple + frames, use an API like :py:meth:`ZstdCompressor.stream_reader` with + ``read_across_frames=True``. + + ``allow_extra_data`` controls how to handle extra input data after a + fully decoded frame. If False, any extra data (which could be a valid + zstd frame) will result in ``ZstdError`` being raised. If True, extra + data is silently ignored. The default will likely change to False in a + future release when ``read_across_frames`` defaults to True. + + If the input contains extra data after a full frame, that extra input + data is silently ignored. 
This behavior is undesirable in many scenarios + and will likely be changed or controllable in a future release (see + #181). + + If the frame header of the compressed data does not contain the content + size, ``max_output_size`` must be specified or ``ZstdError`` will be + raised. An allocation of size ``max_output_size`` will be performed and an + attempt will be made to perform decompression into that buffer. If the + buffer is too small or cannot be allocated, ``ZstdError`` will be + raised. The buffer will be resized if it is too large. + + Uncompressed data could be much larger than compressed data. As a result, + calling this function could result in a very large memory allocation + being performed to hold the uncompressed data. This could potentially + result in ``MemoryError`` or system memory swapping. If you don't need + the full output data in a single contiguous array in memory, consider + using streaming decompression for more resilient memory behavior. + + Usage: + + >>> dctx = zstandard.ZstdDecompressor() + >>> decompressed = dctx.decompress(data) + + If the compressed data doesn't have its content size embedded within it, + decompression can be attempted by specifying the ``max_output_size`` + argument: + + >>> dctx = zstandard.ZstdDecompressor() + >>> uncompressed = dctx.decompress(data, max_output_size=1048576) + + Ideally, ``max_output_size`` will be identical to the decompressed + output size. + + .. important:: + + If the exact size of decompressed data is unknown (not passed in + explicitly and not stored in the zstd frame), for performance + reasons it is encouraged to use a streaming API. + + :param data: + Compressed data to decompress. + :param max_output_size: + Integer max size of response. + + If ``0``, there is no limit and we can attempt to allocate an output + buffer of infinite size. + :return: + ``bytes`` representing decompressed output. 
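A short sketch of the two one-shot paths described above: a frame that embeds its content size decompresses directly, while a frame written without one needs ``max_output_size``.

import zstandard

data = b"data to compress" * 1024

# Frame with the content size embedded (the default).
frame = zstandard.ZstdCompressor().compress(data)
assert zstandard.ZstdDecompressor().decompress(frame) == data

# Frame without a content size requires an explicit output budget.
headerless = zstandard.ZstdCompressor(write_content_size=False).compress(data)
restored = zstandard.ZstdDecompressor().decompress(headerless, max_output_size=len(data))
assert restored == data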
+ """ + + if read_across_frames: + raise ZstdError( + "ZstdDecompressor.read_across_frames=True is not yet implemented" + ) + + self._ensure_dctx() + + data_buffer = ffi.from_buffer(data) + + output_size = lib.ZSTD_getFrameContentSize( + data_buffer, len(data_buffer) + ) + + if output_size == lib.ZSTD_CONTENTSIZE_ERROR: + raise ZstdError("error determining content size from frame header") + elif output_size == 0: + return b"" + elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN: + if not max_output_size: + raise ZstdError( + "could not determine content size in frame header" + ) + + result_buffer = ffi.new("char[]", max_output_size) + result_size = max_output_size + output_size = 0 + else: + result_buffer = ffi.new("char[]", output_size) + result_size = output_size + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = result_buffer + out_buffer.size = result_size + out_buffer.pos = 0 + + in_buffer = ffi.new("ZSTD_inBuffer *") + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError("decompression error: %s" % _zstd_error(zresult)) + elif zresult: + raise ZstdError( + "decompression error: did not decompress full frame" + ) + elif output_size and out_buffer.pos != output_size: + raise ZstdError( + "decompression error: decompressed %d bytes; expected %d" + % (zresult, output_size) + ) + elif not allow_extra_data and in_buffer.pos < in_buffer.size: + count = in_buffer.size - in_buffer.pos + + raise ZstdError( + "compressed input contains %d bytes of unused data, which is disallowed" + % count + ) + + return ffi.buffer(result_buffer, out_buffer.pos)[:] + + def stream_reader( + self, + source, + read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + read_across_frames=False, + closefd=True, + ): + """ + Read-only stream wrapper that performs decompression. + + This method obtains an object that conforms to the ``io.RawIOBase`` + interface and performs transparent decompression via ``read()`` + operations. Source data is obtained by calling ``read()`` on a + source stream or object implementing the buffer protocol. + + See :py:class:`zstandard.ZstdDecompressionReader` for more documentation + and usage examples. + + :param source: + Source of compressed data to decompress. Can be any object + with a ``read(size)`` method or that conforms to the buffer protocol. + :param read_size: + Integer number of bytes to read from the source and feed into the + compressor at a time. + :param read_across_frames: + Whether to read data across multiple zstd frames. If False, + decompression is stopped at frame boundaries. + :param closefd: + Whether to close the source stream when this instance is closed. + :return: + :py:class:`zstandard.ZstdDecompressionReader`. + """ + self._ensure_dctx() + return ZstdDecompressionReader( + self, source, read_size, read_across_frames, closefd=closefd + ) + + def decompressobj( + self, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + read_across_frames=False, + ): + """Obtain a standard library compatible incremental decompressor. + + See :py:class:`ZstdDecompressionObj` for more documentation + and usage examples. + + :param write_size: size of internal output buffer to collect decompressed + chunks in. + :param read_across_frames: whether to read across multiple zstd frames. + If False, reading stops after 1 frame and subsequent decompress + attempts will raise an exception. 
+ :return: + :py:class:`zstandard.ZstdDecompressionObj` + """ + if write_size < 1: + raise ValueError("write_size must be positive") + + self._ensure_dctx() + return ZstdDecompressionObj( + self, write_size=write_size, read_across_frames=read_across_frames + ) + + def read_to_iter( + self, + reader, + read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + skip_bytes=0, + ): + """Read compressed data to an iterator of uncompressed chunks. + + This method will read data from ``reader``, feed it to a decompressor, + and emit ``bytes`` chunks representing the decompressed result. + + >>> dctx = zstandard.ZstdDecompressor() + >>> for chunk in dctx.read_to_iter(fh): + ... # Do something with original data. + + ``read_to_iter()`` accepts an object with a ``read(size)`` method that + will return compressed bytes or an object conforming to the buffer + protocol. + + ``read_to_iter()`` returns an iterator whose elements are chunks of the + decompressed data. + + The size of requested ``read()`` from the source can be specified: + + >>> dctx = zstandard.ZstdDecompressor() + >>> for chunk in dctx.read_to_iter(fh, read_size=16384): + ... pass + + It is also possible to skip leading bytes in the input data: + + >>> dctx = zstandard.ZstdDecompressor() + >>> for chunk in dctx.read_to_iter(fh, skip_bytes=1): + ... pass + + .. tip:: + + Skipping leading bytes is useful if the source data contains extra + *header* data. Traditionally, you would need to create a slice or + ``memoryview`` of the data you want to decompress. This would create + overhead. It is more efficient to pass the offset into this API. + + Similarly to :py:meth:`ZstdCompressor.read_to_iter`, the consumer of the + iterator controls when data is decompressed. If the iterator isn't consumed, + decompression is put on hold. + + When ``read_to_iter()`` is passed an object conforming to the buffer protocol, + the behavior may seem similar to what occurs when the simple decompression + API is used. However, this API works when the decompressed size is unknown. + Furthermore, if feeding large inputs, the decompressor will work in chunks + instead of performing a single operation. + + :param reader: + Source of compressed data. Can be any object with a + ``read(size)`` method or any object conforming to the buffer + protocol. + :param read_size: + Integer size of data chunks to read from ``reader`` and feed into + the decompressor. + :param write_size: + Integer size of data chunks to emit from iterator. + :param skip_bytes: + Integer number of bytes to skip over before sending data into + the decompressor. + :return: + Iterator of ``bytes`` representing uncompressed data. 
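A sketch of the ``skip_bytes`` option described above, where a made-up 4-byte application header precedes the zstd frame:

import io
import zstandard

frame = zstandard.ZstdCompressor().compress(b"payload")
fh = io.BytesIO(b"HDR1" + frame)  # hypothetical 4-byte application header

dctx = zstandard.ZstdDecompressor()
restored = b"".join(dctx.read_to_iter(fh, skip_bytes=4))
assert restored == b"payload"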
+ """ + + if skip_bytes >= read_size: + raise ValueError("skip_bytes must be smaller than read_size") + + if hasattr(reader, "read"): + have_read = True + elif hasattr(reader, "__getitem__"): + have_read = False + buffer_offset = 0 + size = len(reader) + else: + raise ValueError( + "must pass an object with a read() method or " + "conforms to buffer protocol" + ) + + if skip_bytes: + if have_read: + reader.read(skip_bytes) + else: + if skip_bytes > size: + raise ValueError("skip_bytes larger than first input chunk") + + buffer_offset = skip_bytes + + self._ensure_dctx() + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + while True: + assert out_buffer.pos == 0 + + if have_read: + read_result = reader.read(read_size) + else: + remaining = size - buffer_offset + slice_size = min(remaining, read_size) + read_result = reader[buffer_offset : buffer_offset + slice_size] + buffer_offset += slice_size + + # No new input. Break out of read loop. + if not read_result: + break + + # Feed all read data into decompressor and emit output until + # exhausted. + read_buffer = ffi.from_buffer(read_result) + in_buffer.src = read_buffer + in_buffer.size = len(read_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + assert out_buffer.pos == 0 + + zresult = lib.ZSTD_decompressStream( + self._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + if zresult == 0: + return + + # Repeat loop to collect more input data. + continue + + # If we get here, input is exhausted. + + def stream_writer( + self, + writer, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + write_return_read=True, + closefd=True, + ): + """ + Push-based stream wrapper that performs decompression. + + This method constructs a stream wrapper that conforms to the + ``io.RawIOBase`` interface and performs transparent decompression + when writing to a wrapper stream. + + See :py:class:`zstandard.ZstdDecompressionWriter` for more documentation + and usage examples. + + :param writer: + Destination for decompressed output. Can be any object with a + ``write(data)``. + :param write_size: + Integer size of chunks to ``write()`` to ``writer``. + :param write_return_read: + Whether ``write()`` should return the number of bytes of input + consumed. If False, ``write()`` returns the number of bytes sent + to the inner stream. + :param closefd: + Whether to ``close()`` the inner stream when this stream is closed. + :return: + :py:class:`zstandard.ZstdDecompressionWriter` + """ + if not hasattr(writer, "write"): + raise ValueError("must pass an object with a write() method") + + return ZstdDecompressionWriter( + self, + writer, + write_size, + write_return_read, + closefd=closefd, + ) + + def copy_stream( + self, + ifh, + ofh, + read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + ): + """ + Copy data between streams, decompressing in the process. + + Compressed data will be read from ``ifh``, decompressed, and written + to ``ofh``. + + >>> dctx = zstandard.ZstdDecompressor() + >>> dctx.copy_stream(ifh, ofh) + + e.g. 
to decompress a file to another file: + + >>> dctx = zstandard.ZstdDecompressor() + >>> with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh: + ... dctx.copy_stream(ifh, ofh) + + The size of chunks being ``read()`` and ``write()`` from and to the + streams can be specified: + + >>> dctx = zstandard.ZstdDecompressor() + >>> dctx.copy_stream(ifh, ofh, read_size=8192, write_size=16384) + + :param ifh: + Source stream to read compressed data from. + + Must have a ``read()`` method. + :param ofh: + Destination stream to write uncompressed data to. + + Must have a ``write()`` method. + :param read_size: + The number of bytes to ``read()`` from the source in a single + operation. + :param write_size: + The number of bytes to ``write()`` to the destination in a single + operation. + :return: + 2-tuple of integers representing the number of bytes read and + written, respectively. + """ + + if not hasattr(ifh, "read"): + raise ValueError("first argument must have a read() method") + if not hasattr(ofh, "write"): + raise ValueError("second argument must have a write() method") + + self._ensure_dctx() + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size + out_buffer.pos = 0 + + total_read, total_write = 0, 0 + + # Read all available input. + while True: + data = ifh.read(read_size) + if not data: + break + + data_buffer = ffi.from_buffer(data) + total_read += len(data_buffer) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + # Flush all read data to output. + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_decompressStream( + self._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompressor error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + total_write += out_buffer.pos + out_buffer.pos = 0 + + # Continue loop to keep reading. + + return total_read, total_write + + def decompress_content_dict_chain(self, frames): + """ + Decompress a series of frames using the content dictionary chaining technique. + + Such a list of frames is produced by compressing discrete inputs where + each non-initial input is compressed with a *prefix* dictionary consisting + of the content of the previous input. + + For example, say you have the following inputs: + + >>> inputs = [b"input 1", b"input 2", b"input 3"] + + The zstd frame chain consists of: + + 1. ``b"input 1"`` compressed in standalone/discrete mode + 2. ``b"input 2"`` compressed using ``b"input 1"`` as a *prefix* dictionary + 3. ``b"input 3"`` compressed using ``b"input 2"`` as a *prefix* dictionary + + Each zstd frame **must** have the content size written. + + The following Python code can be used to produce a *prefix dictionary chain*: + + >>> def make_chain(inputs): + ... frames = [] + ... + ... # First frame is compressed in standalone/discrete mode. + ... zctx = zstandard.ZstdCompressor() + ... frames.append(zctx.compress(inputs[0])) + ... + ... # Subsequent frames use the previous fulltext as a prefix dictionary + ... for i, raw in enumerate(inputs[1:]): + ... dict_data = zstandard.ZstdCompressionDict( + ... inputs[i], dict_type=zstandard.DICT_TYPE_RAWCONTENT) + ... zctx = zstandard.ZstdCompressor(dict_data=dict_data) + ... frames.append(zctx.compress(raw)) + ... + ... 
return frames + + ``decompress_content_dict_chain()`` returns the uncompressed data of the last + element in the input chain. + + .. note:: + + It is possible to implement *prefix dictionary chain* decompression + on top of other APIs. However, this function will likely be faster - + especially for long input chains - as it avoids the overhead of + instantiating and passing around intermediate objects between + multiple functions. + + :param frames: + List of ``bytes`` holding compressed zstd frames. + :return: + """ + if not isinstance(frames, list): + raise TypeError("argument must be a list") + + if not frames: + raise ValueError("empty input chain") + + # First chunk should not be using a dictionary. We handle it specially. + chunk = frames[0] + if not isinstance(chunk, bytes): + raise ValueError("chunk 0 must be bytes") + + # All chunks should be zstd frames and should have content size set. + chunk_buffer = ffi.from_buffer(chunk) + params = ffi.new("ZSTD_frameHeader *") + zresult = lib.ZSTD_getFrameHeader( + params, chunk_buffer, len(chunk_buffer) + ) + if lib.ZSTD_isError(zresult): + raise ValueError("chunk 0 is not a valid zstd frame") + elif zresult: + raise ValueError("chunk 0 is too small to contain a zstd frame") + + if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN: + raise ValueError("chunk 0 missing content size in frame") + + self._ensure_dctx(load_dict=False) + + last_buffer = ffi.new("char[]", params.frameContentSize) + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = last_buffer + out_buffer.size = len(last_buffer) + out_buffer.pos = 0 + + in_buffer = ffi.new("ZSTD_inBuffer *") + in_buffer.src = chunk_buffer + in_buffer.size = len(chunk_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not decompress chunk 0: %s" % _zstd_error(zresult) + ) + elif zresult: + raise ZstdError("chunk 0 did not decompress full frame") + + # Special case of chain length of 1 + if len(frames) == 1: + return ffi.buffer(last_buffer, len(last_buffer))[:] + + i = 1 + while i < len(frames): + chunk = frames[i] + if not isinstance(chunk, bytes): + raise ValueError("chunk %d must be bytes" % i) + + chunk_buffer = ffi.from_buffer(chunk) + zresult = lib.ZSTD_getFrameHeader( + params, chunk_buffer, len(chunk_buffer) + ) + if lib.ZSTD_isError(zresult): + raise ValueError("chunk %d is not a valid zstd frame" % i) + elif zresult: + raise ValueError( + "chunk %d is too small to contain a zstd frame" % i + ) + + if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN: + raise ValueError("chunk %d missing content size in frame" % i) + + dest_buffer = ffi.new("char[]", params.frameContentSize) + + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + in_buffer.src = chunk_buffer + in_buffer.size = len(chunk_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_decompressStream( + self._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not decompress chunk %d: %s" % _zstd_error(zresult) + ) + elif zresult: + raise ZstdError("chunk %d did not decompress full frame" % i) + + last_buffer = dest_buffer + i += 1 + + return ffi.buffer(last_buffer, len(last_buffer))[:] + + def multi_decompress_to_buffer( + self, frames, decompressed_sizes=None, threads=0 + ): + """ + Decompress multiple zstd frames to output buffers as a single operation. + + (Experimental. Not available in CFFI backend.) 
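+
+ On this backend, a rough fallback is to decompress each frame
+ individually with :py:meth:`ZstdDecompressor.decompress`; a minimal
+ sketch (the helper name below is illustrative, not part of the API):
+
+ >>> def decompress_frames_one_by_one(dctx, frames):
+ ... return [dctx.decompress(frame) for frame in frames]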
+
+ Compressed frames can be passed to the function as a
+ ``BufferWithSegments``, a ``BufferWithSegmentsCollection``, or as a
+ list containing objects that conform to the buffer protocol. For best
+ performance, pass a ``BufferWithSegmentsCollection`` or a
+ ``BufferWithSegments``, as minimal input validation will be done for
+ that type. If calling from Python (as opposed to C), constructing one
+ of these instances may add overhead that cancels out the validation
+ savings over list inputs.
+
+ Returns a ``BufferWithSegmentsCollection`` containing the decompressed
+ data. All decompressed data is allocated in a single memory buffer. The
+ ``BufferWithSegments`` instance tracks which objects are at which offsets
+ and their respective lengths.
+
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> results = dctx.multi_decompress_to_buffer([b'...', b'...'])
+
+ The decompressed size of each frame MUST be discoverable. It can either be
+ embedded within the zstd frame or passed in via the ``decompressed_sizes``
+ argument.
+
+ The ``decompressed_sizes`` argument is an object conforming to the buffer
+ protocol which holds an array of 64-bit unsigned integers in the machine's
+ native format defining the decompressed sizes of each frame. If this argument
+ is passed, it avoids having to scan each frame for its decompressed size.
+ This frame scanning can add noticeable overhead in some scenarios.
+
+ >>> frames = [...]
+ >>> sizes = struct.pack('=QQQQ', len0, len1, len2, len3)
+ >>>
+ >>> dctx = zstandard.ZstdDecompressor()
+ >>> results = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+
+ .. note::
+
+ It is possible to pass a ``mmap.mmap()`` instance into this function by
+ wrapping it with a ``BufferWithSegments`` instance (which will define the
+ offsets of frames within the memory mapped region).
+
+ This function is logically equivalent to performing
+ :py:meth:`ZstdDecompressor.decompress` on each input frame and returning
+ the result.
+
+ This function exists to perform decompression on multiple frames as fast
+ as possible by having as little overhead as possible. Since decompression is
+ performed as a single operation and since the decompressed output is stored in
+ a single buffer, extra memory allocations, Python objects, and Python function
+ calls are avoided. This is ideal for scenarios where callers know up front that
+ they need to access data for multiple frames, such as when *delta chains* are
+ being used.
+
+ Currently, the implementation always spawns multiple threads when requested,
+ even if the amount of work to do is small. In the future, it will be smarter
+ about avoiding threads and their associated overhead when there is little
+ work to do.
+
+ :param frames:
+ Source defining zstd frames to decompress.
+ :param decompressed_sizes:
+ Array of integers representing sizes of decompressed zstd frames.
+ :param threads:
+ How many threads to use for decompression operations.
+
+ Negative values will use the same number of threads as logical CPUs
+ on the machine. Values ``0`` or ``1`` use a single thread.
+ :return: + ``BufferWithSegmentsCollection`` + """ + raise NotImplementedError() + + def _ensure_dctx(self, load_dict=True): + lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only) + + if self._max_window_size: + zresult = lib.ZSTD_DCtx_setMaxWindowSize( + self._dctx, self._max_window_size + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to set max window size: %s" % _zstd_error(zresult) + ) + + zresult = lib.ZSTD_DCtx_setParameter( + self._dctx, lib.ZSTD_d_format, self._format + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to set decoding format: %s" % _zstd_error(zresult) + ) + + if self._dict_data and load_dict: + zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to reference prepared dictionary: %s" + % _zstd_error(zresult) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/zstandard/py.typed b/env-llmeval/lib/python3.10/site-packages/zstandard/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391