diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/AUTHORS.md b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/AUTHORS.md new file mode 100644 index 0000000000000000000000000000000000000000..baa8504e2036be53e3fcaaaa4f2ea76d12773322 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/AUTHORS.md @@ -0,0 +1,313 @@ +# Natural Language Toolkit (NLTK) Authors + +## Original Authors + +- Steven Bird +- Edward Loper +- Ewan Klein + +## Contributors + +- Tom Aarsen +- Rami Al-Rfou' +- Mark Amery +- Greg Aumann +- Ivan Barria +- Ingolf Becker +- Yonatan Becker +- Paul Bedaride +- Steven Bethard +- Robert Berwick +- Dan Blanchard +- Nathan Bodenstab +- Alexander Böhm +- Francis Bond +- Paul Bone +- Jordan Boyd-Graber +- Daniel Blanchard +- Phil Blunsom +- Lars Buitinck +- Cristian Capdevila +- Steve Cassidy +- Chen-Fu Chiang +- Dmitry Chichkov +- Jinyoung Choi +- Andrew Clausen +- Lucas Champollion +- Graham Christensen +- Trevor Cohn +- David Coles +- Tom Conroy +- Claude Coulombe +- Lucas Cooper +- Robin Cooper +- Chris Crowner +- James Curran +- Arthur Darcet +- Dariel Dato-on +- Selina Dennis +- Leon Derczynski +- Alexis Dimitriadis +- Nikhil Dinesh +- Liang Dong +- David Doukhan +- Rebecca Dridan +- Pablo Duboue +- Long Duong +- Christian Federmann +- Campion Fellin +- Michelle Fullwood +- Dan Garrette +- Maciej Gawinecki +- Jean Mark Gawron +- Sumukh Ghodke +- Yoav Goldberg +- Michael Wayne Goodman +- Dougal Graham +- Brent Gray +- Simon Greenhill +- Clark Grubb +- Eduardo Pereira Habkost +- Masato Hagiwara +- Lauri Hallila +- Michael Hansen +- Yurie Hara +- Will Hardy +- Tyler Hartley +- Peter Hawkins +- Saimadhav Heblikar +- Fredrik Hedman +- Helder +- Michael Heilman +- Ofer Helman +- Christopher Hench +- Bruce Hill +- Amy Holland +- Kristy Hollingshead +- Marcus Huderle +- Baden Hughes +- Nancy Ide +- Rebecca Ingram +- Edward Ivanovic +- Thomas Jakobsen +- Nick Johnson +- Eric Kafe +- Piotr Kasprzyk +- Angelos Katharopoulos +- Sudharshan Kaushik +- Chris Koenig +- Mikhail Korobov +- Denis Krusko +- Ilia Kurenkov +- Stefano Lattarini +- Pierre-François Laquerre +- Stefano Lattarini +- Haejoong Lee +- Jackson Lee +- Max Leonov +- Chris Liechti +- Hyuckin David Lim +- Tom Lippincott +- Peter Ljunglöf +- Alex Louden +- Joseph Lynch +- Nitin Madnani +- Felipe Madrigal +- Bjørn Mæland +- Dean Malmgren +- Christopher Maloof +- Rob Malouf +- Iker Manterola +- Carl de Marcken +- Mitch Marcus +- Torsten Marek +- Robert Marshall +- Marius Mather +- Duncan McGreggor +- David McClosky +- Xinfan Meng +- Dmitrijs Milajevs +- Margaret Mitchell +- Tomonori Nagano +- Jason Narad +- Shari A’aidil Nasruddin +- Lance Nathan +- Morten Neergaard +- David Nemeskey +- Eric Nichols +- Joel Nothman +- Alireza Nourian +- Alexander Oleynikov +- Pierpaolo Pantone +- Ted Pedersen +- Jacob Perkins +- Alberto Planas +- Ondrej Platek +- Alessandro Presta +- Qi Liu +- Martin Thorsen Ranang +- Michael Recachinas +- Brandon Rhodes +- Joshua Ritterman +- Will Roberts +- Stuart Robinson +- Carlos Rodriguez +- Lorenzo Rubio +- Alex Rudnick +- Jussi Salmela +- Geoffrey Sampson +- Kepa Sarasola +- Kevin Scannell +- Nathan Schneider +- Rico Sennrich +- Thomas Skardal +- Eric Smith +- Lynn Soe +- Rob Speer +- Peter Spiller +- Richard Sproat +- Ceri Stagg +- Peter Stahl +- Oliver Steele +- Thomas Stieglmaier +- Jan Strunk +- Liling Tan +- Claire Taylor +- Louis Tiao +- Steven Tomcavage +- Tiago Tresoldi +- Marcus Uneson +- Yu Usami +- Petro Verkhogliad +- Peter Wang 
+- Zhe Wang +- Charlotte Wilson +- Chuck Wooters +- Steven Xu +- Beracah Yankama +- Lei Ye (叶磊) +- Patrick Ye +- Geraldine Sim Wei Ying +- Jason Yoder +- Thomas Zieglier +- 0ssifrage +- ducki13 +- kiwipi +- lade +- isnowfy +- onesandzeros +- pquentin +- wvanlint +- Álvaro Justen +- bjut-hz +- Sergio Oller +- Will Monroe +- Elijah Rippeth +- Emil Manukyan +- Casper Lehmann-Strøm +- Andrew Giel +- Tanin Na Nakorn +- Linghao Zhang +- Colin Carroll +- Heguang Miao +- Hannah Aizenman (story645) +- George Berry +- Adam Nelson +- J Richard Snape +- Alex Constantin +- Tsolak Ghukasyan +- Prasasto Adi +- Safwan Kamarrudin +- Arthur Tilley +- Vilhjalmur Thorsteinsson +- Jaehoon Hwang +- Chintan Shah +- sbagan +- Zicheng Xu +- Albert Au Yeung +- Shenjian Zhao +- Deng Wang +- Ali Abdullah +- Stoytcho Stoytchev +- Lakhdar Benzahia +- Kheireddine Abainia +- Yibin Lin +- Artiem Krinitsyn +- Björn Mattsson +- Oleg Chislov +- Pavan Gururaj Joshi +- Ethan Hill +- Vivek Lakshmanan +- Somnath Rakshit +- Anlan Du +- Pulkit Maloo +- Brandon M. Burroughs +- John Stewart +- Iaroslav Tymchenko +- Aleš Tamchyna +- Tim Gianitsos +- Philippe Partarrieu +- Andrew Owen Martin +- Adrian Ellis +- Nat Quayle Nelson +- Yanpeng Zhao +- Matan Rak +- Nick Ulle +- Uday Krishna +- Osman Zubair +- Viresh Gupta +- Ondřej Cífka +- Iris X. Zhou +- Devashish Lal +- Gerhard Kremer +- Nicolas Darr +- Hervé Nicol +- Alexandre H. T. Dias +- Daksh Shah +- Jacob Weightman +- Bonifacio de Oliveira +- Armins Bagrats Stepanjans +- Vassilis Palassopoulos +- Ram Rachum +- Or Sharir +- Denali Molitor +- Jacob Moorman +- Cory Nezin +- Matt Chaput +- Danny Sepler +- Akshita Bhagia +- Pratap Yadav +- Hiroki Teranishi +- Ruben Cartuyvels +- Dalton Pearson +- Robby Horvath +- Gavish Poddar +- Saibo Geng +- Ahmet Yildirim +- Yuta Nakamura +- Adam Hawley +- Panagiotis Simakis +- Richard Wang +- Alexandre Perez-Lebel +- Fernando Carranza +- Martin Kondratzky +- Heungson Lee +- M.K. Pawelkiewicz +- Steven Thomas Smith +- Jan Lennartz + +## Others whose work we've taken and included in NLTK, but who didn't directly contribute it: + +### Contributors to the Porter Stemmer + +- Martin Porter +- Vivake Gupta +- Barry Wilkins +- Hiranmay Ghosh +- Chris Emerson + +### Authors of snowball arabic stemmer algorithm + +- Assem Chelli +- Abdelkrim Aries +- Lakhdar Benzahia diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/LICENSE.txt b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..75b52484ea471f882c29e02693b4f02dba175b5e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..cad1d0d46a462a56f88ade0772b96d12ba0e41e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/METADATA @@ -0,0 +1,67 @@ +Metadata-Version: 2.1 +Name: nltk +Version: 3.8.1 +Summary: Natural Language Toolkit +Home-page: https://www.nltk.org/ +Author: NLTK Team +Author-email: nltk.team@gmail.com +Maintainer: NLTK Team +Maintainer-email: nltk.team@gmail.com +License: Apache License, Version 2.0 +Project-URL: Documentation, https://www.nltk.org/ +Project-URL: Source Code, https://github.com/nltk/nltk +Project-URL: Issue Tracker, https://github.com/nltk/nltk/issues +Keywords: NLP,CL,natural language processing,computational linguistics,parsing,tagging,tokenizing,syntax,linguistics,language,natural language,text analytics +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering :: Human Machine Interfaces +Classifier: Topic :: Scientific/Engineering :: Information Analysis +Classifier: Topic :: Text Processing +Classifier: Topic :: Text Processing :: Filters +Classifier: Topic :: Text Processing :: General +Classifier: Topic :: Text Processing :: Indexing +Classifier: Topic :: Text Processing :: Linguistic +Requires-Python: >=3.7 +Requires-Dist: click +Requires-Dist: joblib +Requires-Dist: regex (>=2021.8.3) +Requires-Dist: tqdm +Provides-Extra: all +Requires-Dist: scikit-learn ; extra == 'all' +Requires-Dist: python-crfsuite ; extra == 'all' +Requires-Dist: requests ; extra == 'all' +Requires-Dist: numpy ; extra == 'all' +Requires-Dist: pyparsing ; extra == 'all' +Requires-Dist: twython ; extra == 'all' +Requires-Dist: scipy ; extra == 'all' +Requires-Dist: matplotlib ; extra == 'all' +Provides-Extra: corenlp +Requires-Dist: requests ; extra == 'corenlp' +Provides-Extra: machine_learning +Requires-Dist: numpy ; extra == 'machine_learning' +Requires-Dist: python-crfsuite ; extra == 'machine_learning' +Requires-Dist: scikit-learn ; extra == 'machine_learning' +Requires-Dist: scipy ; extra == 'machine_learning' +Provides-Extra: plot +Requires-Dist: matplotlib ; extra == 'plot' +Provides-Extra: tgrep +Requires-Dist: pyparsing ; extra == 'tgrep' +Provides-Extra: twitter +Requires-Dist: twython ; extra == 'twitter' + +The Natural Language Toolkit (NLTK) is a Python package for +natural language processing. NLTK requires Python 3.7, 3.8, 3.9, 3.10 or 3.11. 
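The METADATA file above declares six optional extras (`all`, `corenlp`, `machine_learning`, `plot`, `tgrep`, `twitter`); installing, say, `nltk[plot]` additionally pulls in matplotlib. As a minimal sketch (not part of the diff), these same fields can be read back at runtime with the standard-library `importlib.metadata`, assuming nltk is installed in the active environment:

```python
# Inspect the installed METADATA shown above via the standard library.
from importlib.metadata import metadata, requires, version

md = metadata("nltk")
print(version("nltk"))               # "3.8.1" in this environment
print(md["Requires-Python"])         # ">=3.7"
print(md.get_all("Provides-Extra"))  # ['all', 'corenlp', 'machine_learning', ...]

# Requirement strings keep their extra markers,
# e.g. 'pyparsing ; extra == "tgrep"'.
for req in requires("nltk"):
    print(req)
```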
+ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/README.md b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0794d38b7fb70be938f66e34cac7d5b3ce0aa083 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/README.md @@ -0,0 +1,50 @@ +# Natural Language Toolkit (NLTK) +[![PyPI](https://img.shields.io/pypi/v/nltk.svg)](https://pypi.python.org/pypi/nltk) +![CI](https://github.com/nltk/nltk/actions/workflows/ci.yaml/badge.svg?branch=develop) + +NLTK -- the Natural Language Toolkit -- is a suite of open source Python +modules, data sets, and tutorials supporting research and development in Natural +Language Processing. NLTK requires Python version 3.7, 3.8, 3.9, 3.10 or 3.11. + +For documentation, please visit [nltk.org](https://www.nltk.org/). + + +## Contributing + +Do you want to contribute to NLTK development? Great! +Please read [CONTRIBUTING.md](CONTRIBUTING.md) for more details. + +See also [how to contribute to NLTK](https://www.nltk.org/contribute.html). + + +## Donate + +Have you found the toolkit helpful? Please support NLTK development by donating +to the project via PayPal, using the link on the NLTK homepage. + + +## Citing + +If you publish work that uses NLTK, please cite the NLTK book, as follows: + + Bird, Steven, Edward Loper and Ewan Klein (2009). + Natural Language Processing with Python. O'Reilly Media Inc. + + +## Copyright + +Copyright (C) 2001-2023 NLTK Project + +For license information, see [LICENSE.txt](LICENSE.txt). + +[AUTHORS.md](AUTHORS.md) contains a list of everyone who has contributed to NLTK. + + +### Redistributing + +- NLTK source code is distributed under the Apache 2.0 License. +- NLTK documentation is distributed under the Creative Commons + Attribution-Noncommercial-No Derivative Works 3.0 United States license. +- NLTK corpora are provided under the terms given in the README file for each + corpus; all are redistributable and available for non-commercial use. +- NLTK may be freely redistributed, subject to the provisions of these licenses. 
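The README describes NLTK as a suite of modules and data sets supporting NLP work. As a minimal usage sketch (illustrative, not taken from the package itself), tokenizing and tagging one sentence looks like this, assuming the `punkt` and `averaged_perceptron_tagger` resources can be downloaded:

```python
# Minimal NLTK 3.8.1 usage sketch: tokenize and POS-tag one sentence.
import nltk

# Fetch the required models; no-ops if they are already present.
nltk.download("punkt", quiet=True)
nltk.download("averaged_perceptron_tagger", quiet=True)

tokens = nltk.word_tokenize("NLTK is a suite of open source Python modules.")
print(nltk.pos_tag(tokens))  # [('NLTK', 'NNP'), ('is', 'VBZ'), ...]
```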
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..6b3f2d8a93ac86306e17227a999928df2de3be7b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/RECORD @@ -0,0 +1,782 @@ +../../../bin/nltk,sha256=BPORS870sJtlzIbvcsuWYIoNzWWuAW4oAPgSh-ugPCc,239 +nltk-3.8.1.dist-info/AUTHORS.md,sha256=lwegiKq14iCouEfpgu85VSAWadP2X1MkLhUsgYBfPOI,7628 +nltk-3.8.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nltk-3.8.1.dist-info/LICENSE.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560 +nltk-3.8.1.dist-info/METADATA,sha256=CUHc77qyEPWGmH6DiO6622SIsRU2fGENF_LCKUzGEOI,2847 +nltk-3.8.1.dist-info/README.md,sha256=_oLlVxk8v-ARv0t4wAyrPKZ8KmLA2y1tlhJ4C3QjRk0,1789 +nltk-3.8.1.dist-info/RECORD,, +nltk-3.8.1.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +nltk-3.8.1.dist-info/entry_points.txt,sha256=SK6SzMicwUtBiwUOmv5P1ZVs0h-xqey6PnRpsUGGx5c,37 +nltk-3.8.1.dist-info/top_level.txt,sha256=YoQ-mwqckmTv1Qktmlk5Ylb6lDG77jg5qwoEB7c-pXo,5 +nltk/VERSION,sha256=932VxvO5Mh-hWNyZVXsqpdneLGKgY0kKcA4_XVSSvyQ,7 +nltk/__init__.py,sha256=RSji7RAoc5vyHzOA6pB6LPpCUxB8o68igd6CrYElOGA,6432 +nltk/__pycache__/__init__.cpython-310.pyc,, +nltk/__pycache__/book.cpython-310.pyc,, +nltk/__pycache__/cli.cpython-310.pyc,, +nltk/__pycache__/collections.cpython-310.pyc,, +nltk/__pycache__/collocations.cpython-310.pyc,, +nltk/__pycache__/compat.cpython-310.pyc,, +nltk/__pycache__/data.cpython-310.pyc,, +nltk/__pycache__/decorators.cpython-310.pyc,, +nltk/__pycache__/downloader.cpython-310.pyc,, +nltk/__pycache__/featstruct.cpython-310.pyc,, +nltk/__pycache__/grammar.cpython-310.pyc,, +nltk/__pycache__/help.cpython-310.pyc,, +nltk/__pycache__/internals.cpython-310.pyc,, +nltk/__pycache__/jsontags.cpython-310.pyc,, +nltk/__pycache__/langnames.cpython-310.pyc,, +nltk/__pycache__/lazyimport.cpython-310.pyc,, +nltk/__pycache__/probability.cpython-310.pyc,, +nltk/__pycache__/text.cpython-310.pyc,, +nltk/__pycache__/tgrep.cpython-310.pyc,, +nltk/__pycache__/toolbox.cpython-310.pyc,, +nltk/__pycache__/treeprettyprinter.cpython-310.pyc,, +nltk/__pycache__/treetransforms.cpython-310.pyc,, +nltk/__pycache__/util.cpython-310.pyc,, +nltk/__pycache__/wsd.cpython-310.pyc,, +nltk/app/__init__.py,sha256=xGZbbDC3xv67XnHHusxZmCPMNqj07BM9W6ZSlkWn9eQ,1578 +nltk/app/__pycache__/__init__.cpython-310.pyc,, +nltk/app/__pycache__/chartparser_app.cpython-310.pyc,, +nltk/app/__pycache__/chunkparser_app.cpython-310.pyc,, +nltk/app/__pycache__/collocations_app.cpython-310.pyc,, +nltk/app/__pycache__/concordance_app.cpython-310.pyc,, +nltk/app/__pycache__/nemo_app.cpython-310.pyc,, +nltk/app/__pycache__/rdparser_app.cpython-310.pyc,, +nltk/app/__pycache__/srparser_app.cpython-310.pyc,, +nltk/app/__pycache__/wordfreq_app.cpython-310.pyc,, +nltk/app/__pycache__/wordnet_app.cpython-310.pyc,, +nltk/app/chartparser_app.py,sha256=8FX3-eJQmB-8LT9k-lQJe_y5dSO3Ly4AD2K_wIs9FuE,88195 +nltk/app/chunkparser_app.py,sha256=tbEPtYtccyTcbSCUhFhhbkjCVi_rtel4EKPSogeJOT8,58322 +nltk/app/collocations_app.py,sha256=gJBWxNmkUjXQWkruqcOSE8l51Md2fcM3RmZlrJHJZK4,14664 +nltk/app/concordance_app.py,sha256=HjLN9ybKbjbKqkhJNUakivwIPojvDxqvwnqb7u07BYE,24882 +nltk/app/nemo_app.py,sha256=6ZBJXlJWKWoYnsrEy3Yy6IeFxcy3FNaGWK6QnMYEy4E,12305 +nltk/app/rdparser_app.py,sha256=j4tMGNLnwrwkVw3MyMr3-56TXAwAIIEo-v0yWyjDKEQ,37781 
+nltk/app/srparser_app.py,sha256=UQwxqEPDfSYJTw22SSdp5EUX0QwlbztyH4EYHtBerw0,34401 +nltk/app/wordfreq_app.py,sha256=0mzSrNosW3Wh_J_5FdJV8Bq-F_a2x5HRV3iQAI03fnQ,957 +nltk/app/wordnet_app.py,sha256=Ut5VU3hzM4inRayi4c5uyNsbAz7YcGW7_q8BBmrJpPs,35574 +nltk/book.py,sha256=enAPUeJxxAXY0C60vlmPHCVhUxVY2K2gx3wWPH6tU6k,3912 +nltk/ccg/__init__.py,sha256=Gz2z13lWdN_wdcvn78rxJGvU23EKIN_sNm5twz_2nWw,915 +nltk/ccg/__pycache__/__init__.cpython-310.pyc,, +nltk/ccg/__pycache__/api.cpython-310.pyc,, +nltk/ccg/__pycache__/chart.cpython-310.pyc,, +nltk/ccg/__pycache__/combinator.cpython-310.pyc,, +nltk/ccg/__pycache__/lexicon.cpython-310.pyc,, +nltk/ccg/__pycache__/logic.cpython-310.pyc,, +nltk/ccg/api.py,sha256=3xzrsFkp0XM_SihDIEODQEMgFh-KYBaCQs4WNStLTgU,10360 +nltk/ccg/chart.py,sha256=2lyYNM8PY6AhucRmNetqgylPfKz3Pzn4faAKtkvYuFA,14147 +nltk/ccg/combinator.py,sha256=1C5Tqwhp-diD7rHtUfpPbVt4v1a7oPBY0bkHPm52OD4,10633 +nltk/ccg/lexicon.py,sha256=9rC11EzdzOVMybBt6TeYdVw4xj73Ufy7NS1cTqtq5sU,9863 +nltk/ccg/logic.py,sha256=MEukXOQu6dX-i-irRH3Nko5D2ElpDGjVosdgHPZs8wg,1871 +nltk/chat/__init__.py,sha256=4aSic0g0Zwhlxm7PC_t-0JZjChyKcPXTS0hoWTyTvLw,1556 +nltk/chat/__pycache__/__init__.cpython-310.pyc,, +nltk/chat/__pycache__/eliza.cpython-310.pyc,, +nltk/chat/__pycache__/iesha.cpython-310.pyc,, +nltk/chat/__pycache__/rude.cpython-310.pyc,, +nltk/chat/__pycache__/suntsu.cpython-310.pyc,, +nltk/chat/__pycache__/util.cpython-310.pyc,, +nltk/chat/__pycache__/zen.cpython-310.pyc,, +nltk/chat/eliza.py,sha256=27GYLQfKpMzsBvPixXQnZHqZSmaI_8H3mAvuPZAUVNw,9626 +nltk/chat/iesha.py,sha256=WassBbqcT2LbxZHda7vwcIBeIOfzefZ19cxUGay-NNM,4407 +nltk/chat/rude.py,sha256=JMoqOg2_r30pNRwknXWG8qIi_0mm__AnI7tTM1orj2I,3289 +nltk/chat/suntsu.py,sha256=dlYCRQ3INyOXbfL0qwyLaq1E-fqIVS8weRk2gOC8tq0,7185 +nltk/chat/util.py,sha256=dbgxikuBJGP6YhDPFw_ZYTSsBqpqEV5HU1ipWfj21Bw,4014 +nltk/chat/zen.py,sha256=KtZcUzKXlwyfL_tQpa9rtuNB12PAscwaWt2pbvk6GcM,11679 +nltk/chunk/__init__.py,sha256=hIssYRWZj_6YmHQOhJe3DRlvqbehf-Y7e6kSy8Sicp0,7597 +nltk/chunk/__pycache__/__init__.cpython-310.pyc,, +nltk/chunk/__pycache__/api.cpython-310.pyc,, +nltk/chunk/__pycache__/named_entity.cpython-310.pyc,, +nltk/chunk/__pycache__/regexp.cpython-310.pyc,, +nltk/chunk/__pycache__/util.cpython-310.pyc,, +nltk/chunk/api.py,sha256=-gEfVh1nv3CO-YXV3kTSMNDS4_sbuKnM3xVuTq2oc60,1946 +nltk/chunk/named_entity.py,sha256=v__H3Rply3PvrzKRUM2ktkLQcMYJc_14qHFbiKqqaMo,11140 +nltk/chunk/regexp.py,sha256=KXfm9-KJNqSRSJFfV5192yErXVuLH2jmOeCJbROkPRU,55980 +nltk/chunk/util.py,sha256=Ll5PB0ozF7rwNJtsdM6YiA1zktVLO7MOaQtJDR2Qx4g,21311 +nltk/classify/__init__.py,sha256=2s2RPR2IPix1aXumcnpzKSYJ8BzaC-VsKcpVHHZPT0E,4596 +nltk/classify/__pycache__/__init__.cpython-310.pyc,, +nltk/classify/__pycache__/api.cpython-310.pyc,, +nltk/classify/__pycache__/decisiontree.cpython-310.pyc,, +nltk/classify/__pycache__/maxent.cpython-310.pyc,, +nltk/classify/__pycache__/megam.cpython-310.pyc,, +nltk/classify/__pycache__/naivebayes.cpython-310.pyc,, +nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc,, +nltk/classify/__pycache__/rte_classify.cpython-310.pyc,, +nltk/classify/__pycache__/scikitlearn.cpython-310.pyc,, +nltk/classify/__pycache__/senna.cpython-310.pyc,, +nltk/classify/__pycache__/svm.cpython-310.pyc,, +nltk/classify/__pycache__/tadm.cpython-310.pyc,, +nltk/classify/__pycache__/textcat.cpython-310.pyc,, +nltk/classify/__pycache__/util.cpython-310.pyc,, +nltk/classify/__pycache__/weka.cpython-310.pyc,, +nltk/classify/api.py,sha256=PN1b_jw2InZWMNuzMaPSs2PP-f9_7IZfokohkKd0Xro,6625 
+nltk/classify/decisiontree.py,sha256=HL-V9gcFYX2uYaonc3glQq_CAEqyCxKTb1FnKxkpx8U,13083 +nltk/classify/maxent.py,sha256=pJZFnshxF4jfYlY-8zgf3N8P5jVczqxTOCAI6HrVTqA,60921 +nltk/classify/megam.py,sha256=4d2NlMAyrXca2TB_phpff41-qY8YZdqx6LrYLL5s0jI,6396 +nltk/classify/naivebayes.py,sha256=fahYSKoSAMisUKOjXxkIDsRRU7swLyFMSAloL8mToNU,10713 +nltk/classify/positivenaivebayes.py,sha256=WckMp6Olu6x6Ku__NCRVPO2n6WY_AI2yr1y46cy-IgU,7412 +nltk/classify/rte_classify.py,sha256=d7BhvcXp-j1ovcbFs0jz2I22nZtn1pBvfT79kpc1xnY,6301 +nltk/classify/scikitlearn.py,sha256=_D3TQC-jxEn-eq3Y7Ydc1OczkhIAbednxDpcFpbj99U,5548 +nltk/classify/senna.py,sha256=WGne67HygHBl85t4DKqTjWgjILTVOaXoDrQgV7odLm8,6931 +nltk/classify/svm.py,sha256=Izn33z8jQhQ70hJdbli-HUc_dly9O2sxMso0v1MZ5dY,525 +nltk/classify/tadm.py,sha256=jGR9ga8n1rQUCoRg49kkSyZkJ7thteHc0TAApdtVaVU,3555 +nltk/classify/textcat.py,sha256=BeHyxtRXdqAGJtnipjORVrCIjWTHVa0OJm1-WOoMybI,6035 +nltk/classify/util.py,sha256=1Puz0ks5SrYXYQ8eJbcJpqRtGdwS1Hji0TMQk70ZCy4,12461 +nltk/classify/weka.py,sha256=em2Rij5vMKo5LFZbHWBsPZlDkBb-QOLMMEGIPjSgBoI,12938 +nltk/cli.py,sha256=ZdakKKRjRmDn_b3e4TL1UNqaTF4VsLSMgQ4juVWstEM,1897 +nltk/cluster/__init__.py,sha256=1mPkvd-mjaRXe0Aha9qx5Gn_Dr39BRovq-qT74bUi54,4361 +nltk/cluster/__pycache__/__init__.cpython-310.pyc,, +nltk/cluster/__pycache__/api.cpython-310.pyc,, +nltk/cluster/__pycache__/em.cpython-310.pyc,, +nltk/cluster/__pycache__/gaac.cpython-310.pyc,, +nltk/cluster/__pycache__/kmeans.cpython-310.pyc,, +nltk/cluster/__pycache__/util.cpython-310.pyc,, +nltk/cluster/api.py,sha256=sranVby2NHrTr3vmefGkZgREzzOjlE94MLCUbO63rlU,2162 +nltk/cluster/em.py,sha256=uxd6qQ0T1PSE5e_3q41yk66OlkPMX0ws4L0J7ciq1YM,8419 +nltk/cluster/gaac.py,sha256=c_2ewAkcLdWLhW0WjUedESoV7I1UPGlNeY_BNhOTQqY,5921 +nltk/cluster/kmeans.py,sha256=1Ik_3_pIjCfpgDag0LmP4hi2SQiRIwy8sQqtXFSuHYA,8592 +nltk/cluster/util.py,sha256=TVZtWob_8SoZOwG6NtsBPk4fOD40ZVjxnHj9oPa6eC8,10039 +nltk/collections.py,sha256=DiSo-vicLp7UQLpDiDTljFwdDLdQVUi7UVAdgublO8A,23673 +nltk/collocations.py,sha256=NWC5upNNulRI_FYmCHX0rNeZCkCQxqVXMlLnOC_bwa8,14964 +nltk/compat.py,sha256=7f0Eg2_MbidKae8brT_oCuqDSHcfmOskS88Y6-lycmw,1307 +nltk/corpus/__init__.py,sha256=qGkuNZ2GIP4qDOR6mMVtJHnC7oMV3kRxiNr8aGFHhfs,17359 +nltk/corpus/__pycache__/__init__.cpython-310.pyc,, +nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc,, +nltk/corpus/__pycache__/util.cpython-310.pyc,, +nltk/corpus/europarl_raw.py,sha256=aXMKViytBRbry4J0FrO0P20JTOV2bgjuJQ5hOxFkJ-0,1896 +nltk/corpus/reader/__init__.py,sha256=urxkSILuhBlGI9qvsIlhQap6nFKSfkKrYi-rb4LCV5U,6677 +nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/api.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/childes.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/conll.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc,, 
+nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/indian.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/lin.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/mte.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/rte.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/timit.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/util.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc,, +nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc,, +nltk/corpus/reader/aligned.py,sha256=OJUpm8HyzqR0e8hPHd5CrAmH1bfQfXV2hZ4KCak0Zzw,5005 +nltk/corpus/reader/api.py,sha256=Dhu491gmTJnWSilm6lWdQBN59RM1aIT410mrbiDwD1k,19671 +nltk/corpus/reader/bcp47.py,sha256=I27Lj4hMW2IRM62RyXeK9OX-kSeAvEEkoXVWN1HwEiY,8534 +nltk/corpus/reader/bnc.py,sha256=AhFjLzhCpgijSGvhhHiB5AT5vmAPRP6LO-bHEXOEXas,9716 +nltk/corpus/reader/bracket_parse.py,sha256=1EfRuNmCU9DESWjwalyiDZhQuOwb3IYDjL-aAXlId_U,9619 +nltk/corpus/reader/categorized_sents.py,sha256=QLpHrDk3JlbgitkpbfndSXGxfssAyAA5MsnBesFSKis,6221 +nltk/corpus/reader/chasen.py,sha256=iVW9xtRtGcr3EpEf_BjbusRcPqRqPzKoFlOJfxdooTM,4699 +nltk/corpus/reader/childes.py,sha256=keelogBlaIvHWv1A0Q_tHbzmKyMthB2_YNWXQQmG53g,26105 +nltk/corpus/reader/chunked.py,sha256=8foVT90OCSIzv7B99rHsVGSiAX62e4HFc8QyGDqc_4Y,9366 +nltk/corpus/reader/cmudict.py,sha256=CCDpjWDzwPMOXKprKxmNtvX_-gUSFahFO-vUdTJbNhU,3366 +nltk/corpus/reader/comparative_sents.py,sha256=9YwKIk0xf_EhB6qBepCMOENtxoyvOqwuV_aIjW_WRk4,12069 +nltk/corpus/reader/conll.py,sha256=l-pgBQfCiZP90EJo1hnG3TLM9wM5imFvqlt4FfXlDII,22301 +nltk/corpus/reader/crubadan.py,sha256=FLorWHpsugP1Z3bgkukr51baIw_FvAdNqRKIcPeURL4,3627 +nltk/corpus/reader/dependency.py,sha256=nAdjT0H-HV9JW6iUMNe4cW8RR5rdA8IB8ZQ5IQY9aIc,3890 +nltk/corpus/reader/framenet.py,sha256=n59uoGpOSFIapeYnkbPmCamuslARpoa9q8773DkxB5Y,134791 
+nltk/corpus/reader/ieer.py,sha256=x6CnLllJTOQYpwGKxjsbtGjej-rxVCEKb4HUyUbmCAw,3802 +nltk/corpus/reader/indian.py,sha256=GQMsNmZwoWvSS4bdeBT5guNUyU8-LErUoMfuT2ukJb0,3014 +nltk/corpus/reader/ipipan.py,sha256=1yJ5cl7AwX9SFmJs07HsuW-eZHp_YkTL-n1nRhadzgw,13092 +nltk/corpus/reader/knbc.py,sha256=rKg0SDmeQCUaSc3SOr97_IfM5FGJjjvTCdFDn1Vnvn8,5787 +nltk/corpus/reader/lin.py,sha256=1gx48Odd8WJ9PpoF6_f5UjmqJ5j22Rsu8GMraepu7Mg,6654 +nltk/corpus/reader/markdown.py,sha256=Y9AeB3F1gbUuEK7HNjR6iwHDAYeneI5XH1rMkw_ubWQ,12028 +nltk/corpus/reader/mte.py,sha256=8p5iQIJOuxu9MOWa1xH1Iijj9J9KmcRUWh2vmcWSOqQ,14385 +nltk/corpus/reader/nkjp.py,sha256=9npzIMG-tOJjb1NpiwDvH1uXXQ5eBEq8xp_KeQ5jD10,16332 +nltk/corpus/reader/nombank.py,sha256=v-smV3YSQfl54cF-NM0St9dodOKawPL6JGsMv5tvI4g,16247 +nltk/corpus/reader/nps_chat.py,sha256=tjp98O7FNsyoxXenMT5OW0XaJN1RciICJfeekN6AWa8,2940 +nltk/corpus/reader/opinion_lexicon.py,sha256=Xnb9RpQJBxZWdnN-DN41Cw1S4HR35FmomKO-ISy476k,4230 +nltk/corpus/reader/panlex_lite.py,sha256=QJg00vmQTl-iTot524qmEah5LBiacNA9GmpNp9hhZYE,5440 +nltk/corpus/reader/panlex_swadesh.py,sha256=JxpPNLY-WqHZvXsSG7eKFc9eBUHsKO0qOCEUm4iupUw,3287 +nltk/corpus/reader/pl196x.py,sha256=e7yXJOjyKh4uTxn78rvSdSF-K8wSDghYutvrf1IMPjk,12320 +nltk/corpus/reader/plaintext.py,sha256=o1ekALImW_Uyv4pjXn3y2P7uAevWqAAU-X1FrAinqiA,8456 +nltk/corpus/reader/ppattach.py,sha256=vGCcRHcwSMWaHYI1K6KliD9kTKtvabGosqGvPWZjIVs,2903 +nltk/corpus/reader/propbank.py,sha256=1Bv1psLPUwLsbfVUuTP9RanC4MoJp1JHboxgRqGJxrs,17776 +nltk/corpus/reader/pros_cons.py,sha256=DzQ0xyw7fiyZucbrrOqUooPsDv-SCARTMwTxcVzzwlo,4896 +nltk/corpus/reader/reviews.py,sha256=nDNnRfW6XqdJEYh1nKbIOOHFxpHTPD27nxgQanmL8go,12321 +nltk/corpus/reader/rte.py,sha256=ihayGJCEZ9Mw4idk337owoXGlC1Ikn4yM_eUZyHlqgY,4785 +nltk/corpus/reader/semcor.py,sha256=UxLpm2sbR5_6kPQD-9QSwzYkciA71OGRZ4PGudlln1w,11694 +nltk/corpus/reader/senseval.py,sha256=ZNyHHh5gpa2dGQHThMVUoxekDP0DRkrQhKAs8wmY0vI,7539 +nltk/corpus/reader/sentiwordnet.py,sha256=yQeTvAT2T1XFg45dgYfFyIJBhXvkxvoEB7H7w-tEosk,4626 +nltk/corpus/reader/sinica_treebank.py,sha256=64GeJSNCg3eXUFXDasGIql60OC6Pq1Fwc9OtB8Dsais,2541 +nltk/corpus/reader/string_category.py,sha256=-oPUy5R2qb4xiyFsCKGAlUwc3CCsWYCvQsmc2F7NNt8,1919 +nltk/corpus/reader/switchboard.py,sha256=yRjPwtDRx-9rZLsBiX_cAcrMl90zaJ2kanD7RB5hT2A,4547 +nltk/corpus/reader/tagged.py,sha256=P-gUFkUTazKCJzOlqvwA8aAPWYB6Pw08pkieRduIaJU,12140 +nltk/corpus/reader/timit.py,sha256=FpbCiufjEoKPZNgGjvUO1-dAhTq36qeCauaFIzqzRO8,18473 +nltk/corpus/reader/toolbox.py,sha256=yVxaqjDOVCjHRNz4Zs_8Zu8fkbi88YEb_JxwCYtHwVE,2121 +nltk/corpus/reader/twitter.py,sha256=UdnwB2Hh6quI1KhFAAlriybVrYBoUyNqxzUBCdFLics,4608 +nltk/corpus/reader/udhr.py,sha256=tjqXc1JQiTURHnsTU8RIzwxdQIZasbtdMSPz4msnHVo,2592 +nltk/corpus/reader/util.py,sha256=fZy5GyMxJ-urpTqO19Sj-oRQqgL-EI65_UDscr2nizg,32225 +nltk/corpus/reader/verbnet.py,sha256=NBIwOd2JrnHUamt29yzQjHS5VyzqQsOoCI9ZwOfXZIU,25404 +nltk/corpus/reader/wordlist.py,sha256=8dlcXRIjDuJ6U_dvbw6OF_VOgbC4EVeXI8uE2tCxjbM,5812 +nltk/corpus/reader/wordnet.py,sha256=nSIDdVHF_FDx6eOlsU5pUgV5z6YjPbhvQoWyEfb8_Yo,93352 +nltk/corpus/reader/xmldocs.py,sha256=_fVqoEIAaYKT772IEcYmuk_7OaqUdsRLKZpbaL4up88,16285 +nltk/corpus/reader/ycoe.py,sha256=9VbkO_JnFG2joiWfjsfYZ53vsSPl8lWfK00faIAaLN4,10504 +nltk/corpus/util.py,sha256=Q9xYJ97UUOy8vuuDA-uidzpE1oEU_-k6M6L0CcxsZ90,5867 +nltk/data.py,sha256=rr3iRF4UJi7bh3Ss1Gp0bn8qml3YmTOV8kyWeYOavO8,52814 +nltk/decorators.py,sha256=U1-DvExxy0Uv96M0St_rR8IAh8Em3eK6uS4AXIf_Ti4,8526 +nltk/downloader.py,sha256=SWUlq_6w6PDWKs4UCXmY3HyvEfDII1Mp7bgjChv-KEM,95506 
+nltk/draw/__init__.py,sha256=vtk9kECEd_9ZZ0pqST6z5Sb-no-VDpEohi7UHD_YQcE,810 +nltk/draw/__pycache__/__init__.cpython-310.pyc,, +nltk/draw/__pycache__/cfg.cpython-310.pyc,, +nltk/draw/__pycache__/dispersion.cpython-310.pyc,, +nltk/draw/__pycache__/table.cpython-310.pyc,, +nltk/draw/__pycache__/tree.cpython-310.pyc,, +nltk/draw/__pycache__/util.cpython-310.pyc,, +nltk/draw/cfg.py,sha256=Y-89bIKWPdiCAn1GkA0eOP08L6eeQHc4gVQKMzBj2sk,30794 +nltk/draw/dispersion.py,sha256=MaCehYu6cTuRhMTzDW7E_cwGjXkP7auGCMsD31WjLcE,1854 +nltk/draw/table.py,sha256=Gz7IZ6JDxsfLUc5zLui_g1IyTfhPCEJU-u8K71S_qrc,46257 +nltk/draw/tree.py,sha256=N8qbNssr6A8OLp4zLE2FJ-jQzWYWFkeASvUeGzc2wKY,39275 +nltk/draw/util.py,sha256=8n8YJrrTWSD-MUEy96bX-oaaRBufEtg74bXPXWzWbJ0,90944 +nltk/featstruct.py,sha256=BVbotcvgnlNTKMDC1bL16-i3PCw5zXgP7X20tt-yPF0,106108 +nltk/grammar.py,sha256=uTC2ScpQIVxWt38QfWceYczkTjTVzPplmD63RfLLKkY,59174 +nltk/help.py,sha256=Sj2M3-tktpBZxwHxx1btdthJZ4hhZx-XUXlYuGv2Kp8,1709 +nltk/inference/__init__.py,sha256=nw4pQFHOGUv4x7u21GrJBOUS2hc7JibvvgbVnqXuksA,814 +nltk/inference/__pycache__/__init__.cpython-310.pyc,, +nltk/inference/__pycache__/api.cpython-310.pyc,, +nltk/inference/__pycache__/discourse.cpython-310.pyc,, +nltk/inference/__pycache__/mace.cpython-310.pyc,, +nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc,, +nltk/inference/__pycache__/prover9.cpython-310.pyc,, +nltk/inference/__pycache__/resolution.cpython-310.pyc,, +nltk/inference/__pycache__/tableau.cpython-310.pyc,, +nltk/inference/api.py,sha256=GdomkZQT97b7Z_HER__KuhCDehRbT1jxD7MoV-1COnY,19560 +nltk/inference/discourse.py,sha256=XojyiwkuvpTjBOZCcQ0q5CBpIMOw5fXw3eNzjZPJoqw,22691 +nltk/inference/mace.py,sha256=EKRZuwCrX320jzWrq3NOFnfIugapuYdgiH31dVf2ZvE,12243 +nltk/inference/nonmonotonic.py,sha256=4_uEiG5h11Cv9_Z7dFIGAZ8M_QpZ-gWNVew11TGbeSU,19174 +nltk/inference/prover9.py,sha256=LHyuOmISLo5c64e5VOXiZsK-bG7z-0cb3xY01kWE068,16266 +nltk/inference/resolution.py,sha256=wx0YCAC5GgICEOg0VN_x67W88cGHu854aQD3pIJcfq4,26761 +nltk/inference/tableau.py,sha256=IpDZT3FbM02R2TnfjmAOlwXivnP19DXoT9Fu6xW4Cv0,26320 +nltk/internals.py,sha256=eOGggyeDGf6gyU76p2SU6_vFQP6h5Frg8idDyS6naW8,39416 +nltk/jsontags.py,sha256=IWrXxAjSzlgaxqclkKcAnxNzANbG1Zjf_j2jjPnxUy4,1948 +nltk/langnames.py,sha256=dDNlEkDmsxWUKxU5CGpYLBHpWlcD1ZrUiO3XTMEPBFU,17957 +nltk/lazyimport.py,sha256=qBI5PNKz5qYLrxow19KwLrSE821TyRhVKCafAGML-1E,4719 +nltk/lm/__init__.py,sha256=Gsg0OaefWlZVbCDGYqh1cEluhXpWS7vN8Bgpfm3wa-w,8051 +nltk/lm/__pycache__/__init__.cpython-310.pyc,, +nltk/lm/__pycache__/api.cpython-310.pyc,, +nltk/lm/__pycache__/counter.cpython-310.pyc,, +nltk/lm/__pycache__/models.cpython-310.pyc,, +nltk/lm/__pycache__/preprocessing.cpython-310.pyc,, +nltk/lm/__pycache__/smoothing.cpython-310.pyc,, +nltk/lm/__pycache__/util.cpython-310.pyc,, +nltk/lm/__pycache__/vocabulary.cpython-310.pyc,, +nltk/lm/api.py,sha256=u65V1dwqoBjCdxNSbLLUpY0zjzY0-WBGK7HHvV0Dct4,8495 +nltk/lm/counter.py,sha256=AOqwTQFxaWNMnJZp5E6i5bPBq54Lc7dklp76J5Ty_rY,5250 +nltk/lm/models.py,sha256=ricaFU593KT1n8ri5b-3JTxwpO__XXkZCYgPadPmrLA,4903 +nltk/lm/preprocessing.py,sha256=yeW6yCp2e0zGFpcQ_puPZ0VBsjcespq2MLPPdUojY3A,1714 +nltk/lm/smoothing.py,sha256=GqBAZAgZbQgMFTfu8LJFuWobda4AC8A8va2C3hbkI28,4745 +nltk/lm/util.py,sha256=X7x-__sk-f_Z8ttRmLP1ASLIQlVLOVo1ziID3F9qDZQ,474 +nltk/lm/vocabulary.py,sha256=rMng32oqXSg1XXOFpRi0TQtjaF_fQNx3b9MGKGakPnQ,7099 +nltk/metrics/__init__.py,sha256=gu6faSWxN5vW86Lk7fvzb_NeD4H-5BcUvTqmo5lSNLg,1243 +nltk/metrics/__pycache__/__init__.cpython-310.pyc,, 
+nltk/metrics/__pycache__/agreement.cpython-310.pyc,, +nltk/metrics/__pycache__/aline.cpython-310.pyc,, +nltk/metrics/__pycache__/association.cpython-310.pyc,, +nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc,, +nltk/metrics/__pycache__/distance.cpython-310.pyc,, +nltk/metrics/__pycache__/paice.cpython-310.pyc,, +nltk/metrics/__pycache__/scores.cpython-310.pyc,, +nltk/metrics/__pycache__/segmentation.cpython-310.pyc,, +nltk/metrics/__pycache__/spearman.cpython-310.pyc,, +nltk/metrics/agreement.py,sha256=NveWTfhBZDQWxqwEWbDMJKpeRElk7vtdN0bh9nMnKWI,16421 +nltk/metrics/aline.py,sha256=yWktce0eQ_r60KKUioLZSkorZT8U2zPbxLA-wqLzzDA,32845 +nltk/metrics/association.py,sha256=1hNurRIKGop2rDttTKddxtZkMrJ_0qBczugF9Mm4S4o,16569 +nltk/metrics/confusionmatrix.py,sha256=szVY58vzNWQRr4objnsiouDcwd-kqAT5_b59wvwx4q8,13035 +nltk/metrics/distance.py,sha256=oQ-o9tMyv5kTOJsRe2EKHLlArLiZxlAMJY73tKFnAcw,17661 +nltk/metrics/paice.py,sha256=yge6FA2y8_AGYSmCWfzapmKBFF6kkJTe-puWp-KcTBk,14739 +nltk/metrics/scores.py,sha256=nv0f5lsR_nKY_A2pJrVMtH4Mzef_CBpDWpkBoHnb-L0,7922 +nltk/metrics/segmentation.py,sha256=WdPD6aH31Z55RfEJ7OM0FQoxmGLr_q-q6vEJ0CqxwDY,7221 +nltk/metrics/spearman.py,sha256=irorJE5fYsGGxLLvKGvvL6je-hM4aonDBVhWS_ZiOS0,2197 +nltk/misc/__init__.py,sha256=pgYpCMn6fRf90Zwn52OjzHDe5MsPgl7u9cbTKeEH8pk,406 +nltk/misc/__pycache__/__init__.cpython-310.pyc,, +nltk/misc/__pycache__/babelfish.cpython-310.pyc,, +nltk/misc/__pycache__/chomsky.cpython-310.pyc,, +nltk/misc/__pycache__/minimalset.cpython-310.pyc,, +nltk/misc/__pycache__/sort.cpython-310.pyc,, +nltk/misc/__pycache__/wordfinder.cpython-310.pyc,, +nltk/misc/babelfish.py,sha256=9UkSa6l_j1BHxaT9CU1Viv_51RB0k9AA--3ytmQpsAk,361 +nltk/misc/chomsky.py,sha256=UatgZu7Zj3W5DQzuxHZW6Zyxv5kH9l_W2u_ZnOCqXcc,5319 +nltk/misc/minimalset.py,sha256=z7-UaqT7F-2ba_qybwY46bgaH4l5zQcyBkg5K_joFQs,2979 +nltk/misc/sort.py,sha256=aeqONHRGSDcVksvB6a5npkwdf5sAy6A2vRP9wYp0Y1w,4547 +nltk/misc/wordfinder.py,sha256=2HR5Fj4hv7Q2IDFlJb5Nl644uNWtSkWEUe-hBKYt6Zg,4352 +nltk/parse/__init__.py,sha256=w2H8yrs8-Wov_wtGTHoMyRyGt7Q4iLyQC6Z05LhuEhc,3797 +nltk/parse/__pycache__/__init__.cpython-310.pyc,, +nltk/parse/__pycache__/api.cpython-310.pyc,, +nltk/parse/__pycache__/bllip.cpython-310.pyc,, +nltk/parse/__pycache__/chart.cpython-310.pyc,, +nltk/parse/__pycache__/corenlp.cpython-310.pyc,, +nltk/parse/__pycache__/dependencygraph.cpython-310.pyc,, +nltk/parse/__pycache__/earleychart.cpython-310.pyc,, +nltk/parse/__pycache__/evaluate.cpython-310.pyc,, +nltk/parse/__pycache__/featurechart.cpython-310.pyc,, +nltk/parse/__pycache__/generate.cpython-310.pyc,, +nltk/parse/__pycache__/malt.cpython-310.pyc,, +nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc,, +nltk/parse/__pycache__/pchart.cpython-310.pyc,, +nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc,, +nltk/parse/__pycache__/recursivedescent.cpython-310.pyc,, +nltk/parse/__pycache__/shiftreduce.cpython-310.pyc,, +nltk/parse/__pycache__/stanford.cpython-310.pyc,, +nltk/parse/__pycache__/transitionparser.cpython-310.pyc,, +nltk/parse/__pycache__/util.cpython-310.pyc,, +nltk/parse/__pycache__/viterbi.cpython-310.pyc,, +nltk/parse/api.py,sha256=R3xG2NEEAq5TvzQpuTKGE_xX_JLoVwp0lTGCQsL2WV8,2354 +nltk/parse/bllip.py,sha256=LipdGFlJrKinwKVIgiEKosvdaDrFQGQq-7rZUOkkPNw,10976 +nltk/parse/chart.py,sha256=6b3HoLPbT0OlKkETtezIODvqpBKIlkAPoY7r4gV91oM,63760 +nltk/parse/corenlp.py,sha256=t9ePlsr3zWxf6CKEbczk71mCVe_y5j9U2sd8el8REms,27745 +nltk/parse/dependencygraph.py,sha256=J9DcO7sszs6Pp_TqSRL-pJLwBATw_F98_xaZ9m0-kbM,32468 
+nltk/parse/earleychart.py,sha256=OVBGyzHbv6M76LyFlHv1Gz-0PvZ0xdRdIYgSb_Qet84,18274 +nltk/parse/evaluate.py,sha256=P3qGZ7WG90mOrP4MEQXzDbIOB8WWhDBG3PIDIYfozx0,4468 +nltk/parse/featurechart.py,sha256=iK0O0uC-ZAgg6Bcj9ACeQkAvBTrWo-hteu-b-cPdPhU,22532 +nltk/parse/generate.py,sha256=kDlYgWj1f_YpbzVJq7OL6O5H0zU03hrrjxLuKLzJgvE,2381 +nltk/parse/malt.py,sha256=_XMFcW3SOH0XCrzfv-o4-U8RTkcwDbFSHpfbWOzMtuM,16571 +nltk/parse/nonprojectivedependencyparser.py,sha256=ncMpvaMMc8TtRzaPSub8Qt0IUfaXQV42YSbUMQDEnvg,29446 +nltk/parse/pchart.py,sha256=KGMYKf5x2psS9XZfU6wbUnIF5jwtoeAtsHzJz6vyZ-E,20480 +nltk/parse/projectivedependencyparser.py,sha256=WIWDRYAaUqr8u9UQP5AwSLXbciyoaClSN4TavumkfkY,28243 +nltk/parse/recursivedescent.py,sha256=uaVune-fIIWa_wT3CTY4O8p9MhTP4VgW1UoNauPsBZQ,26032 +nltk/parse/shiftreduce.py,sha256=Yl2JYRdUOqJIjQXOEWv4fUfpfGu5kLq94HaYPTV5QAk,17071 +nltk/parse/stanford.py,sha256=xyPRx710ddMCVYIXje2gTwvZrvYgxOM1CMhM7ziDuVQ,19312 +nltk/parse/transitionparser.py,sha256=9EdYOIGZPq77l3fgI4dnH63146YpC5PBfaZKaBPkhlU,32272 +nltk/parse/util.py,sha256=6mu9ZVO2hRduKhZGr40bAwRPvhKyTO_Srx2MT8pAy6E,8667 +nltk/parse/viterbi.py,sha256=czOjotH__XU_A_Mpf5-xYlYi8bXYOTbIff1mrPZBNYQ,18351 +nltk/probability.py,sha256=ikWJyp0Equm4RyGxXOJFBBgNfr2jo2fK0Ck7xwmSks0,92907 +nltk/sem/__init__.py,sha256=3-QdBYTgLd1iRHzo6e2f3OT0CDBWZAVmzhnlh7Yvu24,2443 +nltk/sem/__pycache__/__init__.cpython-310.pyc,, +nltk/sem/__pycache__/boxer.cpython-310.pyc,, +nltk/sem/__pycache__/chat80.cpython-310.pyc,, +nltk/sem/__pycache__/cooper_storage.cpython-310.pyc,, +nltk/sem/__pycache__/drt.cpython-310.pyc,, +nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc,, +nltk/sem/__pycache__/evaluate.cpython-310.pyc,, +nltk/sem/__pycache__/glue.cpython-310.pyc,, +nltk/sem/__pycache__/hole.cpython-310.pyc,, +nltk/sem/__pycache__/lfg.cpython-310.pyc,, +nltk/sem/__pycache__/linearlogic.cpython-310.pyc,, +nltk/sem/__pycache__/logic.cpython-310.pyc,, +nltk/sem/__pycache__/relextract.cpython-310.pyc,, +nltk/sem/__pycache__/skolemize.cpython-310.pyc,, +nltk/sem/__pycache__/util.cpython-310.pyc,, +nltk/sem/boxer.py,sha256=rACkFGXGKpK0pMxGXxtpkaKF50-aysHw_9GQjf9xtww,55287 +nltk/sem/chat80.py,sha256=TAiZ4BROBzIibz3jVkcgWL_BlmkC-3gMziEySS8JWMw,26519 +nltk/sem/cooper_storage.py,sha256=ghg6kYG6a54I4sDiwgzX70Jcm6HlNCPDm5gZi2ZqPG4,4210 +nltk/sem/drt.py,sha256=7YDYqc9XuWBleZ0aL41LW_wqpBbpvCMgqMnTOiRXX9s,53149 +nltk/sem/drt_glue_demo.py,sha256=xe-SoNG6JVIYtB7YoMO75vEil0KSVGI-gxcfzuLgQdc,19171 +nltk/sem/evaluate.py,sha256=07NnlgTmfjJy93UtFfFB6mdZRiW2hrm9ZTfO4RLfjcM,26282 +nltk/sem/glue.py,sha256=1nBjlbsqna4gXnNG8iMQn5wb15QPxejyDn-vbTPmXrc,30254 +nltk/sem/hole.py,sha256=D9Cnc89WvG9WCDlDNjYF3cig4UmHJgFIwtevuNX1CBs,14216 +nltk/sem/lfg.py,sha256=SR-OYvj8HBtIx-EBvnfkOFstSj8eKXzAiVyQeFIsmZI,7716 +nltk/sem/linearlogic.py,sha256=Wg_jzVVQDy1nCvYkuJB-g9tdwBYRmsenQboD7eRbEU8,17234 +nltk/sem/logic.py,sha256=F0giVBpzZi3HYLQBlsiU0tLOOVEEkswlOYkRsitiVnU,70239 +nltk/sem/relextract.py,sha256=UPv_7dKm-GpdYGL_J8CsJHAz0ALxGv_4EZ_rFk7_y7o,15809 +nltk/sem/skolemize.py,sha256=CtOfU12APkeIkp-Z_jMlAOMkXIEO9le8TQUvlZ5rBns,5870 +nltk/sem/util.py,sha256=GB8SOc7dtyywa4WrlVHr1mHwRntX9ppD7N1em-Bh1Yo,9062 +nltk/sentiment/__init__.py,sha256=vre9oZROX6xHjdKjM8inuxYfiVLf341HZ_sPKyoA2Jo,382 +nltk/sentiment/__pycache__/__init__.cpython-310.pyc,, +nltk/sentiment/__pycache__/sentiment_analyzer.cpython-310.pyc,, +nltk/sentiment/__pycache__/util.cpython-310.pyc,, +nltk/sentiment/__pycache__/vader.cpython-310.pyc,, +nltk/sentiment/sentiment_analyzer.py,sha256=vB09EsrK-16IqHDD7TdsHZYGrkrUcY1rPzNtHpTrL1w,10432 
+nltk/sentiment/util.py,sha256=memSkOlHeTfJpeZYtgYJ9LsiMOcoVw4wvwHBOsFwneY,31275 +nltk/sentiment/vader.py,sha256=efJHlAUaRr6bV63ZVDIMAOaolVqct6lBKo9tMVCOpkg,21764 +nltk/stem/__init__.py,sha256=BUVavJGvw_wgnL3PkklsVg9PNMbz3rCW1U6XTufamC8,1296 +nltk/stem/__pycache__/__init__.cpython-310.pyc,, +nltk/stem/__pycache__/api.cpython-310.pyc,, +nltk/stem/__pycache__/arlstem.cpython-310.pyc,, +nltk/stem/__pycache__/arlstem2.cpython-310.pyc,, +nltk/stem/__pycache__/cistem.cpython-310.pyc,, +nltk/stem/__pycache__/isri.cpython-310.pyc,, +nltk/stem/__pycache__/lancaster.cpython-310.pyc,, +nltk/stem/__pycache__/porter.cpython-310.pyc,, +nltk/stem/__pycache__/regexp.cpython-310.pyc,, +nltk/stem/__pycache__/rslp.cpython-310.pyc,, +nltk/stem/__pycache__/snowball.cpython-310.pyc,, +nltk/stem/__pycache__/util.cpython-310.pyc,, +nltk/stem/__pycache__/wordnet.cpython-310.pyc,, +nltk/stem/api.py,sha256=eCDlGturJXqs0AG-q0AHN6EiqU7ytGs43ly-eEiob6s,741 +nltk/stem/arlstem.py,sha256=3LW-2dSdsNNAGlQluBiOmHR6V7xOaRdIT2OdlZgcJVk,13006 +nltk/stem/arlstem2.py,sha256=2SrhWIANG2Gd9Rxbvj-UIWc7-zr5-zIJGB8HDyszqW0,16535 +nltk/stem/cistem.py,sha256=59OmWHS_EWFO-I-ZN1DVFnnz1rdcUxKCBwM4z8DRQoo,7259 +nltk/stem/isri.py,sha256=2tOa82AbRnXPympONMJOVc6-CsWdyWlMgcllFwqrJ-I,14990 +nltk/stem/lancaster.py,sha256=QrvXzQGULfwIMsW1Z8By8Pz8Exb1MTTydc5ZBV4dJao,12587 +nltk/stem/porter.py,sha256=c5jYrt7IHEpV-neeZp6zHOjLXYh83x0tksE3VP7YkRg,28372 +nltk/stem/regexp.py,sha256=5lb_FGFd7SixF8hyAZpNNLCCeNDfRhnp750We9dbZJA,1578 +nltk/stem/rslp.py,sha256=cMyro3T1eslSdD-sE3Vq5Nnp4yJNrlB6gTQz08tlOxU,5511 +nltk/stem/snowball.py,sha256=6somwXR8EoIN9TNmK251dPRGRZ75oC4i6CfPV8iqwk8,183890 +nltk/stem/util.py,sha256=ktwGClVb3h-AndTu60wS6rfTAdruI41M1zbccWt7wm0,644 +nltk/stem/wordnet.py,sha256=AojhkFURMhpF8vmS7uTkfeJNL-EYrvGb42v-yrTSD8w,1655 +nltk/tag/__init__.py,sha256=v7hPbsW3lrb6AFSIZ3uhZ33iwCKVx7RvBHrNkNro1NY,7298 +nltk/tag/__pycache__/__init__.cpython-310.pyc,, +nltk/tag/__pycache__/api.cpython-310.pyc,, +nltk/tag/__pycache__/brill.cpython-310.pyc,, +nltk/tag/__pycache__/brill_trainer.cpython-310.pyc,, +nltk/tag/__pycache__/crf.cpython-310.pyc,, +nltk/tag/__pycache__/hmm.cpython-310.pyc,, +nltk/tag/__pycache__/hunpos.cpython-310.pyc,, +nltk/tag/__pycache__/mapping.cpython-310.pyc,, +nltk/tag/__pycache__/perceptron.cpython-310.pyc,, +nltk/tag/__pycache__/senna.cpython-310.pyc,, +nltk/tag/__pycache__/sequential.cpython-310.pyc,, +nltk/tag/__pycache__/stanford.cpython-310.pyc,, +nltk/tag/__pycache__/tnt.cpython-310.pyc,, +nltk/tag/__pycache__/util.cpython-310.pyc,, +nltk/tag/api.py,sha256=hxGeLViDHBmcXYnWtYA8N3r7meYyOzW8nxafEaSXH0c,14810 +nltk/tag/brill.py,sha256=PA8cP2tXwbxYtSO33vrV8RwGJNm4ZU_T5iCkqKf0W4g,16829 +nltk/tag/brill_trainer.py,sha256=ba0A2b255xtF3N30iiLWy8oazQycJ3IP-gBY6SMuX2w,27900 +nltk/tag/crf.py,sha256=eaU05hpUOPRO9TRVt6VXffvCB9ZsahkDu2gQi1x8FFQ,7960 +nltk/tag/hmm.py,sha256=mkD06CBebJyBZ-lqXVsyVwCL1blqA0ucijINjynZiJQ,50349 +nltk/tag/hunpos.py,sha256=th_TEehZi3QIlaFLJhI40tBpqn1Rn3QYzfv_dT1n1w8,5195 +nltk/tag/mapping.py,sha256=TfcRmPsp-cM7FFx03ElLmf0ZYvW48N35SIl_y7M7QHY,4024 +nltk/tag/perceptron.py,sha256=ge04T_6s-qIk2ElRLA43euMSzbaxQur2KxosuLfq_Tg,13425 +nltk/tag/senna.py,sha256=_Y-mrYv1Y4SH8720pMKiXYvPFnuNJMeDeG6PUu1TyHk,5903 +nltk/tag/sequential.py,sha256=hWSEZAYlZa8uKd5-o7NeAIsEaHOj2lZVaVX5F5ymdoI,28621 +nltk/tag/stanford.py,sha256=_HuQnKPvcHn01gUtrvhaYI_GpOG2tldthDXoV3daCFA,8427 +nltk/tag/tnt.py,sha256=gNsbvbZYFnhPLnbCKCAOdwJIsK7OyNxAje2SfRclyz8,18432 +nltk/tag/util.py,sha256=FEApJmJ5lpb1mWbfhRvtKce9sR93WPiRJfUyAvBoc78,2353 
+nltk/tbl/__init__.py,sha256=7w88VhcTvvCRY03cUftsmoLkf5YanRyM3PXU-Ik2t2c,790 +nltk/tbl/__pycache__/__init__.cpython-310.pyc,, +nltk/tbl/__pycache__/api.cpython-310.pyc,, +nltk/tbl/__pycache__/demo.cpython-310.pyc,, +nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc,, +nltk/tbl/__pycache__/feature.cpython-310.pyc,, +nltk/tbl/__pycache__/rule.cpython-310.pyc,, +nltk/tbl/__pycache__/template.cpython-310.pyc,, +nltk/tbl/api.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nltk/tbl/demo.py,sha256=VpqlJQdaXU5wrpJF2aSLhTlF62fOQl5sCYiBJrPZcZU,15338 +nltk/tbl/erroranalysis.py,sha256=pav23zp0nOiGydqQ4wSJbXAVJmixxCwgodSthCL-onU,1454 +nltk/tbl/feature.py,sha256=CrcNBJ_BGqnEXzFshpz_MGe6mTB1V0rMWBGWyn5Kukw,9690 +nltk/tbl/rule.py,sha256=k-0gRVsZOigPWkqn_NuZvOeQ5bXx6YTYuSRA-oUSXPo,11515 +nltk/tbl/template.py,sha256=LeTafIw_oehScGgwFgl9fklSIOY5tJvuJNFgzaRsJHU,12892 +nltk/test/__init__.py,sha256=79tUwF8keWTdPOWa-gpVx0VkJr6DABwmq9j589IFABU,487 +nltk/test/__pycache__/__init__.cpython-310.pyc,, +nltk/test/__pycache__/all.cpython-310.pyc,, +nltk/test/__pycache__/childes_fixt.cpython-310.pyc,, +nltk/test/__pycache__/classify_fixt.cpython-310.pyc,, +nltk/test/__pycache__/conftest.cpython-310.pyc,, +nltk/test/__pycache__/gensim_fixt.cpython-310.pyc,, +nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc,, +nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc,, +nltk/test/__pycache__/probability_fixt.cpython-310.pyc,, +nltk/test/__pycache__/setup_fixt.cpython-310.pyc,, +nltk/test/all.py,sha256=Ojl5ZxWh7LV7XsdOyR8kWEOxMk7E4m7QnCUrQoACzQk,819 +nltk/test/bleu.doctest,sha256=2LmQDiyg-BueBx8LbLJanXCs7UZI9zsLe5nurrXBVBs,862 +nltk/test/bnc.doctest,sha256=_8MYpY69ibBaUmkNE3lu187WDOVV_AF4GgTvxSmRuLQ,2051 +nltk/test/ccg.doctest,sha256=YHiQsfRdaAuOXrkBfD2WTSd9yWbAWyJ8K2KDtkAWyhk,19786 +nltk/test/ccg_semantics.doctest,sha256=vPdQojJQwAxOXJ12cUQXKnHTnU5IOr1z-FszyX1MAAQ,31066 +nltk/test/chat80.doctest,sha256=-bY1zKFhVZmtKf82ndwBt3aiNHy7SeSduXjLKggyy2o,8735 +nltk/test/childes.doctest,sha256=FcKzuX_RXXHZNhCyU_S2B7xTN46R54mxjYs17bejhN0,9363 +nltk/test/childes_fixt.py,sha256=A04g6D3QytqCatGOYAg6fCXqXT8XyjIPbRSkdG1ys8o,372 +nltk/test/chunk.doctest,sha256=hDVcWtgJPEZJZLceKbm_885sgpaSR6DFHQMesFzyYeU,11511 +nltk/test/classify.doctest,sha256=e1MEsCmHQcLkky6XomMc4je0mhImN8YghEy-A13Ieh8,7699 +nltk/test/classify_fixt.py,sha256=zUj2OUNhyi2sJNmVdShbQnuhLUieJ7h_AwKmvj1Gnzw,119 +nltk/test/collections.doctest,sha256=ygTv1l_HbXdVHDw91rjUxJy6KFnSvbvzpkXLvTfnVUU,622 +nltk/test/collocations.doctest,sha256=ezSwbUboIG91fbmKX7mvFo3c9C_BlbgM85tRrctZAKU,12506 +nltk/test/concordance.doctest,sha256=rPESoW6-ugpgj2LgBYgChcjOWv75x0eu3Y4A2MuZSsQ,3544 +nltk/test/conftest.py,sha256=apMfe_V5EHXZHCXCMxVqMt3gpHCUwwxR-MxESis9--Q,804 +nltk/test/corpus.doctest,sha256=K3vInYJLjonuHTysZCrHvZ1ytgseREpqZMcz9VsolHA,99206 +nltk/test/crubadan.doctest,sha256=qn63dlNT7XmxUA1m2JmcwuX_i12_cWCAfJDVxSp9Kgs,2060 +nltk/test/data.doctest,sha256=ggmXuZT1Hy7Bd5_bppQPhz64lO8E-dDHXtuh2hQ5HYM,14266 +nltk/test/dependency.doctest,sha256=pXyaAjDE232xA0Q2EEvxkMNu8COSk0l52ELMi07C-qI,7669 +nltk/test/discourse.doctest,sha256=AxJetWi0DetMfptecrI9lXac7mN4YVrfCVPr3VXDBkQ,17923 +nltk/test/drt.doctest,sha256=wTRxo8ISpwcsF9RpwlHn0PLfpaL5ZCZxN2oEpqlGHYg,20076 +nltk/test/featgram.doctest,sha256=APe6OaMrt1UwuKzLM-7NRXIQF0mPnpZj4vR1yTSnz0o,28870 +nltk/test/featstruct.doctest,sha256=hjCDZs3NN2DJdyAH9BygfXUuWAP723xQFk3YMSg7p_Q,38894 +nltk/test/framenet.doctest,sha256=KcZIkt-8_HJ2gDl7BvWG-vfOI3NI7Be2VT9sc9hcAXY,10797 
+nltk/test/generate.doctest,sha256=7yYAJ1EO1qAHy9OYVcoEehPs4hoEhvTvs0VH5H08oBg,2050 +nltk/test/gensim.doctest,sha256=kmwbF1XcgZvyfrfdotjNw-VlkzgGFVxwVObQ0IPcAXI,5200 +nltk/test/gensim_fixt.py,sha256=2p-4RObBWBU-YzbGPXAj03aPhimK-l_RLNdS_RuKHh4,77 +nltk/test/gluesemantics.doctest,sha256=QVR_Hki10s1CRghGvxJDNDA2u1J89uj1O_nKt0RF8eo,12705 +nltk/test/gluesemantics_malt.doctest,sha256=4S5usX2BTPAPpK7t1SIfwHGaajpVXAuk5CuAUvSagZU,2667 +nltk/test/gluesemantics_malt_fixt.py,sha256=H7YUsT_M5LteTBF9utPjqUP8pybUaW3w2fQdGUPiT3c,232 +nltk/test/grammar.doctest,sha256=z2ZzpqBjN64pPB7J3WbZcI_QsH5a1TkgdNKN5GsHMLE,1949 +nltk/test/grammartestsuites.doctest,sha256=4eF9lME7iQqaAUMvp3IX0s815yi9MSUgbDrT_hJov2c,3309 +nltk/test/index.doctest,sha256=MuVP-xRyC9Zu_bY7PU8BZekRztycmrD28ngq6q6RonI,2701 +nltk/test/inference.doctest,sha256=8dABBDz575EQIw1qMwVdDCEQClRmaRxxMARqC1CHYBw,18365 +nltk/test/internals.doctest,sha256=dzXAofPbEfck_BZYO1ZHp5NpqQd_Ofz3nGKjP_KlzHE,4283 +nltk/test/japanese.doctest,sha256=79O_D43s3rVCS0Am06pG2hCiNUkT2A7AT1Up_BT82_g,1093 +nltk/test/lm.doctest,sha256=mTo97a-_5wZ1t_G3Hso-LuhluTTtHZrsE5b89dhQXfY,3951 +nltk/test/logic.doctest,sha256=h8numvKfRWQucoEA9HPJ02pxviSb8SdCa02BUvWPL5o,35183 +nltk/test/meteor.doctest,sha256=dcxi8dfWOG6fm-7_VlHkxnaW8d3vEw6TBir0iAsf2Qo,1523 +nltk/test/metrics.doctest,sha256=h9P-WsPkMHmFQDQw98TRwfRBAVhkuLqE3MOSl3hZaJY,11283 +nltk/test/misc.doctest,sha256=upseLcrsXziqxxF0pCmA1Nyx9ovGAuUlpG7w4PK8a1k,3464 +nltk/test/nonmonotonic.doctest,sha256=mG0_JgtIhZh9pD-jqiGAHD7nuQHGClxWhKvffiBPHNM,10370 +nltk/test/paice.doctest,sha256=9KOoUsd6O-ACSMbt3Ras4zS-FE55R4jZ6xh1JKopb3c,1273 +nltk/test/parse.doctest,sha256=fKDMs2TD3qhUgNdwW30CTYcEOl1olf3TRCHsf_i1heY,34936 +nltk/test/portuguese_en.doctest,sha256=voMsH9rhsaI2ETsXYI8T8-ZXKYv6l1-ghv4TMaEDt7c,23121 +nltk/test/portuguese_en_fixt.py,sha256=-66oHXFBbvDKHkwkcOLnpwcn29iUpwUEWrJ-LqSk5FM,130 +nltk/test/probability.doctest,sha256=4BuJPzdy6l8hlRXIJOEMxfzlLl1oaru2ezTV6olNx0U,9244 +nltk/test/probability_fixt.py,sha256=avszs9PHMTVYHHcOXA19-EsTYDahH4VPPPMqv1QkGpE,188 +nltk/test/propbank.doctest,sha256=jDD0XEFjm2-hDA8g_Y7Us3feNLeOgsw8lvDn-tApO0g,6694 +nltk/test/relextract.doctest,sha256=Klf15poDywGJTJlpmpTBnLF5ur-_0tLZC-S93Sox95A,9520 +nltk/test/resolution.doctest,sha256=XJw6Bs4CBYSEEWreyqWTaoTo1ADGwGwRptEsZV8qUH8,8010 +nltk/test/semantics.doctest,sha256=XhmM0qSpAcmqYZu6dV7Veugf3FpuyMxaHEh1t2UEqwI,25190 +nltk/test/sentiment.doctest,sha256=dwARYfcbIn6oaPX7kRAo_ZjjJ_YDowxh3zAgr-16Mak,12229 +nltk/test/sentiwordnet.doctest,sha256=7wIk6gIiYtONvkpNfAUK_xk-jXNVVzIzPlQJ4h2UTrk,1051 +nltk/test/setup_fixt.py,sha256=IQUyYM-mNaVbfsGFvfOdJc0ymAJ-0u5OAZd2cqRgF0s,912 +nltk/test/simple.doctest,sha256=ZF_0SZ5vp7pMfFp6iKf3ZvKkRYLHlxBamC0aaQItSog,2407 +nltk/test/stem.doctest,sha256=XJu6ADeinzu41KgldR5pVuiLdzkmCsoJIXLaSQocTPs,2552 +nltk/test/tag.doctest,sha256=Dl3QKGZi-1uJQwQKenGThEIfOLKKR7k3wghJKAf4GC4,34100 +nltk/test/tokenize.doctest,sha256=9fZOgyZnwOBNiPYS0xRFSPcr8asz18Tc29Et8_nzHs4,20353 +nltk/test/toolbox.doctest,sha256=NfbQ7Q_WFajCTtjxcLSzYkkvr8VVm8SGtqDerv5KBJ4,10323 +nltk/test/translate.doctest,sha256=if9_vzqjIWk0wnok6QSaLO-dry5lt3DLWCTj99VWmf0,8396 +nltk/test/tree.doctest,sha256=_MYclk55SLXJ4zGRu-bbL-oPOThpWc2G4c0cuLkyXXo,47273 +nltk/test/treeprettyprinter.doctest,sha256=yuAL_WWYVV5jCAe0TmzQ9j4N2CSZD9r13_g992DUvkM,9376 +nltk/test/treetransforms.doctest,sha256=UDSeLha6tfB-PN4_eJGQeOMifVhTIY89tho_2cuXDyc,5006 +nltk/test/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nltk/test/unit/__pycache__/__init__.cpython-310.pyc,, 
+nltk/test/unit/__pycache__/test_aline.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_brill.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_classify.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_data.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_distance.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_senna.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_stem.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_tag.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_tgrep.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_util.cpython-310.pyc,, +nltk/test/unit/__pycache__/test_wordnet.cpython-310.pyc,, +nltk/test/unit/lm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nltk/test/unit/lm/__pycache__/__init__.cpython-310.pyc,, +nltk/test/unit/lm/__pycache__/test_counter.cpython-310.pyc,, +nltk/test/unit/lm/__pycache__/test_models.cpython-310.pyc,, +nltk/test/unit/lm/__pycache__/test_preprocessing.cpython-310.pyc,, +nltk/test/unit/lm/__pycache__/test_vocabulary.cpython-310.pyc,, +nltk/test/unit/lm/test_counter.py,sha256=t1Dgd6b6JFrtkggAzoRXSGfZ1gwNKiD4aM_cBPoFsEM,3891 +nltk/test/unit/lm/test_models.py,sha256=3yVFNUsVC2_VUJexqclnSjInpQe14MuPVq3qsYlr2lc,20160 +nltk/test/unit/lm/test_preprocessing.py,sha256=05lMhOdkglGipiMuJ0gTJwaGweYed0qVGade6VY1uXw,999 +nltk/test/unit/lm/test_vocabulary.py,sha256=1Pd5rfniE0C_Pq5v2k3IKGdug6ZuA4ycbNZKfRpCfuI,5917 +nltk/test/unit/test_aline.py,sha256=u9y3D19sJGY_VE9nSOOON5GMYyTIRGVXMyqveeMypyU,1130 +nltk/test/unit/test_bllip.py,sha256=rm3KrL9F6F49WfXDV42GpuPlvbvsXibTbUXU0pWa-pw,1115 +nltk/test/unit/test_brill.py,sha256=ZTDCN3y2mTZ-dLeL0GwvO3yM6LlcTmIQ1LaNMBtXZyE,1024 +nltk/test/unit/test_cfd_mutation.py,sha256=YutOmFGBizpUzZ61l1cDxIegJah2ntuwKL88C49-WBA,1373 +nltk/test/unit/test_cfg2chomsky.py,sha256=g2wKKjmogJpAaRiLxA0_xw4KLwZa4zvjA-gBMt9z1l0,1726 +nltk/test/unit/test_chunk.py,sha256=xo-ItBtJdBsRIt-rX1rYYkV_ufABgPK4e6HlogRTvWg,2219 
+nltk/test/unit/test_classify.py,sha256=4Bv5-rDyrjDGdmRHY_qh4Rq_5VdoghzlJEPB_PCIzQo,1337 +nltk/test/unit/test_collocations.py,sha256=vaiBeImr5dCDOhFPAN8Q4CAUVojJTwNHpodolyIymCU,3690 +nltk/test/unit/test_concordance.py,sha256=91x9LT-875n7OOi3it5O1qr1xKdOA1ufZY3KLH10Iaw,4108 +nltk/test/unit/test_corenlp.py,sha256=doMoc3Drnl3fDFxTaZKVPKq-75RTXOGSOqkI9K_74FQ,58632 +nltk/test/unit/test_corpora.py,sha256=46IA2v_oxDRFQQGa6iGTHiTHAPsDZjIovDSG2432Ubc,9923 +nltk/test/unit/test_corpus_views.py,sha256=mIxoCvqWSfInEQkISPwfZvTG6dTxYh7Bx0kCGC6VsoA,1600 +nltk/test/unit/test_data.py,sha256=y1fXWnIylRrff9fBJBUYZ6xw3T6uwMg_6View-jKcas,390 +nltk/test/unit/test_disagreement.py,sha256=e2JIXrNqCg1YTSh6P2lnGs9YN8KmkWcFD-zcZPsNkjk,4461 +nltk/test/unit/test_distance.py,sha256=DIMhkfn2y6WvsiJRyw1y_T5b_4OHI6wG01eEAt8Cd9Q,5839 +nltk/test/unit/test_downloader.py,sha256=QvpnRVehOfLZVJ-iUH8m5mEHG8w4deKxRhF7IOnjAZM,741 +nltk/test/unit/test_freqdist.py,sha256=I6qkc8zleTMeivGWB0NntBVQDx_tVxthWRwcOB-T0i4,210 +nltk/test/unit/test_hmm.py,sha256=bX7fSFd7k89JCr9VNFr1ZAng4m2KkfuTL_M2TNvA1nU,2285 +nltk/test/unit/test_json2csv_corpus.py,sha256=-BUZfzFHAof4umKGAL-9WKGUBiPFyKzLVZOCmzW-a-g,5888 +nltk/test/unit/test_json_serialization.py,sha256=CfpHkTvY0lF8rMQXQsv_0nSVhDfhxVkqDwTLq26pv5Q,3634 +nltk/test/unit/test_metrics.py,sha256=iK6bLxVi1fVll-2eCmgzE-ubWnQlFeQjP079qdiRP-A,1949 +nltk/test/unit/test_naivebayes.py,sha256=a_tjsQsyvPIsO3mrtmN6knaC9BFwPE7PDNHBSNdhYMc,764 +nltk/test/unit/test_nombank.py,sha256=gIgs6vlEI2NheAh8c6wlJdk6apHmAMmaDZkP8laIvKY,760 +nltk/test/unit/test_pl196x.py,sha256=C41qhbllNBqtVJ9tCFM8mReQqzsdbM7uoMo9hFVHKLg,410 +nltk/test/unit/test_pos_tag.py,sha256=5HkW7hpjZd2270RVSFXECLxXg8jCY2iBViDoDA8O2Qs,2782 +nltk/test/unit/test_ribes.py,sha256=DItkydO5d543kRFYiAebnqudiF2HETHrMAntG3H75jA,5204 +nltk/test/unit/test_rte_classify.py,sha256=oNGw78oedct_VpwelsMVFb7v3bRFepnQWWbHgKp3GBQ,2765 +nltk/test/unit/test_seekable_unicode_stream_reader.py,sha256=XBxkic2HcfqxfTY0XxBBBRNEo5FQrYYQzkg1vywwUA0,2265 +nltk/test/unit/test_senna.py,sha256=fuLdpQO7kG-12rWpGprIOiH9fwxhv1yseNxKtpcUmss,3712 +nltk/test/unit/test_stem.py,sha256=kjtoZlKkgtCZYX8kxyVQIPf5f6QSPzUCLkCJLDvDWFA,6347 +nltk/test/unit/test_tag.py,sha256=h7YztNxvYcx2177MkQrPqPgYR2gL1sdl9YB3ZMKuciw,535 +nltk/test/unit/test_tgrep.py,sha256=elx0roGwZJEOJy7-j7cqwAXvzvldlzYkxo8lHDoKf8E,31708 +nltk/test/unit/test_tokenize.py,sha256=9uQx21Vs5Iv5mBmNyiHqTaOmIecSlD1n9jUY9dF1mBM,30921 +nltk/test/unit/test_twitter_auth.py,sha256=bms9DQ07DwEr53IqMr49qGL9ria_1rEf3aA7xt8oR-A,2509 +nltk/test/unit/test_util.py,sha256=UMUTzBJRSSAdFwp7tZkG7gygQ9gHcrk2IEiuq6XvTRA,1888 +nltk/test/unit/test_wordnet.py,sha256=tZCn_lZVJ8POuehMbAIcgV000vCMwXFJUbdhuPEOSmw,9260 +nltk/test/unit/translate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nltk/test/unit/translate/__pycache__/__init__.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_gdfa.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_ibm1.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_ibm2.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_ibm3.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_ibm4.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_ibm5.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_ibm_model.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_meteor.cpython-310.pyc,, +nltk/test/unit/translate/__pycache__/test_nist.cpython-310.pyc,, 
+nltk/test/unit/translate/__pycache__/test_stack_decoder.cpython-310.pyc,, +nltk/test/unit/translate/test_bleu.py,sha256=YpYHqzzebFqF8M-x2AjUYeI3YNmRSseJmAf4xoT13XY,15874 +nltk/test/unit/translate/test_gdfa.py,sha256=E4r6o0S2r5rzK7NPFTyDA2a7IAfYzmIhCU4WXB0Wvdo,4770 +nltk/test/unit/translate/test_ibm1.py,sha256=7dGtPK_T9qXGTmB8skOeJ_mNJ2G8VaoYOlqMkhP0fBs,2669 +nltk/test/unit/translate/test_ibm2.py,sha256=fAjggyyMHRzPmpuFdov7dWfwsNM7hK9Z8p_qalCn_lY,3377 +nltk/test/unit/translate/test_ibm3.py,sha256=9PUQNN75ITw_TgzWayxQrMZRNS6pdyNThSw-sTdscP4,4189 +nltk/test/unit/translate/test_ibm4.py,sha256=r1gfJCmXlP0UZhZdrdzc0CcJLZNCn0zrn_BIMH0dVDk,5209 +nltk/test/unit/translate/test_ibm5.py,sha256=20agaTpArhfMcx-Ady0BUXyxayBU_ipPiDTvb8s_1oo,6761 +nltk/test/unit/translate/test_ibm_model.py,sha256=qTMFR4acSkEP5-kta-9B6RymoswEIitV3ljn86riNCo,9676 +nltk/test/unit/translate/test_meteor.py,sha256=sldeMjDkStoMnxBnx1MKDRNBGmcs4Hdu9VmMSzpl1Jo,750 +nltk/test/unit/translate/test_nist.py,sha256=HFfcs5Gq_goyYm-NSqdb_Eet6kClibKNvcr3gdasMmk,1645 +nltk/test/unit/translate/test_stack_decoder.py,sha256=37pm9cVUc-I0789Yt-yUZME9wG6Xrcdzqs0a3lB_8mg,10000 +nltk/test/util.doctest,sha256=BYtTUbvvvKXlM57NfVs-QGe2YqSN3M_Ad40fJNI0go0,1058 +nltk/test/wordnet.doctest,sha256=5VI2tl-FxJ-NCwpMbv5AMvy8vvEoqaLW1hUNu2mzV9A,30528 +nltk/test/wordnet_lch.doctest,sha256=5a80en1DUmUKj9RepGbrpJmPsgvYSFCezlYqh_da9ME,2361 +nltk/test/wsd.doctest,sha256=wa0eAdE0glaOrzkY7VnxIb297tmA46TsuNqdeyHUNR4,3014 +nltk/text.py,sha256=TVmS9X9weLrodN2y_VlHqSYaJi6LOh9U6lpLtyAJU0o,28909 +nltk/tgrep.py,sha256=g3BNjLGCcIrKUnwXRoMd32f-l-8JqF6mUrJfxFKLOUI,37911 +nltk/tokenize/__init__.py,sha256=0qmUpIe6PExgEzV9lXnzeiSfGkHVLoIxVrcZIgBF3FA,5243 +nltk/tokenize/__pycache__/__init__.cpython-310.pyc,, +nltk/tokenize/__pycache__/api.cpython-310.pyc,, +nltk/tokenize/__pycache__/casual.cpython-310.pyc,, +nltk/tokenize/__pycache__/destructive.cpython-310.pyc,, +nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc,, +nltk/tokenize/__pycache__/mwe.cpython-310.pyc,, +nltk/tokenize/__pycache__/nist.cpython-310.pyc,, +nltk/tokenize/__pycache__/punkt.cpython-310.pyc,, +nltk/tokenize/__pycache__/regexp.cpython-310.pyc,, +nltk/tokenize/__pycache__/repp.cpython-310.pyc,, +nltk/tokenize/__pycache__/sexpr.cpython-310.pyc,, +nltk/tokenize/__pycache__/simple.cpython-310.pyc,, +nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc,, +nltk/tokenize/__pycache__/stanford.cpython-310.pyc,, +nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc,, +nltk/tokenize/__pycache__/texttiling.cpython-310.pyc,, +nltk/tokenize/__pycache__/toktok.cpython-310.pyc,, +nltk/tokenize/__pycache__/treebank.cpython-310.pyc,, +nltk/tokenize/__pycache__/util.cpython-310.pyc,, +nltk/tokenize/api.py,sha256=gSsubjy4wvCeoLz2LIToXV7fEbH-ZYuU1ZQKiZAUATc,2357 +nltk/tokenize/casual.py,sha256=38kW21jjDImAPLoCsvoCIHV91U6SqKYKroIzts82f3o,16101 +nltk/tokenize/destructive.py,sha256=JkyvgJ4vbKQ7PKeG5_Ss3LWvYVhCLzrnuNC9mmXO66U,9447 +nltk/tokenize/legality_principle.py,sha256=AIbUBCKtuAvehwOkC0Aa4lGmc8vJoRrNN5hm7wCpfyg,6236 +nltk/tokenize/mwe.py,sha256=lKHLQ-4lwHuhDeg3OJvTnq1xPk2H-Sp78VYL1WRovQ0,4181 +nltk/tokenize/nist.py,sha256=-EXf8gKQOFQmuymmIsslr1RInp4lS20rgXlECRNWbdA,7720 +nltk/tokenize/punkt.py,sha256=KVKagPpsPNYKzGc6a9sPkBMUlHjw5LwZxWr_r4z6hpw,68804 +nltk/tokenize/regexp.py,sha256=4oFnXCBYYwHbl9PLVIRdLIsMoFDKvhUvXUTB0N_Yu10,8331 +nltk/tokenize/repp.py,sha256=0c9syu4dcvFjlFt-bwIIlM2uunhtnAc0rcR_QqfjYJ4,8245 +nltk/tokenize/sexpr.py,sha256=NZazV0MD6VJH30gHaf4Ul4NlJMZ5r5S9ZR8xOROnezw,5302 
+nltk/tokenize/simple.py,sha256=Ie7Fhs95ubIHTzb-1Kfnfm0xjRNNsR9Jplu9Ei8v728,5379 +nltk/tokenize/sonority_sequencing.py,sha256=XBiVVAp9f2kCX_bg80r0QI835uF0jtTMUuCUimsqWPQ,7739 +nltk/tokenize/stanford.py,sha256=_p90fpkx6hmIwBtZYsKsXRa0i0OthUZ2kJqgXAkHLRY,3875 +nltk/tokenize/stanford_segmenter.py,sha256=E3Y1HS4Y298DpKFvotlpWf2YMcO7zCs7JrajhugJf1Q,9857 +nltk/tokenize/texttiling.py,sha256=QYDETnGqm4RdimXBZNInhALjzt0Wca8M3mH8aaX1lFU,16943 +nltk/tokenize/toktok.py,sha256=R1eW8VozEtxuwDYjNVn2eWSGSTDvtTKtASMhDvdrdIk,7679 +nltk/tokenize/treebank.py,sha256=ahB5jsQrFeXS-H1Y2Cm5dkkJE91Dva5nWI9Io-Uco2M,16669 +nltk/tokenize/util.py,sha256=iw9hPFtD_oZOVetjcCjh0P6s98u_fjbMDgpxGxn4RGY,10339 +nltk/toolbox.py,sha256=bMXsrkHbgGrP-Ktg2MBLDr-6MJupIlkTRtBhLUwI7tY,18337 +nltk/translate/__init__.py,sha256=a9SOGnQ057m96m5YY8JOP5OIWi353ManWall-EIlxCo,1331 +nltk/translate/__pycache__/__init__.cpython-310.pyc,, +nltk/translate/__pycache__/api.cpython-310.pyc,, +nltk/translate/__pycache__/bleu_score.cpython-310.pyc,, +nltk/translate/__pycache__/chrf_score.cpython-310.pyc,, +nltk/translate/__pycache__/gale_church.cpython-310.pyc,, +nltk/translate/__pycache__/gdfa.cpython-310.pyc,, +nltk/translate/__pycache__/gleu_score.cpython-310.pyc,, +nltk/translate/__pycache__/ibm1.cpython-310.pyc,, +nltk/translate/__pycache__/ibm2.cpython-310.pyc,, +nltk/translate/__pycache__/ibm3.cpython-310.pyc,, +nltk/translate/__pycache__/ibm4.cpython-310.pyc,, +nltk/translate/__pycache__/ibm5.cpython-310.pyc,, +nltk/translate/__pycache__/ibm_model.cpython-310.pyc,, +nltk/translate/__pycache__/meteor_score.cpython-310.pyc,, +nltk/translate/__pycache__/metrics.cpython-310.pyc,, +nltk/translate/__pycache__/nist_score.cpython-310.pyc,, +nltk/translate/__pycache__/phrase_based.cpython-310.pyc,, +nltk/translate/__pycache__/ribes_score.cpython-310.pyc,, +nltk/translate/__pycache__/stack_decoder.cpython-310.pyc,, +nltk/translate/api.py,sha256=SM3sIpzqhMYSFxnOHo1G30y9hkxKrQtE2Ugi1Qln03o,11109 +nltk/translate/bleu_score.py,sha256=YpFNn80ydIWg8iKHgAJ7jgFFkPrkuEMku6m1rC9X_kA,30415 +nltk/translate/chrf_score.py,sha256=C7mEHCk0Jn7QdmxctQImEja-HovmvpPOCfos_UhUH80,8978 +nltk/translate/gale_church.py,sha256=fy4jIbJpZmiJyjVEc_2s1ng6BORp63vG9_HbEToFM6E,8732 +nltk/translate/gdfa.py,sha256=dMWFOM72FZh8d3iuJhyhQZ8KxMfbusSzR0Nr74lyfKQ,6246 +nltk/translate/gleu_score.py,sha256=symY8I6w4SxQDKUN9EyJL6_1Da-4bVT_y-Kd2nnFe38,8831 +nltk/translate/ibm1.py,sha256=MnR2l9vpkyxCAyqGd6ajMUkVxsp5Q6m8Hz--lLzWVMs,9522 +nltk/translate/ibm2.py,sha256=kaGLTJfIrsy3KYj1-72vFYMNtDD8ptJst73mS4NH2nk,12561 +nltk/translate/ibm3.py,sha256=E9aNT2lKkV0I9wwGCvhZPWXk7FUl5VwqV2qPeDUbTck,14154 +nltk/translate/ibm4.py,sha256=1kUdywKq3klNtmId3AHrdYhk-3ptzYlaRw9CH7Jydzk,20765 +nltk/translate/ibm5.py,sha256=H6V--iB46jm9btUyvRSaJ09L2fPCda8xNhbNW9AbtKQ,27957 +nltk/translate/ibm_model.py,sha256=AXy2cgctd8CBV77lih-Yvw2G32xyCO8YMLIGpTugXHU,20504 +nltk/translate/meteor_score.py,sha256=-ZKuCuXxmexx3U-GkpUBvbxNyFEyKZUL16ZIkNnOVYY,17301 +nltk/translate/metrics.py,sha256=qyM4DXkdyRY6OYiHR0M9anTGl129lsHyKIzutfJ-Low,1513 +nltk/translate/nist_score.py,sha256=5n8KyFK_99PcGPGE_l-wkKPbbj9Uy68iE4MZiWEKaxY,8148 +nltk/translate/phrase_based.py,sha256=KBfNqROhEiut1N0C-nFDCivllejA4OUeZ6BLyuMNYTA,7860 +nltk/translate/ribes_score.py,sha256=OGPOh-byCf0R7i2aQYWR-Vh21RJtl9nxQKHEH_YuBto,14027 +nltk/translate/stack_decoder.py,sha256=MuRPezJG4gq2TQsArF0Cm3xn2X6E8FAKSg9BD4Qx4cI,20516 +nltk/tree/__init__.py,sha256=mruLTDldjRtc3viZeVxuUp_QaWW49aRrCSIlFUgln9I,1466 +nltk/tree/__pycache__/__init__.cpython-310.pyc,, 
+nltk/tree/__pycache__/immutable.cpython-310.pyc,, +nltk/tree/__pycache__/parented.cpython-310.pyc,, +nltk/tree/__pycache__/parsing.cpython-310.pyc,, +nltk/tree/__pycache__/prettyprinter.cpython-310.pyc,, +nltk/tree/__pycache__/probabilistic.cpython-310.pyc,, +nltk/tree/__pycache__/transforms.cpython-310.pyc,, +nltk/tree/__pycache__/tree.cpython-310.pyc,, +nltk/tree/immutable.py,sha256=NOmT_xXNUrk3ct0m7ZMNhqj_kL-91lRdP_ZwTc6bZEc,4178 +nltk/tree/parented.py,sha256=0VrrC0i7eBQuj0Q3H4bGrhWXXzlqvIX-7efnZmXtNTI,23192 +nltk/tree/parsing.py,sha256=IpYHYTBD_VZkp4m0NbC8qmhj9ZiQVI_8x4iJmoe2Hp4,2083 +nltk/tree/prettyprinter.py,sha256=1nybXp3GGP8AF_FxG_v6xrXiQxyvTAt7yseLLZ0QLdI,25586 +nltk/tree/probabilistic.py,sha256=mOnOfXKwE_OeIs4llAStM0vtm4DOqDbWgMOrR42Ni6U,2492 +nltk/tree/transforms.py,sha256=PmprKuO_0pYsSxQPdhnNnJsRmZNOtDYkImwZMoOVfvY,13689 +nltk/tree/tree.py,sha256=euTPBiCu9qVe2_-amu74Ew4SdqxrfIC0itRZkh_r05Q,36500 +nltk/treeprettyprinter.py,sha256=TJU6UHvemo2C_9a0LdTYS6n4ES4QjVf4sQcXq8HapQo,975 +nltk/treetransforms.py,sha256=B_0-bNh4gkTu2dY8E5mt8XQdN97IKFL7LrtOMeASRps,5288 +nltk/twitter/__init__.py,sha256=npOhJzWN63BFf7aPDfUAia76r8uVgVy3ll6ohLKS8fU,819 +nltk/twitter/__pycache__/__init__.cpython-310.pyc,, +nltk/twitter/__pycache__/api.cpython-310.pyc,, +nltk/twitter/__pycache__/common.cpython-310.pyc,, +nltk/twitter/__pycache__/twitter_demo.cpython-310.pyc,, +nltk/twitter/__pycache__/twitterclient.cpython-310.pyc,, +nltk/twitter/__pycache__/util.cpython-310.pyc,, +nltk/twitter/api.py,sha256=yAHSw0JeVRAKJOHvWGqml3U3t7GYaQwhPU3Kf1sRYqw,4692 +nltk/twitter/common.py,sha256=RJbcw3Wvr6YTnQ4YqfWQDN494vKATE_REx-5-9YL5Qg,10120 +nltk/twitter/twitter_demo.py,sha256=dCplglFGLm9I5NgSt3UHVM-NVES2zWWjDnVPRFUONIQ,8309 +nltk/twitter/twitterclient.py,sha256=Vliyz7To9z2IJmUUg6AWcwDS-gNIIF2Yi5fuPfJN0zE,19927 +nltk/twitter/util.py,sha256=Ij7TX3ypt1uYcfo6nSwCbygUpK6_59OhNN3l1ZPefS0,4546 +nltk/util.py,sha256=VijbYZYpM5wjWEsgI0uu-GnR-zJMulfBoDgeJI9QpGw,42026 +nltk/wsd.py,sha256=5Ie3V_RtWQGRsIeL0rMnGGA_9KEGD_9l9GkbkFyYQoA,1789 diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..5bad85fdc1cd08553756d0fb2c7be8b5ad6af7fb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..65a3a33374385d4199dda32bb7a3284f52653e66 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt @@ -0,0 +1,3 @@ + +[console_scripts] +nltk=nltk.cli:cli diff --git a/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..846929621767476398799cbeff31ac8ae954578f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/top_level.txt @@ -0,0 +1 @@ +nltk diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..4ede9455f70e41740768abe80f3198e78397053f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .config import AdaptionPromptConfig +from .layer import AdaptedAttention +from .model import AdaptionPromptModel + + +__all__ = ["AdaptionPromptConfig", "AdaptedAttention", "AdaptionPromptModel"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..461a5d523b0b66ba459c4c88170a2e716daa9de9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aae1ce59b972058f3ccb8f627790ccecc9c82ac3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86e255103b916b196d902b91a9bf04190f7cf955 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..852aa26cfab0534834b4ede553e403c0070da848 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5828c2ac838372b2926d2bb2068646b003122fbb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py new file 
mode 100644 index 0000000000000000000000000000000000000000..90e29841498b8821dc6b6602282b66a0d3df6750 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py @@ -0,0 +1,80 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple +from dataclasses import dataclass, field + +from peft.config import PeftConfig +from peft.utils import PeftType + +from .utils import llama_compute_query_states + + +@dataclass +class AdaptionPromptConfig(PeftConfig): + """Stores the configuration of an [`AdaptionPromptModel`].""" + + target_modules: str = field( + default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."} + ) + adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"}) + adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"}) + + def __post_init__(self): + self.peft_type = PeftType.ADAPTION_PROMPT + + @property + def is_adaption_prompt(self) -> bool: + """Return True if this is an adaption prompt config.""" + return True + + +# Contains the config that is specific to a transformers model type. +ModelTypeConfig = namedtuple( + "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"] +) + +# Mapping of transformers model types to their specific configuration. +TRANSFORMERS_MODEL_CONFIG = { + "llama": ModelTypeConfig( + compute_query_states=llama_compute_query_states, + target_modules="self_attn", + k_proj_layer="k_proj", + v_proj_layer="v_proj", + o_proj_layer="o_proj", + ), + "mistral": ModelTypeConfig( # same as llama, + compute_query_states=llama_compute_query_states, + target_modules="self_attn", + k_proj_layer="k_proj", + v_proj_layer="v_proj", + o_proj_layer="o_proj", + ), +} + + +def prepare_config( + peft_config: AdaptionPromptConfig, + model, +) -> AdaptionPromptConfig: + """Prepare the config based on the llama model type.""" + if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG: + raise ValueError(f"Unsupported model type for adaption prompt: '{model.config.model_type}'.") + + model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type] + + if peft_config.target_modules is None: + peft_config.target_modules = model_config.target_modules + + return peft_config diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..31fb51e0de6a9842d14578c78a5a1aceba676483 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py @@ -0,0 +1,128 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
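A minimal usage sketch for the AdaptionPromptConfig defined in config.py above. The checkpoint name and the adapter_len/adapter_layers values are illustrative assumptions, not values from this diff; as the code shows, prepare_config() fills in target_modules ("self_attn") when it is left unset.

from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig, get_peft_model

# Illustrative values: adapter_len is the number of learnable prompt tokens
# per adapted layer; adapter_layers counts decoder layers from the top.
config = AdaptionPromptConfig(adapter_len=10, adapter_layers=30, task_type="CAUSAL_LM")

# Hypothetical checkpoint; any model whose model_type is "llama" or "mistral" qualifies.
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
model = get_peft_model(model, config)  # only adaption_prompt parameters stay trainable
model.print_trainable_parameters()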
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .config import TRANSFORMERS_MODEL_CONFIG + + +class AdaptedAttention(nn.Module): + """This module wraps a LLamaAttention module and injects adaption prompts.""" + + def __init__(self, model_type: str, adapter_len: int, model): + """ + Initialize object. + + Args: + model_type: The transformer model type. This is used to retrieve the right method to + compute query states. + adapter_len: The length of the adaption prompt to insert. + model: The original transformer attention module that is being wrapped. + """ + assert not isinstance(model, AdaptedAttention) + super().__init__() + self.model_type = model_type + self.model = model + self.adapter_len = adapter_len + # Assume all parameters of the attention model we are wrapping are on the same device. + device = next(model.parameters()).device + # Don't think this was specified in the paper, but we follow the official repo which used an Embedding + # which initializes the tokens with standard normal values. + # https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L234 + # (bsz, adapter_len, hidden_size) + target_dtype = ( + model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32 + ) + self.adaption_prompt = nn.Parameter( + torch.empty(1, adapter_len, self.model.hidden_size, device=device, dtype=target_dtype).normal_() + ) + # Initialize the gate to 0 as this is "zero-init". + self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype)) + + def forward(self, **kwargs): + """ + Forward pass for the adapter which wraps the original LlamaAttention module. + + "Official" paper implementation: + https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L141 + + Args: + kwargs: See the original LlamaAttention module. 
+ """ + if kwargs.get("output_attention", False): + raise NotImplementedError("output_attention is not currently supported.") + + output, _, past_key_value = self.model(**kwargs) + bsz = output.shape[0] + q_len = output.shape[1] + embed_dim = output.shape[2] + k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer + v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer + o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer + factor = ( + self.model.k_proj.in_features // self.model.k_proj.out_features + ) # Mistral has different input and output dimension for k_proj and v_proj layers + + if k_proj_layer == v_proj_layer: + _, key, value = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2) + else: + key = getattr(self.model, k_proj_layer)(self.adaption_prompt) + value = getattr(self.model, v_proj_layer)(self.adaption_prompt) + + # (bsz, num_key_value_heads, adapter_len, head_dim) + adapter_k = ( + key.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim) + .repeat(bsz, 1, 1, 1) + .transpose(1, 2) + ) + adapter_v = ( + value.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim) + .repeat(bsz, 1, 1, 1) + .transpose(1, 2) + ) + # Below is taken from https://github.com/huggingface/transformers/blob/e547458c43dfdbbb8f6a7757237e234c44e20a8f/src/transformers/models/mistral/modeling_mistral.py#L181 + # (bsz, num_heads, adapter_len, head_dim) + adapter_k = torch.repeat_interleave(adapter_k, repeats=factor, dim=1) + adapter_v = torch.repeat_interleave(adapter_v, repeats=factor, dim=1) + # Recompute query states. + compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states + # (bsz, num_heads, q_len, head_dim) + query_states = compute_query_states(model=self.model, **kwargs) + + previous_dtype = query_states.dtype + + # (bsz, num_heads, q_len, adapter_len) + scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt( + self.model.head_dim + ) + # Upcast attention to fp32 + # (bsz, num_heads, q_len, adapter_len) + scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype) + # (bsz, q_len, num_heads * head_dim) + adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1) + + # (bsz, q_len, hidden_size) + if o_proj_layer is not None: + adapter_output = getattr(self.model, o_proj_layer)(adapter_output) + + # Add adaption prompt output to original output. + output = output + adapter_output + + # Restore original dtype. + output = output.to(previous_dtype) + return output, None, past_key_value diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py new file mode 100644 index 0000000000000000000000000000000000000000..08aea27f8efb51c8c2d85be91a2f95659651c701 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py @@ -0,0 +1,161 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List + +import torch.nn as nn + +from peft.utils import _freeze_adapter, _get_submodules + +from .config import AdaptionPromptConfig, prepare_config +from .layer import AdaptedAttention +from .utils import is_adaption_prompt_trainable + + +class AdaptionPromptModel(nn.Module): + """ + Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf. + + The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert + trainable prompts with gates (for zero init). + + Notes on the multi-adapter pattern: + - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter + name. + - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them + in the dictionary, and replace them with the modules of the new adapter. + - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the + dictionary. + - Disabling the adapter would also result in the modules being removed from the model. + """ + + def __init__(self, model, configs: Dict, adapter_name: str): + super().__init__() + self.model = model + # Store adapter configs by name. + self.peft_config: Dict[str, AdaptionPromptConfig] = {} + # Store lists of the parents of the affected attention modules by adapter name. + # We keep references to the parents so we can swap the adapters in-and-out of the model. + self._parents: Dict[str, List[nn.Module]] = {} + # Store lists of cached AdaptedAttention modules by name. + self._cached_adapters: Dict[str, List] = {} + # The name of the currently active adapter. + self._active_adapter = None + # Whether the adapter is enabled. + self._enabled = True + self.forward = self.model.forward + self.add_adapter(adapter_name, configs[adapter_name]) + self._mark_only_adaption_prompts_as_trainable(self.model) + + def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None: + """Add an adapter with the given name and config.""" + config = prepare_config(config, self.model) + if adapter_name in self.peft_config: + raise ValueError(f"Adapter with name '{adapter_name}' already exists.") + + parents = [] + for name, _ in self.model.named_modules(): + if name.endswith(config.target_modules): + par, _, _ = _get_submodules(self.model, name) + parents.append(par) + if len(parents) < config.adapter_layers: + raise ValueError( + f"Config specifies more adapter layers '{config.adapter_layers}'" + f" than the model has '{len(parents)}'." + ) + # Note that if the target modules are not in Sequential, ModuleList, or + # some other PyTorch ordered container, the behavior is undefined as we + # assume here that the order of the modules is the same as the order of + # the transformer decoder layers. + parents = parents[-config.adapter_layers :] + self._parents[adapter_name] = parents + + # It is only None during initialization. + # If it is disabled, we don't have to remove the modules. 
+ if self._active_adapter is not None and self._enabled: + self._remove_adapted_attentions(self._active_adapter) + self._active_adapter = adapter_name + self.peft_config[adapter_name] = config + self._create_adapted_attentions(config, parents) + if not self._enabled: + self._remove_adapted_attentions(self._active_adapter) + + if config.inference_mode: + _freeze_adapter(self.model, adapter_name) + + def set_adapter(self, adapter_name: str) -> None: + """Set the model to use the adapter with the given name.""" + if self._active_adapter == adapter_name: + return + if adapter_name not in self.peft_config: + raise ValueError(f"Adapter with name '{adapter_name}' does not exist.") + + if self._enabled: + self._remove_adapted_attentions(self._active_adapter) + self._set_adapted_attentions(adapter_name) + + self._active_adapter = adapter_name + + def enable_adapter_layers(self): + """Enable adapter layers by swapping in cached AdaptedAttention modules.""" + self._enabled = True + self._set_adapted_attentions(self._active_adapter) + + def disable_adapter_layers(self): + """Disable adapter layers by swapping out AdaptedAttention modules.""" + self._enabled = False + self._remove_adapted_attentions(self._active_adapter) + + def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None: + """Wrap LlamaAttention modules with newly created AdaptedAttention modules.""" + for par in parents: + attn = AdaptedAttention( + model_type=self.model.config.model_type, + adapter_len=config.adapter_len, + model=getattr(par, config.target_modules), + ) + setattr(par, config.target_modules, attn) + + def _set_adapted_attentions(self, adapter_name: str) -> None: + """Replace LlamaAttention modules with cached AdaptedAttention modules.""" + cached = self._cached_adapters[adapter_name] + del self._cached_adapters[adapter_name] + config = self.peft_config[adapter_name] + for i, par in enumerate(self._parents[adapter_name]): + setattr(par, config.target_modules, cached[i]) + + def _remove_adapted_attentions(self, adapter_name: str) -> None: + """Remove AdaptedAttention modules from the model and store them in the cache.""" + config = self.peft_config[adapter_name] + adapted_attentions = [] + for par in self._parents[adapter_name]: + attn = getattr(par, config.target_modules) + adapted_attentions.append(attn) + setattr(par, config.target_modules, attn.model) + self._cached_adapters[adapter_name] = adapted_attentions + + def _mark_only_adaption_prompts_as_trainable(self, model: nn.Module) -> None: + """Freeze all parameters of the model except the adaption prompts.""" + for n, p in model.named_parameters(): + if not is_adaption_prompt_trainable(n): + p.requires_grad = False + + def __getattr__(self, name: str): + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + # This is necessary as e.g. causal models have various methods that we + # don't want to re-implement here. + return getattr(self.model, name) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f15d89f31aa0c92c7305897850342d3929292a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py @@ -0,0 +1,121 @@ +# Copyright 2023-present the HuggingFace Inc. team. 
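A hedged, runnable demonstration of the adapter-swapping behavior implemented by AdaptionPromptModel above, using a tiny randomly initialized Llama; all sizes and adapter names are arbitrary toy choices, not values from this diff.

from transformers import LlamaConfig, LlamaForCausalLM
from peft.tuners.adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel

base = LlamaForCausalLM(LlamaConfig(
    vocab_size=128, hidden_size=64, intermediate_size=128,
    num_hidden_layers=2, num_attention_heads=4,
))
cfg = AdaptionPromptConfig(adapter_len=4, adapter_layers=2, task_type="CAUSAL_LM")

model = AdaptionPromptModel(base, {"default": cfg}, adapter_name="default")
model.add_adapter("alt", cfg)    # caches "default" and makes "alt" active
model.set_adapter("default")     # swaps "alt" out, restores the cached "default" modules
model.disable_adapter_layers()   # unwraps the AdaptedAttention modules entirely
model.enable_adapter_layers()    # swaps them back in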
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect + +import torch +import torch.nn as nn + + +def llama_rotate_half(x: torch.Tensor) -> torch.Tensor: + """ + Rotate half the hidden dims of the input. + + This function was duplicated verbatim from: + https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126 + + This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other + functions were also adapted from the transformers implementation but were modified. + """ + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def llama_apply_rotary_pos_emb(q, cos, sin, position_ids): + """ + Apply rotary position embedding to query states in the Llama model. + + This function was adapted from: + https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133 + + It was modified to remove unnecessary processing of key states. The method is compatible with transformers <= + 4.34.2 and also with the latest version (>=4.35). + """ + # In previous transformers version cos/sin cached had a shape of 4D + if len(cos.shape) == 4: + gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] + gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) + cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + # In the new version, it is 2D so we fall back to the new implementation + # https://github.com/huggingface/transformers/blame/eef7ea98c31a333bacdc7ae7a2372bde772be8e4/src/transformers/models/llama/modeling_llama.py#L222-L226 + else: + cos = cos[position_ids].unsqueeze(1) + sin = sin[position_ids].unsqueeze(1) + q_embed = (q * cos) + (llama_rotate_half(q) * sin) + return q_embed + + +def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor: + """ + Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the + original LlamaModel in the transformers library does not return them. 
See the related discussion in the PR: + https://github.com/huggingface/peft/pull/268 + """ + hidden_states = kwargs.get("hidden_states") + position_ids = kwargs.get("position_ids") + past_key_value = kwargs.get("past_key_value") + bsz, q_len, _ = hidden_states.size() + query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2) + + factor = model.k_proj.in_features // model.k_proj.out_features + value_states = ( + model.v_proj(hidden_states).view(bsz, q_len, (model.num_heads // factor), model.head_dim).transpose(1, 2) + ) + + seq_len = q_len + + if past_key_value is not None: + if isinstance(past_key_value, tuple): + # for transformers <= 4.35 + seq_len += past_key_value[0].shape[-2] + else: + # since transformers 4.36, this is a DynamicCache instance + seq_len += past_key_value.get_seq_length(model.layer_idx) + + # For transformers > 4.37.2 `position_ids` became a required arguments in the rotary embedding's forward pass. + if "position_ids" not in inspect.signature(model.rotary_emb.forward).parameters: + # TODO we assume that position_ids is not None here, not sure if that is safe but the old code also did that + cos, sin = model.rotary_emb(value_states, seq_len=seq_len) + return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids) + + past_seen_tokens = 0 + if position_ids is None: + # Compute position_ids, since they are required for transformers > 4.37.2 + if past_key_value is None: + new_cache_positions = torch.arange(q_len, q_len + q_len, device=value_states.device) + else: + past_seen_tokens = past_key_value.get_usable_length(q_len, model.layer_idx) + new_cache_positions = torch.arange(past_seen_tokens, past_seen_tokens + q_len, device=value_states.device) + position_ids = new_cache_positions.unsqueeze(0) + + rotary_emb_kwargs = {"position_ids": position_ids} + # The `seq_len` argument has been officially removed in transformers >= 4.39.0 + if "seq_len" in inspect.signature(model.rotary_emb.forward).parameters: + rotary_emb_kwargs["seq_len"] = q_len + past_seen_tokens + + cos, sin = model.rotary_emb(value_states, **rotary_emb_kwargs) + + # For batched inference unsqueeze it on the correct dim + # since: https://github.com/huggingface/transformers/pull/29109 + if len(cos.shape) == 3: + cos = cos.unsqueeze(1) + sin = sin.unsqueeze(1) + + return (query_states * cos) + (llama_rotate_half(query_states) * sin) + + +def is_adaption_prompt_trainable(params: str) -> bool: + """Return True if module is trainable under adaption prompt fine-tuning.""" + return params.split(".")[-1].startswith("adaption_") diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..408cf2a54ae4c0befa9e3f1cad4ff93d71cfedc5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
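The rotate-half identity applied by llama_apply_rotary_pos_emb and llama_compute_query_states above can be checked in isolation. A dependency-free toy sketch; the 10000 base and the sizes mirror common Llama defaults but are assumptions here:

import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

head_dim, seq_len = 8, 4
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
t = torch.arange(seq_len).float()
freqs = torch.outer(t, inv_freq)          # (seq_len, head_dim/2)
emb = torch.cat((freqs, freqs), dim=-1)   # (seq_len, head_dim)
cos, sin = emb.cos(), emb.sin()

q = torch.randn(seq_len, head_dim)
q_rot = q * cos + rotate_half(q) * sin    # same identity the code above applies
# Position 0 is left unrotated, since cos=1 and sin=0 there.
assert torch.allclose(q_rot[0], q[0])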
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import LoKrConfig +from .layer import Conv2d, Linear, LoKrLayer +from .model import LoKrModel + + +__all__ = ["LoKrConfig", "LoKrModel", "Conv2d", "Linear", "LoKrLayer"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0e7452f777da7672d4378a80e000a7067a45069 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e379674c232f4d743908e0b33478329f7a3e647e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ef110cc39e48ce7ec2f74a547a48d9b4a732bb7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..229f29ccd01d89136573439218fae53560e3eb5e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/config.py new file mode 100644 index 0000000000000000000000000000000000000000..c8d60a7463c59e114e42658965ac7c81f3fb563e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/config.py @@ -0,0 +1,127 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import List, Optional, Union + +from peft.tuners.lycoris_utils import LycorisConfig +from peft.utils import PeftType + + +@dataclass +class LoKrConfig(LycorisConfig): + """ + Configuration class of [`LoKrModel`]. + + Args: + r (`int`): + LoKr rank. + alpha (`int`): + The alpha parameter for LoKr scaling. + rank_dropout (`float`): + The dropout probability for rank dimension during training. 
+ module_dropout (`float`): + The dropout probability for disabling LoKr modules during training. + use_effective_conv2d (`bool`): + Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper). + decompose_both (`bool`): + Perform rank decomposition of left kronecker product matrix. + decompose_factor (`int`): + Kronecker product decomposition factor. + target_modules (`Optional[Union[List[str], str]]`): + The names of the modules to apply the adapter to. If this is specified, only the modules with the specified + names will be replaced. When passing a string, a regex match will be performed. When passing a list of + strings, either an exact match will be performed or it is checked if the name of the module ends with any + of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, + excluding the output layer. If this is not specified, modules will be chosen according to the model + architecture. If the architecture is not known, an error will be raised -- in this case, you should specify + the target modules manually. + init_weights (`bool`): + Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is + discouraged. + layers_to_transform (`Union[List[int], int]`): + The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices + that are specified in this list. If a single integer is passed, it will apply the transformations on the + layer at this index. + layers_pattern (`str`): + The layer pattern name, used only if `layers_to_transform` is different from `None`. + rank_pattern (`dict`): + The mapping from layer names or regexp expression to ranks which are different from the default rank + specified by `r`. + alpha_pattern (`dict`): + The mapping from layer names or regexp expression to alphas which are different from the default alpha + specified by `alpha`. + modules_to_save (`Optional[List[str]]`): + List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. + """ + + r: int = field(default=8, metadata={"help": "LoKr rank"}) + alpha: int = field(default=8, metadata={"help": "LoKr alpha"}) + rank_dropout: float = field( + default=0.0, metadata={"help": "The dropout probability for rank dimension during training"} + ) + module_dropout: float = field( + default=0.0, metadata={"help": "The dropout probability for disabling LoKr modules during training"} + ) + use_effective_conv2d: bool = field( + default=False, + metadata={ + "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)' + }, + ) + decompose_both: bool = field( + default=False, + metadata={"help": "Perform rank decomposition of left kronecker product matrix."}, + ) + decompose_factor: int = field(default=-1, metadata={"help": "Kronecker product decomposition factor."}) + target_modules: Optional[Union[List[str], str]] = field( + default=None, + metadata={ + "help": "List of module names or regex expression of the module names to replace with LoKr." + "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " + "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." + }, + ) + init_weights: bool = field( + default=True, + metadata={ + "help": ( + "Whether to initialize the weights of the LoKr layers with their default initialization. 
Don't change " + "this setting, except if you know exactly what you're doing." + ), + }, + ) + layers_to_transform: Optional[Union[List[int], int]] = field( + default=None, + metadata={ + "help": "The layer indexes to transform, if this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." + }, + ) + layers_pattern: Optional[str] = field( + default=None, + metadata={ + "help": "The layer pattern name, used only if `layers_to_transform` is different from None and if the layer pattern is not in the common layers pattern." + }, + ) + modules_to_save: Optional[List[str]] = field( + default=None, + metadata={ + "help": "List of modules apart from LoKr layers to be set as trainable and saved in the final checkpoint. " + "For example, in Sequence Classification or Token Classification tasks, " + "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." + }, + ) + + def __post_init__(self): + self.peft_type = PeftType.LOKR diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..28e4e5ca61bc4b6826d740fb6e1b77b583f891a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/layer.py @@ -0,0 +1,409 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
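A minimal configuration sketch for the LoKrConfig defined above; the target module names and hyperparameters are illustrative assumptions, not values from this diff.

from peft import LoKrConfig, get_peft_model

config = LoKrConfig(
    r=8,                                  # rank of the low-rank Kronecker factor
    alpha=16,                             # effective scaling is alpha / r
    target_modules=["q_proj", "v_proj"],  # hypothetical attention projections
    decompose_both=False,                 # only the second Kronecker factor is low-rank
    decompose_factor=-1,                  # -1: let factorization() pick a balanced split
)
# peft_model = get_peft_model(base_model, config)  # `base_model` assumed in scope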
+ +import math +from typing import Any, Optional, Set, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from peft.tuners.lycoris_utils import LycorisLayer + + +class LoKrLayer(nn.Module, LycorisLayer): + # All names of layers that may contain adapter weights + adapter_layer_names = ( + "lokr_w1", + "lokr_w1_a", + "lokr_w1_b", + "lokr_w2", + "lokr_w2_a", + "lokr_w2_b", + "lokr_t2", + ) + # other_param_names is defined on parent class + + def __init__(self, base_layer: nn.Module) -> None: + super().__init__() + LycorisLayer.__init__(self, base_layer) + + # LoKr info + self.lokr_w1 = nn.ParameterDict({}) + self.lokr_w1_a = nn.ParameterDict({}) + self.lokr_w1_b = nn.ParameterDict({}) + self.lokr_w2 = nn.ParameterDict({}) + self.lokr_w2_a = nn.ParameterDict({}) + self.lokr_w2_b = nn.ParameterDict({}) + self.lokr_t2 = nn.ParameterDict({}) + + @property + def _available_adapters(self) -> Set[str]: + return { + *self.lokr_w1, + *self.lokr_w1_a, + *self.lokr_w1_b, + *self.lokr_w2, + *self.lokr_w2_a, + *self.lokr_w2_b, + *self.lokr_t2, + } + + def create_adapter_parameters( + self, + adapter_name: str, + r: int, + shape, + use_w1: bool, + use_w2: bool, + use_effective_conv2d: bool, + ): + if use_w1: + self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0])) + else: + self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r)) + self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0])) + + if len(shape) == 4: + # Conv2d + if use_w2: + self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:])) + elif use_effective_conv2d: + self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) + self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode + self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode + else: + self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r)) + self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3])) + else: + # Linear + if use_w2: + self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1])) + else: + self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r)) + self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) + + def reset_adapter_parameters(self, adapter_name: str): + if adapter_name in self.lokr_w1: + nn.init.zeros_(self.lokr_w1[adapter_name]) + else: + nn.init.zeros_(self.lokr_w1_a[adapter_name]) + nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5)) + + if adapter_name in self.lokr_w2: + nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5)) + else: + nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5)) + + if adapter_name in self.lokr_t2: + nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5)) + + def reset_adapter_parameters_random(self, adapter_name: str): + if adapter_name in self.lokr_w1: + nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5)) + else: + nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5)) + + if adapter_name in self.lokr_w2: + nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5)) + else: + nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5)) + 
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5)) + + if adapter_name in self.lokr_t2: + nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5)) + + def update_layer( + self, + adapter_name: str, + r: int, + alpha: float, + rank_dropout: float, + module_dropout: float, + init_weights: bool, + use_effective_conv2d: bool, + decompose_both: bool, + decompose_factor: int, + **kwargs, + ) -> None: + """Internal function to create lokr adapter + + Args: + adapter_name (`str`): Name for the adapter to add. + r (`int`): Rank for the added adapter. + alpha (`float`): Alpha for the added adapter. + rank_dropout (`float`): The dropout probability for rank dimension during training + module_dropout (`float`): The dropout probability for disabling adapter during training. + init_weights (`bool`): Whether to initialize adapter weights. + use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1. + decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix. + decompose_factor (`int`): Kronecker product decomposition factor. + """ + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") + + self.r[adapter_name] = r + self.alpha[adapter_name] = alpha + self.scaling[adapter_name] = alpha / r + self.rank_dropout[adapter_name] = rank_dropout + self.module_dropout[adapter_name] = module_dropout + base_layer = self.get_base_layer() + + # Determine shape of LoKr weights + if isinstance(base_layer, nn.Linear): + in_dim, out_dim = base_layer.in_features, base_layer.out_features + + in_m, in_n = factorization(in_dim, decompose_factor) + out_l, out_k = factorization(out_dim, decompose_factor) + shape = ((out_l, out_k), (in_m, in_n)) # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d + + use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2) + use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2) + use_effective_conv2d = False + elif isinstance(base_layer, nn.Conv2d): + in_dim, out_dim = base_layer.in_channels, base_layer.out_channels + k_size = base_layer.kernel_size + + in_m, in_n = factorization(in_dim, decompose_factor) + out_l, out_k = factorization(out_dim, decompose_factor) + shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size) + + use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2) + use_w2 = r >= max(shape[0][1], shape[1][1]) / 2 + use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1) + else: + raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}") + + # Create weights with provided shape + self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d) + + # Initialize weights + if init_weights: + self.reset_adapter_parameters(adapter_name) + else: + self.reset_adapter_parameters_random(adapter_name) + + # Move new weights to device + weight = getattr(self.get_base_layer(), "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + if weight.dtype.is_floating_point or weight.dtype.is_complex: + self.to(weight.device, dtype=weight.dtype) + else: + self.to(weight.device) + self.set_adapter(self.active_adapters) + + def get_delta_weight(self, adapter_name: str) -> torch.Tensor: + # https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224 + if adapter_name in self.lokr_w1: + w1 = self.lokr_w1[adapter_name] + 
else: + w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name] + + if adapter_name in self.lokr_w2: + w2 = self.lokr_w2[adapter_name] + elif adapter_name in self.lokr_t2: + w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name]) + else: + w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name] + + # Make weights with Kronecker product + weight = make_kron(w1, w2) + weight = weight.reshape(self.get_base_layer().weight.shape) + + # Perform rank dropout during training - drop rows of addition weights + rank_dropout = self.rank_dropout[adapter_name] + if self.training and rank_dropout: + drop = (torch.rand(weight.size(0)) > rank_dropout).float() + drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device) + drop /= drop.mean() + weight *= drop + + return weight + + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + previous_dtype = x.dtype + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + + # Execute all the adapters + for active_adapter in self.active_adapters: + if active_adapter not in self._available_adapters: + continue + + module_dropout = self.module_dropout[active_adapter] + + # Modify current execution weights + if (not self.training) or (self.training and torch.rand(1) > module_dropout): + result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs) + + result = result.to(previous_dtype) + return result + + +class Linear(LoKrLayer): + """LoKr implemented in Linear layer""" + + def __init__( + self, + base_layer: nn.Module, + device: Optional[Union[str, torch.device]] = None, + dtype: Optional[torch.dtype] = None, + adapter_name: str = "default", + r: int = 0, + alpha: float = 0.0, + rank_dropout: float = 0.0, + module_dropout: float = 0.0, + init_weights: bool = True, + **kwargs, + ): + super().__init__(base_layer) + + # Create adapter and set it active + self._active_adapter = adapter_name + self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs) + + def _get_delta_activations( + self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any + ) -> torch.Tensor: + delta_weight = self.get_delta_weight(adapter_name) + # don't add bias here, because the bias is already included in the output of the base_layer + return F.linear(input, delta_weight) + + def __repr__(self) -> str: + rep = super().__repr__() + return "lokr." 
+ rep + + +class Conv2d(LoKrLayer): + """LoKr implemented in Conv2d layer""" + + def __init__( + self, + base_layer: nn.Module, + device: Optional[Union[str, torch.device]] = None, + dtype: Optional[torch.dtype] = None, + adapter_name: str = "default", + r: int = 0, + alpha: float = 0.0, + rank_dropout: float = 0.0, + module_dropout: float = 0.0, + use_effective_conv2d: bool = False, + init_weights: bool = True, + **kwargs, + ): + super().__init__(base_layer) + + # Create adapter and set it active + self._active_adapter = adapter_name + self.update_layer( + adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs + ) + + def _get_delta_activations( + self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any + ) -> torch.Tensor: + delta_weight = self.get_delta_weight(adapter_name) + # don't add bias here, because the bias is already included in the output of the base_layer + base_layer = self.get_base_layer() + return F.conv2d( + input, + delta_weight, + stride=base_layer.stride, + padding=base_layer.padding, + dilation=base_layer.dilation, + groups=base_layer.groups, + ) + + def __repr__(self) -> str: + rep = super().__repr__() + return "lokr." + rep + + +# The code below is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11 + + +def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]: + """Factorizes the provided number into the product of two numbers + + Args: + dimension (`int`): The number that needs to be factorized. + factor (`int`, optional): + Factorization divider. The algorithm will try to output two numbers, one of which will be as close to the + factor as possible. If -1 is provided, the algorithm will search for divisors near the + square root of the dimension. Defaults to -1. + + Returns: + Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is + always less than or equal to the second. + + Example: + ```py + >>> factorization(256, factor=-1) + (16, 16) + + >>> factorization(128, factor=-1) + (8, 16) + + >>> factorization(127, factor=-1) + (1, 127) + + >>> factorization(128, factor=4) + (4, 32) + ``` + """ + + if factor > 0 and (dimension % factor) == 0: + m = factor + n = dimension // factor + return m, n + if factor == -1: + factor = dimension + m, n = 1, dimension + length = m + n + while m < n: + new_m = m + 1 + while dimension % new_m != 0: + new_m += 1 + new_n = dimension // new_m + if new_m + new_n > length or new_m > factor: + break + else: + m, n = new_m, new_n + if m > n: + n, m = m, n + return m, n + + +def make_weight_cp(t, wa, wb): + rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2] + return rebuild2 + + +def make_kron(w1, w2, scale=1.0): + if len(w2.shape) == 4: + w1 = w1.unsqueeze(2).unsqueeze(2) + w2 = w2.contiguous() + rebuild = torch.kron(w1, w2) + + return rebuild * scale diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/model.py new file mode 100644 index 0000000000000000000000000000000000000000..eecad8dd13d8f637ea8e6da2377473f314fa6aac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/model.py @@ -0,0 +1,115 @@ +# Copyright 2023-present the HuggingFace Inc. team.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from itertools import chain +from typing import Dict, Type, Union + +import torch +from torch import nn + +from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner + +from .layer import Conv2d, Linear, LoKrLayer + + +class LoKrModel(LycorisTuner): + """ + Creates Low-Rank Kronecker Product model from a pretrained model. The original method is partially described in + https://arxiv.org/abs/2108.06098 and in https://arxiv.org/abs/2309.14859. The current implementation heavily borrows + from + https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py + + Args: + model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. + config ([`LoKrConfig`]): The configuration of the LoKr model. + adapter_name (`str`): The name of the adapter, defaults to `"default"`. + + Returns: + `torch.nn.Module`: The LoKr model. + + Example: + ```py + >>> from diffusers import StableDiffusionPipeline + >>> from peft import LoKrModel, LoKrConfig + + >>> config_te = LoKrConfig( + ... r=8, + ... alpha=32, + ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], + ... rank_dropout=0.0, + ... module_dropout=0.0, + ... init_weights=True, + ... ) + >>> config_unet = LoKrConfig( + ... r=8, + ... alpha=32, + ... target_modules=[ + ... "proj_in", + ... "proj_out", + ... "to_k", + ... "to_q", + ... "to_v", + ... "to_out.0", + ... "ff.net.0.proj", + ... "ff.net.2", + ... ], + ... rank_dropout=0.0, + ... module_dropout=0.0, + ... init_weights=True, + ... use_effective_conv2d=True, + ... ) + + >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> model.text_encoder = LoKrModel(model.text_encoder, config_te, "default") + >>> model.unet = LoKrModel(model.unet, config_unet, "default") + ``` + + **Attributes**: + - **model** ([`~torch.nn.Module`]) -- The model to be adapted. + - **peft_config** ([`LoKrConfig`]): The configuration of the LoKr model. + """ + + prefix: str = "lokr_" + layers_mapping: Dict[Type[torch.nn.Module], Type[LoKrLayer]] = { + torch.nn.Conv2d: Conv2d, + torch.nn.Linear: Linear, + } + + def _create_and_replace( + self, + config: LycorisConfig, + adapter_name: str, + target: Union[LoKrLayer, nn.Module], + target_name: str, + parent: nn.Module, + current_key: str, + ) -> None: + """ + A private method to create and replace the target module with the adapter module.
+ """ + + # Regexp matching - Find key which matches current target_name in patterns provided + pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys())) + target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name) + + kwargs = config.to_dict() + kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) + kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha) + + if isinstance(target, LoKrLayer): + target.update_layer(adapter_name, **kwargs) + else: + new_module = self._create_new_module(config, adapter_name, target, **kwargs) + self._replace_module(parent, target_name, new_module, target) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3115fff724b9e37661f001cd809f6c1005fdc337 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from peft.import_utils import is_bnb_4bit_available, is_bnb_available + +from .config import LoftQConfig, LoraConfig +from .gptq import QuantLinear +from .layer import Conv2d, Embedding, Linear, LoraLayer +from .model import LoraModel + + +__all__ = ["LoraConfig", "LoftQConfig", "Conv2d", "Embedding", "LoraLayer", "Linear", "LoraModel", "QuantLinear"] + + +def __getattr__(name): + if (name == "Linear8bitLt") and is_bnb_available(): + from .bnb import Linear8bitLt + + return Linear8bitLt + + if (name == "Linear4bit") and is_bnb_4bit_available(): + from .bnb import Linear4bit + + return Linear4bit + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21a3027a606da0fce3ab609bd377eda5e10e6896 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c68857cfb8211a0807e78aa1fad697fd0ac0f934 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3a0c5fc02c29c2606daf7574802e5923fd843e2 Binary 
files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb8d454200f8b0427fbe0a6544b0589bb66c49e9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..587e180a8a8cf483c1ebcab8f75c9f14d000caf6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f93e0a274e9ab9c557db4e608df3131c7cd9832 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc99c9ad08f5c8f340782aa18cb4332964bee29c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb59048c29acb32a45ce54907f1909c72892d033 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/gptq.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/gptq.py new file mode 100644 index 0000000000000000000000000000000000000000..333dfa6feb7595e185ae81f540aaa18fc1f2233a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/gptq.py @@ -0,0 +1,114 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
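The module-level `__getattr__` in `lora/__init__.py` above is the PEP 562 lazy-export pattern: the bitsandbytes-backed classes are imported only on first attribute access, so importing `peft.tuners.lora` never loads `bitsandbytes` unless those classes are used. A standalone sketch of the same pattern follows; the package and names are illustrative, not part of this diff.

```py
# mypkg/__init__.py -- illustrative package, not part of this diff.

def __getattr__(name):
    # Called only when normal module attribute lookup fails (PEP 562),
    # so the heavy optional dependency is imported on first access.
    if name == "GpuLinear":
        from ._gpu_backend import GpuLinear  # deferred optional import

        return GpuLinear

    raise AttributeError(f"module {__name__} has no attribute {name}")
```

Accessing `mypkg.GpuLinear` triggers the deferred import once; any other missing name still raises `AttributeError` as usual.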
+ +from typing import Any, Optional + +import torch + +from peft.tuners.lora.layer import LoraLayer +from peft.tuners.tuners_utils import BaseTunerLayer +from peft.utils import get_auto_gptq_quant_linear + + +class QuantLinear(torch.nn.Module, LoraLayer): + def __init__( + self, + base_layer, + adapter_name: str, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + init_lora_weights: bool = True, + use_rslora: bool = False, + use_dora: bool = False, + **kwargs, + ): + super().__init__() + LoraLayer.__init__(self, base_layer) + + if use_dora: + raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") + + # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter + # for backwards compatibility + self.quant_linear_module = base_layer + self._active_adapter = adapter_name + self.update_layer( + adapter_name, + r, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + init_lora_weights=init_lora_weights, + use_rslora=use_rslora, + use_dora=use_dora, + ) + + def forward(self, x: torch.Tensor): + # note: logic differs from default Linear because merging is not supported + result = self.quant_linear_module(x) + + if self.disable_adapters: + return result + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + x = x.to(lora_A.weight.dtype) + + output = lora_B(lora_A(dropout(x))) + if requires_conversion: + output = output.to(expected_dtype) + output = output * scaling + result += output + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "lora." + rep + + # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102 + # def reset_lora_parameters(self, adapter_name): + # if adapter_name in self.lora_A.keys(): + # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight) + # torch.nn.init.zeros_(self.lora_B[adapter_name].weight) + + +def dispatch_gptq( + target: torch.nn.Module, + adapter_name: str, + **kwargs: Any, +) -> Optional[torch.nn.Module]: + new_module = None + + if isinstance(target, BaseTunerLayer): + target_base_layer = target.get_base_layer() + else: + target_base_layer = target + + gptq_quantization_config = kwargs.get("gptq_quantization_config", None) + AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) + + if AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear): + new_module = QuantLinear(target, adapter_name, **kwargs) + target.qweight = target_base_layer.qweight + + return new_module diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7dd3a6ba3e4442354302c5bfe3da75f1d6f69d02 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import PromptEncoderConfig, PromptEncoderReparameterizationType +from .model import PromptEncoder + + +__all__ = ["PromptEncoder", "PromptEncoderConfig", "PromptEncoderReparameterizationType"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba9388592ccadfec320152f41ec373a60d8b9202 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acd62375da01ef8afb880ca15c6bfd5abce4eb86 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94b32b2dedfcc87fc1b25f391b2fb2372d9e825e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/config.py new file mode 100644 index 0000000000000000000000000000000000000000..75deffb4299df4178e80d74dde47b0470ea06c25 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/config.py @@ -0,0 +1,59 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +from dataclasses import dataclass, field +from typing import Union + +from peft.config import PromptLearningConfig +from peft.utils import PeftType + + +class PromptEncoderReparameterizationType(str, enum.Enum): + MLP = "MLP" + LSTM = "LSTM" + + +@dataclass +class PromptEncoderConfig(PromptLearningConfig): + """ + This is the configuration class to store the configuration of a [`PromptEncoder`]. + + Args: + encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]): + The type of reparameterization to use. 
+ encoder_hidden_size (`int`): The hidden size of the prompt encoder. + encoder_num_layers (`int`): The number of layers of the prompt encoder. + encoder_dropout (`float`): The dropout probability of the prompt encoder. + """ + + encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field( + default=PromptEncoderReparameterizationType.MLP, + metadata={"help": "How to reparameterize the prompt encoder"}, + ) + encoder_hidden_size: int = field( + default=None, + metadata={"help": "The hidden size of the prompt encoder"}, + ) + encoder_num_layers: int = field( + default=2, + metadata={"help": "The number of layers of the prompt encoder"}, + ) + encoder_dropout: float = field( + default=0.0, + metadata={"help": "The dropout of the prompt encoder"}, + ) + + def __post_init__(self): + self.peft_type = PeftType.P_TUNING diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/model.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/model.py new file mode 100644 index 0000000000000000000000000000000000000000..ade2b1128158376c134441687803b85d444cfb96 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/model.py @@ -0,0 +1,130 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Based on https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/prompt_encoder.py +# with some refactor +import warnings + +import torch + +from .config import PromptEncoderConfig, PromptEncoderReparameterizationType + + +class PromptEncoder(torch.nn.Module): + """ + The prompt encoder network that is used to generate the virtual token embeddings for p-tuning. + + Args: + config ([`PromptEncoderConfig`]): The configuration of the prompt encoder. + + Example: + + ```py + >>> from peft import PromptEncoder, PromptEncoderConfig + + >>> config = PromptEncoderConfig( + ... peft_type="P_TUNING", + ... task_type="SEQ_2_SEQ_LM", + ... num_virtual_tokens=20, + ... token_dim=768, + ... num_transformer_submodules=1, + ... num_attention_heads=12, + ... num_layers=12, + ... encoder_reparameterization_type="MLP", + ... encoder_hidden_size=768, + ... ) + + >>> prompt_encoder = PromptEncoder(config) + ``` + + **Attributes**: + - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder. + - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`. + - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and + `encoder_reparameterization_type="LSTM"`. + - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model. + - **input_size** (`int`) -- The input size of the prompt encoder. + - **output_size** (`int`) -- The output size of the prompt encoder. + - **hidden_size** (`int`) -- The hidden size of the prompt encoder. + - **total_virtual_tokens** (`int`): The total number of virtual tokens of the + prompt encoder. 
+ - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt + encoder. + + + Input shape: (`batch_size`, `total_virtual_tokens`) + + Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) + """ + + def __init__(self, config): + super().__init__() + self.token_dim = config.token_dim + self.input_size = self.token_dim + self.output_size = self.token_dim + self.hidden_size = config.encoder_hidden_size + self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules + self.encoder_type = config.encoder_reparameterization_type + + # embedding + self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim) + if not config.inference_mode: + if self.encoder_type == PromptEncoderReparameterizationType.LSTM: + lstm_dropout = config.encoder_dropout + num_layers = config.encoder_num_layers + # LSTM + self.lstm_head = torch.nn.LSTM( + input_size=self.input_size, + hidden_size=self.hidden_size, + num_layers=num_layers, + dropout=lstm_dropout, + bidirectional=True, + batch_first=True, + ) + + self.mlp_head = torch.nn.Sequential( + torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2), + torch.nn.ReLU(), + torch.nn.Linear(self.hidden_size * 2, self.output_size), + ) + + elif self.encoder_type == PromptEncoderReparameterizationType.MLP: + encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers + if config.encoder_num_layers != encoder_num_layers_default: + warnings.warn( + f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. " + f"Exactly {encoder_num_layers_default} MLP layers are used." + ) + layers = [ + torch.nn.Linear(self.input_size, self.hidden_size), + torch.nn.ReLU(), + torch.nn.Linear(self.hidden_size, self.hidden_size), + torch.nn.ReLU(), + torch.nn.Linear(self.hidden_size, self.output_size), + ] + self.mlp_head = torch.nn.Sequential(*layers) + + else: + raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") + + def forward(self, indices): + input_embeds = self.embedding(indices) + if self.encoder_type == PromptEncoderReparameterizationType.LSTM: + output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0]) + elif self.encoder_type == PromptEncoderReparameterizationType.MLP: + output_embeds = self.mlp_head(input_embeds) + else: + raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") + + return output_embeds diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__init__.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0f368695edbd7fb7bb3c68d9e918bd16752b873 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
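Before moving on to Poly: to make the `PromptEncoder` shapes from `p_tuning/model.py` above concrete, here is a small sketch of driving it in MLP mode. The config values are illustrative; only the shape arithmetic (`total_virtual_tokens = num_virtual_tokens * num_transformer_submodules`) is taken from the code.

```py
# Sketch only: config values are illustrative, not prescribed by this diff.
import torch

from peft import PromptEncoder, PromptEncoderConfig

config = PromptEncoderConfig(
    task_type="CAUSAL_LM",                  # any valid task type works here
    num_virtual_tokens=20,
    token_dim=768,
    num_transformer_submodules=1,           # total_virtual_tokens = 20 * 1
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=768,
)
encoder = PromptEncoder(config)

# Input: virtual-token indices of shape (batch_size, total_virtual_tokens).
indices = torch.arange(encoder.total_virtual_tokens).unsqueeze(0)
prompts = encoder(indices)
print(prompts.shape)  # torch.Size([1, 20, 768]), per the shapes documented above
```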
+ +from .config import PolyConfig +from .layer import Linear, PolyLayer +from .model import PolyModel + + +__all__ = ["Linear", "PolyConfig", "PolyLayer", "PolyModel"] diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/config.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/config.py new file mode 100644 index 0000000000000000000000000000000000000000..3abbc93b022dd53b5fd5c373b029dba9084a0b9b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/config.py @@ -0,0 +1,89 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field +from typing import List, Literal, Optional, Union + +from peft.config import PeftConfig +from peft.utils import PeftType + + +@dataclass +class PolyConfig(PeftConfig): + """ + This is the configuration class to store the configuration of a [`PolyModel`]. + - [Polytropon (Poly)](https://arxiv.org/abs/2202.13914) + - [Multi-Head Routing (MHR)](https://arxiv.org/abs/2211.03831) + + Args: + r (`int`): Attention dimension of each Lora in Poly. + target_modules (`Union[List[str],str]`): The names of the modules to apply Poly to. + modules_to_save (`List[str]`): List of modules apart from Poly layers to be set as trainable + and saved in the final checkpoint. + init_weights (bool): Whether to perform initialization of Poly weights. + poly_type (`Literal["poly"]`): The variant of the Poly module to use. Currently, only "poly" + is supported. + n_tasks (`int`): The number of tasks in a multitasking scenario. + n_skills (`int`): The number of skills (LoRA) in each Poly layer. + n_splits (`int`): The number of splits within each LoRA of a Poly layer. A value greater + than 1 indicates the use of Multi-Head Routing (MHR). + """ + + r: int = field(default=8, metadata={"help": "Lora attention dimension"}) + target_modules: Optional[Union[List[str], str]] = field( + default=None, + metadata={ + "help": "List of module names or regex expression of the module names to replace with Poly." + "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " + }, + ) + modules_to_save: Optional[List[str]] = field( + default=None, + metadata={ + "help": "List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. " + "For example, in Sequence Classification or Token Classification tasks, " + "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." + }, + ) + init_weights: bool = field( + default=True, + metadata={ + "help": ( + "Whether to initialize the weights of the Poly layers with their default initialization. Don't change " + "this setting, except if you know exactly what you're doing." + ), + }, + ) + poly_type: Literal["poly"] = field( + default="poly", + metadata={"help": 'Type of Poly modules to be used. 
Currently only "poly" is supported.'}, + ) + n_tasks: int = field( + default=1, + metadata={"help": "Number of tasks in multitasking scenario."}, + ) + n_skills: int = field( + default=4, + metadata={"help": "Number of skills (LoRA) in each Poly layer."}, + ) + n_splits: int = field( + default=1, + metadata={"help": "Number of splits within each LoRA of a Poly layer."}, + ) + + def __post_init__(self): + self.peft_type = PeftType.POLY + self.target_modules = ( + set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules + ) diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/layer.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..debb40beee29b1cfdf2072a293d4c61042280227 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/layer.py @@ -0,0 +1,171 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Any + +import torch +import torch.nn as nn + +from peft.tuners.tuners_utils import BaseTunerLayer + +from .config import PolyConfig +from .router import get_router + + +class PolyLayer(BaseTunerLayer): + # All names of layers that may contain (trainable) adapter weights + adapter_layer_names = ("poly_lora_A", "poly_lora_B", "poly_router") + # All names of other parameters that may contain adapter-related parameters + other_param_names = ("r", "n_tasks", "n_skills", "n_splits") + + def __init__(self, base_layer: nn.Module, **kwargs): + self.base_layer = base_layer + self.r = {} + self.n_tasks = {} + self.n_skills = {} + self.n_splits = {} + self.poly_type = {} + self.poly_router = nn.ModuleDict() + self.poly_lora_A = nn.ParameterDict() + self.poly_lora_B = nn.ParameterDict() + self.kwargs = kwargs + + base_layer = self.get_base_layer() + if isinstance(base_layer, nn.Linear): + in_features, out_features = base_layer.in_features, base_layer.out_features + else: + raise ValueError(f"Unsupported layer type {type(base_layer)}") + + self.in_features = in_features + self.out_features = out_features + + def update_layer(self, adapter_name, poly_config): + if poly_config.r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {poly_config.r}") + + self.r[adapter_name] = poly_config.r + self.n_tasks[adapter_name] = poly_config.n_tasks + self.n_skills[adapter_name] = poly_config.n_skills + self.n_splits[adapter_name] = poly_config.n_splits + self.poly_type[adapter_name] = poly_config.poly_type + + self.poly_lora_A[adapter_name] = nn.Parameter( + torch.empty( + poly_config.n_splits, + poly_config.n_skills, + self.in_features // poly_config.n_splits, + poly_config.r, + ) + ) + self.poly_lora_B[adapter_name] = nn.Parameter( + torch.empty( + poly_config.n_splits, + poly_config.n_skills, + poly_config.r, + self.out_features // poly_config.n_splits, + ) + ) + self.poly_router[adapter_name] = get_router(poly_config) + + 
self.reset_poly_parameters(adapter_name, init_weights=poly_config.init_weights) + + weight = getattr(self.get_base_layer(), "weight", None) + if weight is not None: + # the layer is already completely initialized, this is an update + if weight.dtype.is_floating_point or weight.dtype.is_complex: + self.to(weight.device, dtype=weight.dtype) + else: + self.to(weight.device) + self.set_adapter(self.active_adapters) + + def reset_poly_parameters(self, adapter_name, init_weights): + if adapter_name in self.poly_lora_A.keys(): + # initialize A the same way as the default for nn.Linear + # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L269 + n_splits, n_skills, d, r = self.poly_lora_A[adapter_name].shape + for skill in range(n_skills): + for split in range(n_splits): + param = torch.empty((r, d)) + torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5)) + self.poly_lora_A[adapter_name].data[split, skill, :, :] = param.T + + if init_weights: + # initialize B to zero + torch.nn.init.zeros_(self.poly_lora_B[adapter_name]) + else: + # initialize B the same way as the default for nn.Linear + n_splits, n_skills, r, d = self.poly_lora_B[adapter_name].shape + for skill in range(n_skills): + for split in range(n_splits): + param = torch.empty((d, r)) + torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5)) + self.poly_lora_B[adapter_name].data[split, skill, :, :] = param.T + + # initialize the router + self.poly_router[adapter_name].reset() + + +class Linear(nn.Module, PolyLayer): + # Poly implemented in a dense layer + def __init__( + self, + base_layer, + adapter_name: str, + poly_config: PolyConfig, + **kwargs, + ) -> None: + super().__init__() + PolyLayer.__init__(self, base_layer, **kwargs) + + self._active_adapter = adapter_name + self.update_layer(adapter_name, poly_config) + + def forward(self, x: torch.Tensor, *args: Any, task_ids: torch.Tensor = None, **kwargs: Any) -> torch.Tensor: + previous_dtype = x.dtype + if self.disable_adapters: + result = self.base_layer(x, *args, **kwargs) + else: + result = self.base_layer(x, *args, **kwargs) + for active_adapter in self.active_adapters: + if active_adapter not in self.poly_lora_A.keys(): + continue + + r = self.r[active_adapter] + poly_router = self.poly_router[active_adapter] + poly_lora_A = self.poly_lora_A[active_adapter] + poly_lora_B = self.poly_lora_B[active_adapter] + + # Combine the output of LoRAs + # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L293 + mixing_weights = poly_router(task_ids=task_ids, input_ids=x) + bs, n_splits, n_skills = mixing_weights.size() + + # A is n_splits, n_skills, D // n_splits, rank + # we want bs, n_splits, D // n_splits, rank + A = torch.einsum("bqs,qsdr->bqdr", (mixing_weights, poly_lora_A)) + B = torch.einsum("bqs,qsrd->bqrd", (mixing_weights, poly_lora_B)) + + A = A.reshape(bs, self.in_features, r) + B = B.transpose(1, 2).reshape(bs, r, self.out_features) + + x = x.to(A.dtype) + result += x.bmm(A).bmm(B) / r + + result = result.to(previous_dtype) + return result + + def __repr__(self) -> str: + rep = super().__repr__() + return "poly."
+ rep diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/router.py b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/router.py new file mode 100644 index 0000000000000000000000000000000000000000..0249398a9fc36d53bc0b4f022a8410514688a9f1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/router.py @@ -0,0 +1,83 @@ +# Copyright 2023-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod + +import torch +from torch import nn +from torch.distributions.relaxed_bernoulli import RelaxedBernoulli + +from .config import PolyConfig + + +EPS = 1e-12 + + +def get_router(poly_config: PolyConfig) -> nn.Module: + if poly_config.poly_type == "poly": + return PolyRouter(poly_config) + else: + raise ValueError( + f"Unsupported poly_type: {poly_config.poly_type}. " + "Currently, only the following types are supported: " + "`poly`." + ) + + +class Router(nn.Module, ABC): + @abstractmethod + def reset(self): + ... + + @abstractmethod + def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor): + ... + + +class PolyRouter(Router): + # It's a simplified implementation of + # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L138 + def __init__(self, poly_config: PolyConfig): + super().__init__() + + self.poly_type = poly_config.poly_type + self.n_tasks = poly_config.n_tasks + self.n_skills = poly_config.n_skills + self.n_splits = poly_config.n_splits + + self.module_logits = nn.Parameter(torch.empty((self.n_tasks, self.n_splits * self.n_skills))) + + def reset(self): + torch.nn.init.uniform_(self.module_logits, -1e-3, 1e-3) + + def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor): + if task_ids is None: + raise ValueError("task_ids should not be None.") + if task_ids.max().item() >= self.n_tasks: + raise ValueError(f"Only {self.n_tasks} tasks available. 
Found task id = {task_ids.max().item()}") + + # move task id to input's device + task_ids = task_ids.to(self.module_logits.device) + + module_logits = self.module_logits[task_ids] + module_logits = module_logits.view(-1, self.n_splits, self.n_skills) + + if self.training: + module_logits = RelaxedBernoulli(temperature=1.0, logits=module_logits).rsample() + else: + module_logits = torch.sigmoid(module_logits) + + module_weights = module_logits / (module_logits.sum(dim=-1, keepdim=True) + EPS) + + return module_weights diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9acd1eac5e8fc14e0a3014a6b60fd5ba6669461a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bfab7d06ae3e010c9fe5df2fe60bf33b10c607a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..075898d1665822156198c91ee43c2329c47c02ac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__about__.py b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__about__.py new file mode 100644 index 0000000000000000000000000000000000000000..cdba39a3cbf89d5f121b4948f36cbc4aefcb3585 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__about__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2023-present Antoine Pitrou +# +# SPDX-License-Identifier: Apache-2.0 + +__version__ = "0.6" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__init__.py b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ea776328c5793f4a2582d4a0034902a1d97a132 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__init__.py @@ -0,0 +1,110 @@ +# SPDX-FileCopyrightText: 2023-present Antoine Pitrou +# +# SPDX-License-Identifier: Apache-2.0 + + +_ERROR_MSG = """\ +Disallowed deserialization of 'arrow.py_extension_type': +storage_type = {storage_type} +serialized = {serialized} +pickle disassembly:\n{pickle_disassembly} + +Reading of untrusted Parquet or Feather files with a PyExtensionType column +allows arbitrary code execution. 
+If you trust this file, you can enable reading the extension type by one of: + +- upgrading to pyarrow >= 14.0.1 and calling `pa.PyExtensionType.set_auto_load(True)` +- disabling this error by running `import pyarrow_hotfix; pyarrow_hotfix.uninstall()` + +We strongly recommend updating your Parquet/Feather files to use extension types +derived from `pyarrow.ExtensionType` instead, and registering this type explicitly. +See https://arrow.apache.org/docs/dev/python/extending_types.html#defining-extension-types-user-defined-types +for more details. +""" + +try: + _import_error = ModuleNotFoundError +except NameError: + _import_error = ImportError  # ModuleNotFoundError unavailable in py3.5 + + +def install(): + import atexit + try: + import pyarrow as pa + except _import_error: + # Not installed; nothing to do here. + return + + if not hasattr(pa, "ExtensionType"): + # Unsupported PyArrow version? + return + + if getattr(pa, "_hotfix_installed", False): + return + + class ForbiddenExtensionType(pa.ExtensionType): + def __arrow_ext_serialize__(self): + return b"" + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + import io + import pickletools + out = io.StringIO() + pickletools.dis(serialized, out) + raise RuntimeError( + _ERROR_MSG.format( + storage_type=storage_type, + serialized=serialized, + pickle_disassembly=out.getvalue(), + ) + ) + + if hasattr(pa, "unregister_extension_type"): + # 0.15.0 <= PyArrow + pa.unregister_extension_type("arrow.py_extension_type") + pa.register_extension_type(ForbiddenExtensionType(pa.null(), + "arrow.py_extension_type")) + elif hasattr(pa.lib, "_unregister_py_extension_type"): + # 0.14.1 <= PyArrow < 0.15.0 + pa.lib._unregister_py_extension_type() + atexit.unregister(pa.lib._unregister_py_extension_type) + else: + # PyArrow 0.14.0 + del pa.lib._extension_types_initializer + + pa._hotfix_installed = True + + +def uninstall(): + import atexit + try: + import pyarrow as pa + except _import_error: + # Not installed; nothing to do here. + return + + if not hasattr(pa, "ExtensionType"): + # Unsupported PyArrow version?
+ return + + if not getattr(pa, "_hotfix_installed", False): + return + + if hasattr(pa, "unregister_extension_type"): + # 0.15.0 <= PyArrow + pa.unregister_extension_type("arrow.py_extension_type") + pa.lib._register_py_extension_type() + elif hasattr(pa.lib, "_register_py_extension_type"): + # 0.14.1 <= PyArrow < 0.15.0 + pa.lib._register_py_extension_type() + atexit.register(pa.lib._unregister_py_extension_type) + elif hasattr(pa.lib, "_ExtensionTypesInitializer"): + # PyArrow 0.14.0 + pa.lib._extension_types_initializer = pa.lib._ExtensionTypesInitializer() + + pa._hotfix_installed = False + + +install() diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__about__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__about__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e275677c6f65aa650b60c07aba7bb44aef2f827b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__about__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9385f14201621d0a9ad78f295feae40c504e7bb7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow_hotfix/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__init__.py b/llmeval-env/lib/python3.10/site-packages/rouge_score/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9454c646e57e67a21b7ad4881a53ab0f16e503c5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/rouge_score/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2022 The rouge_score Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
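Returning to `pyarrow_hotfix` above: since `install()` is called at the bottom of `__init__.py`, importing the package is the entire opt-in. A hedged sketch of typical use follows; the file name is illustrative.

```py
# Importing the package activates the guard: install() runs at the bottom of
# pyarrow_hotfix/__init__.py and re-registers 'arrow.py_extension_type' as a
# type whose deserialization raises.
import pyarrow_hotfix  # noqa: F401

import pyarrow.parquet as pq

# With the hotfix installed, reading an untrusted file whose schema carries a
# PyExtensionType column raises RuntimeError (with a pickle disassembly)
# instead of silently unpickling attacker-controlled bytes.
# table = pq.read_table("untrusted.parquet")  # illustrative path

# Opting back out is possible, though not recommended for untrusted data:
pyarrow_hotfix.uninstall()
```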
+ + diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d82c74b663b43f5a680cfb80f94e772d6fb2b4df Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/create_pyrouge_files.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/create_pyrouge_files.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4796a5697646f821ffeb1eb75432d7c50bffbbdc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/create_pyrouge_files.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/io.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed446a750e25dd8aa8d5383b84d367524a737826 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/io.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/io_test.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/io_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e933ec4eb80441513dafa06f7a56bee371f1888 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/io_test.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..932f8fcf9907885087dc7da656b7f5c0720a6bbd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge_scorer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge_scorer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d8548d85879985227d2bfa80637b96f2a17314c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge_scorer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge_scorer_test.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge_scorer_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02260c3d359c19d551946d3b43dbd6e752ab5de7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/rouge_scorer_test.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/scoring.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/scoring.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48e9abc62ae064f7da781574cd46f0f0a348dba4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/scoring.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/scoring_test.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/scoring_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a6b0e51ad121416f2fd3a6d011349adc1ac0d05 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/scoring_test.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/test_util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d32baacef741efd5f40b1855b2072145d2de7c1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/test_util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7988ce5bf5f0acdd45e2133b65649b5b357394dd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenize_test.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenize_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59b3744c5c509f5d3393c6f1823d36b1ba2efc1a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenize_test.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenizers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenizers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b6acf0d0cc02e70d5253a18caa7998055517424 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenizers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenizers_test.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenizers_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a32a1519905881be9a1a45306544a3fb9fcaeee Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/rouge_score/__pycache__/tokenizers_test.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/io.py b/llmeval-env/lib/python3.10/site-packages/rouge_score/io.py new file mode 100644 index 0000000000000000000000000000000000000000..ffd96ef5859861fe5a3c6d145fa435dbbfcf8857 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/rouge_score/io.py @@ -0,0 +1,182 @@ +# Copyright 2022 The rouge_score Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Library for reading/writing input and score files.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import glob + +from absl import logging +import six +from six.moves import zip +from six.moves import zip_longest + + + +def compute_scores_and_write_to_csv(target_filepattern, + prediction_filepattern, + output_filename, + scorer, + aggregator, + delimiter="\n"): + """Runs aggregate score calculations and outputs results to a CSV file. + + Args: + target_filepattern: Pattern for files containing target text. + prediction_filepattern: Pattern for files containing prediction text. + output_filename: Name of file to write results to. + scorer: A BaseScorer object to compute scores. + aggregator: An aggregator to aggregate scores. If None, outputs are + per-example scores. + delimiter: Record delimiter. + """ + + target_filenames = _glob(target_filepattern) + prediction_filenames = _glob(prediction_filepattern) + if (len(target_filenames) < 1 or + len(target_filenames) != len(prediction_filenames)): + raise ValueError("Must have equal and positive number of target and " + "prediction files. Found: %d target files (%s)," + " %d prediction files (%s)." % + (len(target_filenames), target_filepattern, + len(prediction_filenames), prediction_filepattern)) + + scores = _compute_scores(target_filenames, prediction_filenames, scorer, + delimiter) + if aggregator: + for score in scores: + aggregator.add_scores(score) + _write_aggregates_to_csv(output_filename, aggregator.aggregate()) + else: + _write_scores_to_csv(output_filename, scores) + + +def _glob(filepattern): + return glob.glob(filepattern) # pylint: disable=unreachable + + +def _open(filepattern, mode="r"): + return open(filepattern, mode) # pylint: disable=unreachable + + +def _record_gen(filename, delimiter): + """Opens file and yields records separated by delimiter.""" + with _open(filename) as f: + records = f.read().split(six.ensure_str(delimiter)) + if records[-1]: + # Need a final delimiter at end of file to be able to detect an empty last + # record. + logging.warning("Expected delimiter at end of file") + else: + records = records[:-1] + for record in records: + yield record + + +def _compute_scores(target_filenames, prediction_filenames, scorer, delimiter): + """Computes aggregate scores across the given target and prediction files. + + Args: + target_filenames: List of filenames from which to read target lines. + prediction_filenames: List of filenames from which to read prediction lines. + scorer: A BaseScorer object to compute scores. + delimiter: String delimiter between each record in the input files. + + Returns: + A list of dicts mapping score_type to Score objects. + Raises: + ValueError: If invalid targets or predictions are provided. + """ + + scores = [] + for target_filename, prediction_filename in zip( + sorted(target_filenames), sorted(prediction_filenames)): + logging.info("Reading targets from %s.", target_filename) + logging.info("Reading predictions from %s.", prediction_filename) + targets = _record_gen(target_filename, delimiter) + preds = _record_gen(prediction_filename, delimiter) + for target_rec, prediction_rec in zip_longest(targets, preds): + if target_rec is None or prediction_rec is None: + raise ValueError("Must have equal number of lines across target and " + "prediction files. Mismatch between files: %s, %s."
% + (target_filename, prediction_filename)) + scores.append(scorer.score(target_rec, prediction_rec)) + + return scores + + +def _write_aggregates_to_csv(output_filename, aggregates): + """Writes aggregate scores to an output CSV file. + + Output file is a comma-separated file where each line has the format: + score_type-(P|R|F),low_ci,mean,high_ci + + P/R/F indicates whether the score is a precision, recall or f-measure. + + Args: + output_filename: Name of file to write results to. + aggregates: A dict mapping each score_type to an AggregateScore object. + """ + + logging.info("Writing results to %s.", output_filename) + with _open(output_filename, "w") as output_file: + output_file.write("score_type,low,mid,high\n") + for score_type, aggregate in sorted(aggregates.items()): + output_file.write("%s-R,%f,%f,%f\n" % + (score_type, aggregate.low.recall, aggregate.mid.recall, + aggregate.high.recall)) + output_file.write("%s-P,%f,%f,%f\n" % + (score_type, aggregate.low.precision, + aggregate.mid.precision, aggregate.high.precision)) + output_file.write("%s-F,%f,%f,%f\n" % + (score_type, aggregate.low.fmeasure, + aggregate.mid.fmeasure, aggregate.high.fmeasure)) + logging.info("Finished writing results.") + + +def _write_scores_to_csv(output_filename, scores): + """Writes scores for each individual example to an output CSV file. + + Output file is a comma-separated file where each line has the format: + id,score1,score2,score3,... + + The header row indicates the type of each score column. + + Args: + output_filename: Name of file to write results to. + scores: A list of dicts mapping each score_type to a Score object. + """ + + if len(scores) < 1: + logging.warning("No scores to write") + return + rouge_types = sorted(scores[0].keys()) + + logging.info("Writing results to %s.", output_filename) + with _open(output_filename, "w") as out_file: + out_file.write("id") + for rouge_type in rouge_types: + out_file.write(",{t}-P,{t}-R,{t}-F".format(t=rouge_type)) + out_file.write("\n") + for i, result in enumerate(scores): + out_file.write("%d" % i) + for rouge_type in rouge_types: + out_file.write(",%f,%f,%f" % + (result[rouge_type].precision, result[rouge_type].recall, + result[rouge_type].fmeasure)) + out_file.write("\n") + logging.info("Finished writing results.") diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/io_test.py b/llmeval-env/lib/python3.10/site-packages/rouge_score/io_test.py new file mode 100644 index 0000000000000000000000000000000000000000..591ae582d38722530420ff78364f6acb85991b2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/rouge_score/io_test.py @@ -0,0 +1,92 @@ +# Copyright 2022 The rouge_score Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
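+# A minimal sketch of driving the io module exercised by these tests (file
+# patterns are illustrative; each input file holds newline-delimited records):
+#
+#   from rouge_score import io, rouge_scorer, scoring
+#   scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
+#   io.compute_scores_and_write_to_csv(
+#       "targets-*.txt", "preds-*.txt", "scores.csv",
+#       scorer, scoring.BootstrapAggregator())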
+ +"""Tests for rouge input/output library.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tempfile + +from absl.testing import absltest +from rouge_score import io +from rouge_score import rouge_scorer +from rouge_score import scoring +from rouge_score import test_util + + +class IoTest(absltest.TestCase): + + def testProducesValidOutput(self): + with tempfile.NamedTemporaryFile() as output_file: + output_filename = output_file.name + scorer = rouge_scorer.RougeScorer(["rouge1"], False) + io.compute_scores_and_write_to_csv(test_util.TARGETS_FILE, + test_util.PREDICTIONS_FILE, + output_filename, scorer, + scoring.BootstrapAggregator()) + with open(output_filename) as f: + csv_lines = f.readlines() + output_types = tuple((line.split(",")[0] for line in csv_lines)) + self.assertEqual(output_types[0], "score_type") + self.assertSameElements(output_types[1:], + ["rouge1-P", "rouge1-R", "rouge1-F"]) + + def testUnAggregated(self): + with tempfile.NamedTemporaryFile() as output_file: + output_filename = output_file.name + scorer = rouge_scorer.RougeScorer(["rouge1"], False) + io.compute_scores_and_write_to_csv(test_util.TARGETS_FILE, + test_util.PREDICTIONS_FILE, + output_filename, scorer, None) + with open(output_filename) as f: + csv_lines = f.readlines() + ids = tuple((line.split(",")[0] for line in csv_lines)) + self.assertEqual(ids[0], "id") + self.assertLen(csv_lines, 3) + + def testDelimitedFile(self): + with tempfile.NamedTemporaryFile() as output_file: + output_filename = output_file.name + scorer = rouge_scorer.RougeScorer(["rouge1"], False) + io.compute_scores_and_write_to_csv( + test_util.DELIMITED_FILE, + test_util.DELIMITED_FILE, + output_filename, + scorer, + None, + delimiter=":") + with open(output_filename) as f: + csv_lines = f.readlines() + ids = tuple((line.split(",")[0] for line in csv_lines)) + self.assertEqual(ids[0], "id") + self.assertLen(csv_lines, 5) + + def testAssertsOnInvalidInputFiles(self): + scorer = rouge_scorer.RougeScorer(["rouge1"], False) + with self.assertRaises(ValueError): + io.compute_scores_and_write_to_csv("invalid*", "invalid*", "invalid", + scorer, scoring.BootstrapAggregator()) + + def testAssertsOnInvalidRougeTypes(self): + scorer = rouge_scorer.RougeScorer(["rougex"], False) + with self.assertRaises(ValueError): + io.compute_scores_and_write_to_csv(test_util.TARGETS_FILE, + test_util.PREDICTIONS_FILE, "", scorer, + scoring.BootstrapAggregator()) + + +if __name__ == "__main__": + absltest.main() diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/rouge_scorer.py b/llmeval-env/lib/python3.10/site-packages/rouge_score/rouge_scorer.py new file mode 100644 index 0000000000000000000000000000000000000000..4c076bf73a05a2be9f4db46ae1f3e141059094f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/rouge_score/rouge_scorer.py @@ -0,0 +1,334 @@ +# Copyright 2022 The rouge_score Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Computes rouge scores between two text blobs. + +Implementation replicates the functionality in the original ROUGE package. See: + +Lin, Chin-Yew. ROUGE: a Package for Automatic Evaluation of Summaries. In +Proceedings of the Workshop on Text Summarization Branches Out (WAS 2004), +Barcelona, Spain, July 25 - 26, 2004. + +Default options are equivalent to running: +ROUGE-1.5.5.pl -e data -n 2 -a settings.xml + +Or with use_stemmer=True: +ROUGE-1.5.5.pl -m -e data -n 2 -a settings.xml + +In these examples settings.xml lists input files and formats. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re + +from absl import logging +import nltk +import numpy as np +import six +from six.moves import map +from six.moves import range +from rouge_score import scoring +from rouge_score import tokenizers + + +class RougeScorer(scoring.BaseScorer): + """Calculate rouges scores between two blobs of text. + + Sample usage: + scorer = RougeScorer(['rouge1', 'rougeL'], use_stemmer=True) + scores = scorer.score('The quick brown fox jumps over the lazy dog', + 'The quick brown dog jumps on the log.') + """ + + def __init__(self, rouge_types, use_stemmer=False, split_summaries=False, + tokenizer=None): + """Initializes a new RougeScorer. + + Valid rouge types that can be computed are: + rougen (e.g. rouge1, rouge2): n-gram based scoring. + rougeL: Longest common subsequence based scoring. + + Args: + rouge_types: A list of rouge types to calculate. + use_stemmer: Bool indicating whether Porter stemmer should be used to + strip word suffixes to improve matching. This arg is used in the + DefaultTokenizer, but other tokenizers might or might not choose to + use this. + split_summaries: whether to add newlines between sentences for rougeLsum + tokenizer: Tokenizer object which has a tokenize() method. + Returns: + A dict mapping rouge types to Score tuples. + """ + + self.rouge_types = rouge_types + if tokenizer: + self._tokenizer = tokenizer + else: + self._tokenizer = tokenizers.DefaultTokenizer(use_stemmer) + logging.info("Using default tokenizer.") + + self._split_summaries = split_summaries + + def score_multi(self, targets, prediction): + """Calculates rouge scores between targets and prediction. + + The target with the maximum f-measure is used for the final score for + each score type.. + + Args: + targets: list of texts containing the targets + prediction: Text containing the predicted text. + Returns: + A dict mapping each rouge type to a Score object. + Raises: + ValueError: If an invalid rouge type is encountered. + """ + score_dicts = [self.score(t, prediction) for t in targets] + max_score = {} + for k in self.rouge_types: + index = np.argmax([s[k].fmeasure for s in score_dicts]) + max_score[k] = score_dicts[index][k] + + return max_score + + def score(self, target, prediction): + """Calculates rouge scores between the target and prediction. + + Args: + target: Text containing the target (ground truth) text, + or if a list + prediction: Text containing the predicted text. + Returns: + A dict mapping each rouge type to a Score object. + Raises: + ValueError: If an invalid rouge type is encountered. + """ + + # Pre-compute target tokens and prediction tokens for use by different + # types, except if only "rougeLsum" is requested. 
+ if len(self.rouge_types) == 1 and self.rouge_types[0] == "rougeLsum": + target_tokens = None + prediction_tokens = None + else: + target_tokens = self._tokenizer.tokenize(target) + prediction_tokens = self._tokenizer.tokenize(prediction) + result = {} + + for rouge_type in self.rouge_types: + if rouge_type == "rougeL": + # Rouge from longest common subsequences. + scores = _score_lcs(target_tokens, prediction_tokens) + elif rouge_type == "rougeLsum": + # Note: Does not support multi-line text. + def get_sents(text): + if self._split_summaries: + sents = nltk.sent_tokenize(text) + else: + # Assume sentences are separated by newline. + sents = six.ensure_str(text).split("\n") + sents = [x for x in sents if len(x)] + return sents + + target_tokens_list = [ + self._tokenizer.tokenize(s) for s in get_sents(target)] + prediction_tokens_list = [ + self._tokenizer.tokenize(s) for s in get_sents(prediction)] + + scores = _summary_level_lcs(target_tokens_list, + prediction_tokens_list) + elif re.match(r"rouge[0-9]$", six.ensure_str(rouge_type)): + # Rouge from n-grams. + n = int(rouge_type[5:]) + if n <= 0: + raise ValueError("rougen requires positive n: %s" % rouge_type) + target_ngrams = _create_ngrams(target_tokens, n) + prediction_ngrams = _create_ngrams(prediction_tokens, n) + scores = _score_ngrams(target_ngrams, prediction_ngrams) + else: + raise ValueError("Invalid rouge type: %s" % rouge_type) + result[rouge_type] = scores + + return result + + +def _create_ngrams(tokens, n): + """Creates ngrams from the given list of tokens. + + Args: + tokens: A list of tokens from which ngrams are created. + n: Number of tokens to use, e.g. 2 for bigrams. + Returns: + A dictionary mapping each ngram to the number of occurrences. + """ + + ngrams = collections.Counter() + for ngram in (tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)): + ngrams[ngram] += 1 + return ngrams + + +def _score_lcs(target_tokens, prediction_tokens): + """Computes LCS (Longest Common Subsequence) rouge scores. + + Args: + target_tokens: Tokens from the target text. + prediction_tokens: Tokens from the predicted text. + Returns: + A Score object containing computed scores. + """ + + if not target_tokens or not prediction_tokens: + return scoring.Score(precision=0, recall=0, fmeasure=0) + + # Compute length of LCS from the bottom up in a table (DP approach). + lcs_table = _lcs_table(target_tokens, prediction_tokens) + lcs_length = lcs_table[-1][-1] + + precision = lcs_length / len(prediction_tokens) + recall = lcs_length / len(target_tokens) + fmeasure = scoring.fmeasure(precision, recall) + + return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure) + + +def _lcs_table(ref, can): + """Create 2-d LCS score table.""" + rows = len(ref) + cols = len(can) + lcs_table = [[0] * (cols + 1) for _ in range(rows + 1)] + for i in range(1, rows + 1): + for j in range(1, cols + 1): + if ref[i - 1] == can[j - 1]: + lcs_table[i][j] = lcs_table[i - 1][j - 1] + 1 + else: + lcs_table[i][j] = max(lcs_table[i - 1][j], lcs_table[i][j - 1]) + return lcs_table + + +def _backtrack_norec(t, ref, can): + """Read out LCS.""" + i = len(ref) + j = len(can) + lcs = [] + while i > 0 and j > 0: + if ref[i - 1] == can[j - 1]: + lcs.insert(0, i-1) + i -= 1 + j -= 1 + elif t[i][j - 1] > t[i - 1][j]: + j -= 1 + else: + i -= 1 + return lcs + + +def _summary_level_lcs(ref_sent, can_sent): + """ROUGE: Summary-level LCS, section 3.2 in ROUGE paper.
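+
+  With h union-LCS hits (after double-count removal) over m total reference
+  tokens and n total candidate tokens, recall = h / m, precision = h / n, and
+  the returned score is their F-measure.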
+ + Args: + ref_sent: list of tokenized reference sentences + can_sent: list of tokenized candidate sentences + + Returns: + summary level ROUGE score + """ + if not ref_sent or not can_sent: + return scoring.Score(precision=0, recall=0, fmeasure=0) + + m = sum(map(len, ref_sent)) + n = sum(map(len, can_sent)) + if not n or not m: + return scoring.Score(precision=0, recall=0, fmeasure=0) + + # get token counts to prevent double counting + token_cnts_r = collections.Counter() + token_cnts_c = collections.Counter() + for s in ref_sent: + # s is a list of tokens + token_cnts_r.update(s) + for s in can_sent: + token_cnts_c.update(s) + + hits = 0 + for r in ref_sent: + lcs = _union_lcs(r, can_sent) + # Prevent double-counting: + # The paper describes just computing hits += len(_union_lcs()), + # but the implementation prevents double counting. We also + # implement this as in version 1.5.5. + for t in lcs: + if token_cnts_c[t] > 0 and token_cnts_r[t] > 0: + hits += 1 + token_cnts_c[t] -= 1 + token_cnts_r[t] -= 1 + + recall = hits / m + precision = hits / n + fmeasure = scoring.fmeasure(precision, recall) + return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure) + + +def _union_lcs(ref, c_list): + """Find union LCS between a ref sentence and list of candidate sentences. + + Args: + ref: list of tokens + c_list: list of candidate sentences, each a list of tokens + + Returns: + List of tokens in ref representing union LCS. + """ + lcs_list = [lcs_ind(ref, c) for c in c_list] + return [ref[i] for i in _find_union(lcs_list)] + + +def _find_union(lcs_list): + """Finds union LCS given a list of LCS.""" + return sorted(list(set().union(*lcs_list))) + + +def lcs_ind(ref, can): + """Returns the indices (into ref) of one longest common subsequence.""" + t = _lcs_table(ref, can) + return _backtrack_norec(t, ref, can) + + +def _score_ngrams(target_ngrams, prediction_ngrams): + """Compute n-gram based rouge scores. + + Args: + target_ngrams: A Counter object mapping each ngram to number of + occurrences for the target text. + prediction_ngrams: A Counter object mapping each ngram to number of + occurrences for the prediction text. + Returns: + A Score object containing computed scores. + """ + + intersection_ngrams_count = 0 + for ngram in six.iterkeys(target_ngrams): + intersection_ngrams_count += min(target_ngrams[ngram], + prediction_ngrams[ngram]) + target_ngrams_count = sum(target_ngrams.values()) + prediction_ngrams_count = sum(prediction_ngrams.values()) + + precision = intersection_ngrams_count / max(prediction_ngrams_count, 1) + recall = intersection_ngrams_count / max(target_ngrams_count, 1) + fmeasure = scoring.fmeasure(precision, recall) + + return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure) diff --git a/llmeval-env/lib/python3.10/site-packages/rouge_score/tokenizers_test.py b/llmeval-env/lib/python3.10/site-packages/rouge_score/tokenizers_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b2bb6929f42034cb6e26329337175bf4142ff25e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/rouge_score/tokenizers_test.py @@ -0,0 +1,39 @@ +# Copyright 2022 The rouge_score Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tokenizers.""" + +from absl.testing import absltest +from rouge_score import tokenizers + + +class TokenizersTest(absltest.TestCase): + + def test_default_tokenizer_no_stemmer_init(self): + tokenizer = tokenizers.DefaultTokenizer(use_stemmer=False) + self.assertIsInstance(tokenizer, tokenizers.Tokenizer) + + result = tokenizer.tokenize("this is a test") + self.assertListEqual(["this", "is", "a", "test"], result) + + def test_default_tokenizer_with_stemmer_init(self): + tokenizer = tokenizers.DefaultTokenizer(use_stemmer=True) + self.assertIsInstance(tokenizer, tokenizers.Tokenizer) + + result = tokenizer.tokenize("the friends had a meeting") + self.assertListEqual(["the", "friend", "had", "a", "meet"], result) + + +if __name__ == "__main__": + absltest.main() diff --git a/llmeval-env/lib/python3.10/site-packages/urllib3/fields.py b/llmeval-env/lib/python3.10/site-packages/urllib3/fields.py new file mode 100644 index 0000000000000000000000000000000000000000..3e258a5d8ba0fe849217a0cf1f52df61a461e32a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/urllib3/fields.py @@ -0,0 +1,341 @@ +from __future__ import annotations + +import email.utils +import mimetypes +import typing + +_TYPE_FIELD_VALUE = typing.Union[str, bytes] +_TYPE_FIELD_VALUE_TUPLE = typing.Union[ + _TYPE_FIELD_VALUE, + typing.Tuple[str, _TYPE_FIELD_VALUE], + typing.Tuple[str, _TYPE_FIELD_VALUE, str], +] + + +def guess_content_type( + filename: str | None, default: str = "application/octet-stream" +) -> str: + """ + Guess the "Content-Type" of a file. + + :param filename: + The filename to guess the "Content-Type" of using :mod:`mimetypes`. + :param default: + If no "Content-Type" can be guessed, default to `default`. + """ + if filename: + return mimetypes.guess_type(filename)[0] or default + return default + + +def format_header_param_rfc2231(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + Helper function to format and quote a single header parameter using the + strategy defined in RFC 2231. + + Particularly useful for header parameters which might contain + non-ASCII values, like file names. This follows + `RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_. + + :param name: + The name of the parameter, a string expected to be ASCII only. + :param value: + The value of the parameter, provided as ``bytes`` or ``str``. + :returns: + An RFC-2231-formatted unicode string. + + .. deprecated:: 2.0.0 + Will be removed in urllib3 v2.1.0. This is not valid for + ``multipart/form-data`` header parameters. + """ + import warnings + + warnings.warn( + "'format_header_param_rfc2231' is deprecated and will be " + "removed in urllib3 v2.1.0.
This is not valid for " + "multipart/form-data header parameters.", + DeprecationWarning, + stacklevel=2, + ) + + if isinstance(value, bytes): + value = value.decode("utf-8") + + if not any(ch in value for ch in '"\\\r\n'): + result = f'{name}="{value}"' + try: + result.encode("ascii") + except (UnicodeEncodeError, UnicodeDecodeError): + pass + else: + return result + + value = email.utils.encode_rfc2231(value, "utf-8") + value = f"{name}*={value}" + + return value + + +def format_multipart_header_param(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + Format and quote a single multipart header parameter. + + This follows the `WHATWG HTML Standard`_ as of 2021/06/10, matching + the behavior of current browser and curl versions. Values are + assumed to be UTF-8. The ``\\n``, ``\\r``, and ``"`` characters are + percent encoded. + + .. _WHATWG HTML Standard: + https://html.spec.whatwg.org/multipage/ + form-control-infrastructure.html#multipart-form-data + + :param name: + The name of the parameter, an ASCII-only ``str``. + :param value: + The value of the parameter, a ``str`` or UTF-8 encoded + ``bytes``. + :returns: + A string ``name="value"`` with the escaped value. + + .. versionchanged:: 2.0.0 + Matches the WHATWG HTML Standard as of 2021/06/10. Control + characters are no longer percent encoded. + + .. versionchanged:: 2.0.0 + Renamed from ``format_header_param_html5`` and + ``format_header_param``. The old names will be removed in + urllib3 v2.1.0. + """ + if isinstance(value, bytes): + value = value.decode("utf-8") + + # percent encode \n \r " + value = value.translate({10: "%0A", 13: "%0D", 34: "%22"}) + return f'{name}="{value}"' + + +def format_header_param_html5(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + .. deprecated:: 2.0.0 + Renamed to :func:`format_multipart_header_param`. Will be + removed in urllib3 v2.1.0. + """ + import warnings + + warnings.warn( + "'format_header_param_html5' has been renamed to " + "'format_multipart_header_param'. The old name will be " + "removed in urllib3 v2.1.0.", + DeprecationWarning, + stacklevel=2, + ) + return format_multipart_header_param(name, value) + + +def format_header_param(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + .. deprecated:: 2.0.0 + Renamed to :func:`format_multipart_header_param`. Will be + removed in urllib3 v2.1.0. + """ + import warnings + + warnings.warn( + "'format_header_param' has been renamed to " + "'format_multipart_header_param'. The old name will be " + "removed in urllib3 v2.1.0.", + DeprecationWarning, + stacklevel=2, + ) + return format_multipart_header_param(name, value) + + +class RequestField: + """ + A data container for request body parameters. + + :param name: + The name of this request field. Must be unicode. + :param data: + The data/value body. + :param filename: + An optional filename of the request field. Must be unicode. + :param headers: + An optional dict-like object of headers to initially use for the field. + + .. versionchanged:: 2.0.0 + The ``header_formatter`` parameter is deprecated and will + be removed in urllib3 v2.1.0. 
+ """ + + def __init__( + self, + name: str, + data: _TYPE_FIELD_VALUE, + filename: str | None = None, + headers: typing.Mapping[str, str] | None = None, + header_formatter: typing.Callable[[str, _TYPE_FIELD_VALUE], str] | None = None, + ): + self._name = name + self._filename = filename + self.data = data + self.headers: dict[str, str | None] = {} + if headers: + self.headers = dict(headers) + + if header_formatter is not None: + import warnings + + warnings.warn( + "The 'header_formatter' parameter is deprecated and " + "will be removed in urllib3 v2.1.0.", + DeprecationWarning, + stacklevel=2, + ) + self.header_formatter = header_formatter + else: + self.header_formatter = format_multipart_header_param + + @classmethod + def from_tuples( + cls, + fieldname: str, + value: _TYPE_FIELD_VALUE_TUPLE, + header_formatter: typing.Callable[[str, _TYPE_FIELD_VALUE], str] | None = None, + ) -> RequestField: + """ + A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. + + Supports constructing :class:`~urllib3.fields.RequestField` from + parameter of key/value strings AND key/filetuple. A filetuple is a + (filename, data, MIME type) tuple where the MIME type is optional. + For example:: + + 'foo': 'bar', + 'fakefile': ('foofile.txt', 'contents of foofile'), + 'realfile': ('barfile.txt', open('realfile').read()), + 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), + 'nonamefile': 'contents of nonamefile field', + + Field names and filenames must be unicode. + """ + filename: str | None + content_type: str | None + data: _TYPE_FIELD_VALUE + + if isinstance(value, tuple): + if len(value) == 3: + filename, data, content_type = value + else: + filename, data = value + content_type = guess_content_type(filename) + else: + filename = None + content_type = None + data = value + + request_param = cls( + fieldname, data, filename=filename, header_formatter=header_formatter + ) + request_param.make_multipart(content_type=content_type) + + return request_param + + def _render_part(self, name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + Override this method to change how each multipart header + parameter is formatted. By default, this calls + :func:`format_multipart_header_param`. + + :param name: + The name of the parameter, an ASCII-only ``str``. + :param value: + The value of the parameter, a ``str`` or UTF-8 encoded + ``bytes``. + + :meta public: + """ + return self.header_formatter(name, value) + + def _render_parts( + self, + header_parts: ( + dict[str, _TYPE_FIELD_VALUE | None] + | typing.Sequence[tuple[str, _TYPE_FIELD_VALUE | None]] + ), + ) -> str: + """ + Helper function to format and quote a single header. + + Useful for single headers that are composed of multiple items. E.g., + 'Content-Disposition' fields. + + :param header_parts: + A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format + as `k1="v1"; k2="v2"; ...`. + """ + iterable: typing.Iterable[tuple[str, _TYPE_FIELD_VALUE | None]] + + parts = [] + if isinstance(header_parts, dict): + iterable = header_parts.items() + else: + iterable = header_parts + + for name, value in iterable: + if value is not None: + parts.append(self._render_part(name, value)) + + return "; ".join(parts) + + def render_headers(self) -> str: + """ + Renders the headers for this request field. 
+ """ + lines = [] + + sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"] + for sort_key in sort_keys: + if self.headers.get(sort_key, False): + lines.append(f"{sort_key}: {self.headers[sort_key]}") + + for header_name, header_value in self.headers.items(): + if header_name not in sort_keys: + if header_value: + lines.append(f"{header_name}: {header_value}") + + lines.append("\r\n") + return "\r\n".join(lines) + + def make_multipart( + self, + content_disposition: str | None = None, + content_type: str | None = None, + content_location: str | None = None, + ) -> None: + """ + Makes this request field into a multipart request field. + + This method overrides "Content-Disposition", "Content-Type" and + "Content-Location" headers to the request parameter. + + :param content_disposition: + The 'Content-Disposition' of the request body. Defaults to 'form-data' + :param content_type: + The 'Content-Type' of the request body. + :param content_location: + The 'Content-Location' of the request body. + + """ + content_disposition = (content_disposition or "form-data") + "; ".join( + [ + "", + self._render_parts( + (("name", self._name), ("filename", self._filename)) + ), + ] + ) + + self.headers["Content-Disposition"] = content_disposition + self.headers["Content-Type"] = content_type + self.headers["Content-Location"] = content_location diff --git a/llmeval-env/lib/python3.10/site-packages/urllib3/response.py b/llmeval-env/lib/python3.10/site-packages/urllib3/response.py new file mode 100644 index 0000000000000000000000000000000000000000..d31fac9ba0c3ce341c258b711c4c347a760718b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/urllib3/response.py @@ -0,0 +1,1243 @@ +from __future__ import annotations + +import collections +import io +import json as _json +import logging +import re +import sys +import typing +import warnings +import zlib +from contextlib import contextmanager +from http.client import HTTPMessage as _HttplibHTTPMessage +from http.client import HTTPResponse as _HttplibHTTPResponse +from socket import timeout as SocketTimeout + +if typing.TYPE_CHECKING: + from ._base_connection import BaseHTTPConnection + +try: + try: + import brotlicffi as brotli # type: ignore[import-not-found] + except ImportError: + import brotli # type: ignore[import-not-found] +except ImportError: + brotli = None + +try: + import zstandard as zstd # type: ignore[import-not-found] + + # The package 'zstandard' added the 'eof' property starting + # in v0.18.0 which we require to ensure a complete and + # valid zstd stream was fed into the ZstdDecoder. + # See: https://github.com/urllib3/urllib3/pull/2624 + _zstd_version = _zstd_version = tuple( + map(int, re.search(r"^([0-9]+)\.([0-9]+)", zstd.__version__).groups()) # type: ignore[union-attr] + ) + if _zstd_version < (0, 18): # Defensive: + zstd = None + +except (AttributeError, ImportError, ValueError): # Defensive: + zstd = None + +from . 
import util +from ._base_connection import _TYPE_BODY +from ._collections import HTTPHeaderDict +from .connection import BaseSSLError, HTTPConnection, HTTPException +from .exceptions import ( + BodyNotHttplibCompatible, + DecodeError, + HTTPError, + IncompleteRead, + InvalidChunkLength, + InvalidHeader, + ProtocolError, + ReadTimeoutError, + ResponseNotChunked, + SSLError, +) +from .util.response import is_fp_closed, is_response_to_head +from .util.retry import Retry + +if typing.TYPE_CHECKING: + from typing import Literal + + from .connectionpool import HTTPConnectionPool + +log = logging.getLogger(__name__) + + +class ContentDecoder: + def decompress(self, data: bytes) -> bytes: + raise NotImplementedError() + + def flush(self) -> bytes: + raise NotImplementedError() + + +class DeflateDecoder(ContentDecoder): + def __init__(self) -> None: + self._first_try = True + self._data = b"" + self._obj = zlib.decompressobj() + + def decompress(self, data: bytes) -> bytes: + if not data: + return data + + if not self._first_try: + return self._obj.decompress(data) + + self._data += data + try: + decompressed = self._obj.decompress(data) + if decompressed: + self._first_try = False + self._data = None # type: ignore[assignment] + return decompressed + except zlib.error: + self._first_try = False + self._obj = zlib.decompressobj(-zlib.MAX_WBITS) + try: + return self.decompress(self._data) + finally: + self._data = None # type: ignore[assignment] + + def flush(self) -> bytes: + return self._obj.flush() + + +class GzipDecoderState: + FIRST_MEMBER = 0 + OTHER_MEMBERS = 1 + SWALLOW_DATA = 2 + + +class GzipDecoder(ContentDecoder): + def __init__(self) -> None: + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + self._state = GzipDecoderState.FIRST_MEMBER + + def decompress(self, data: bytes) -> bytes: + ret = bytearray() + if self._state == GzipDecoderState.SWALLOW_DATA or not data: + return bytes(ret) + while True: + try: + ret += self._obj.decompress(data) + except zlib.error: + previous_state = self._state + # Ignore data after the first error + self._state = GzipDecoderState.SWALLOW_DATA + if previous_state == GzipDecoderState.OTHER_MEMBERS: + # Allow trailing garbage acceptable in other gzip clients + return bytes(ret) + raise + data = self._obj.unused_data + if not data: + return bytes(ret) + self._state = GzipDecoderState.OTHER_MEMBERS + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + + def flush(self) -> bytes: + return self._obj.flush() + + +if brotli is not None: + + class BrotliDecoder(ContentDecoder): + # Supports both 'brotlipy' and 'Brotli' packages + # since they share an import name. 
The top branches + # are for 'brotlipy' and bottom branches for 'Brotli' + def __init__(self) -> None: + self._obj = brotli.Decompressor() + if hasattr(self._obj, "decompress"): + setattr(self, "decompress", self._obj.decompress) + else: + setattr(self, "decompress", self._obj.process) + + def flush(self) -> bytes: + if hasattr(self._obj, "flush"): + return self._obj.flush() # type: ignore[no-any-return] + return b"" + + +if zstd is not None: + + class ZstdDecoder(ContentDecoder): + def __init__(self) -> None: + self._obj = zstd.ZstdDecompressor().decompressobj() + + def decompress(self, data: bytes) -> bytes: + if not data: + return b"" + data_parts = [self._obj.decompress(data)] + while self._obj.eof and self._obj.unused_data: + unused_data = self._obj.unused_data + self._obj = zstd.ZstdDecompressor().decompressobj() + data_parts.append(self._obj.decompress(unused_data)) + return b"".join(data_parts) + + def flush(self) -> bytes: + ret = self._obj.flush() # note: this is a no-op + if not self._obj.eof: + raise DecodeError("Zstandard data is incomplete") + return ret # type: ignore[no-any-return] + + +class MultiDecoder(ContentDecoder): + """ + From RFC7231: + If one or more encodings have been applied to a representation, the + sender that applied the encodings MUST generate a Content-Encoding + header field that lists the content codings in the order in which + they were applied. + """ + + def __init__(self, modes: str) -> None: + self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] + + def flush(self) -> bytes: + return self._decoders[0].flush() + + def decompress(self, data: bytes) -> bytes: + for d in reversed(self._decoders): + data = d.decompress(data) + return data + + +def _get_decoder(mode: str) -> ContentDecoder: + if "," in mode: + return MultiDecoder(mode) + + # According to RFC 9110 section 8.4.1.3, recipients should + # consider x-gzip equivalent to gzip + if mode in ("gzip", "x-gzip"): + return GzipDecoder() + + if brotli is not None and mode == "br": + return BrotliDecoder() + + if zstd is not None and mode == "zstd": + return ZstdDecoder() + + return DeflateDecoder() + + +class BytesQueueBuffer: + """Memory-efficient bytes buffer + + To return decoded data in read() and still follow the BufferedIOBase API, we need a + buffer to always return the correct amount of bytes. + + This buffer should be filled using calls to put() + + Our maximum memory usage is determined by the sum of the size of: + + * self.buffer, which contains the full data + * the largest chunk that we will copy in get() + + The worst case scenario is a single chunk, in which case we'll make a full copy of + the data inside get(). 
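+
+    A small usage sketch (byte strings are illustrative):
+
+        buf = BytesQueueBuffer()
+        buf.put(b"hello ")
+        buf.put(b"world")
+        buf.get(5)      # b"hello"; b" world" stays buffered
+        buf.get_all()   # b" world"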
+ """ + + def __init__(self) -> None: + self.buffer: typing.Deque[bytes] = collections.deque() + self._size: int = 0 + + def __len__(self) -> int: + return self._size + + def put(self, data: bytes) -> None: + self.buffer.append(data) + self._size += len(data) + + def get(self, n: int) -> bytes: + if n == 0: + return b"" + elif not self.buffer: + raise RuntimeError("buffer is empty") + elif n < 0: + raise ValueError("n should be > 0") + + fetched = 0 + ret = io.BytesIO() + while fetched < n: + remaining = n - fetched + chunk = self.buffer.popleft() + chunk_length = len(chunk) + if remaining < chunk_length: + left_chunk, right_chunk = chunk[:remaining], chunk[remaining:] + ret.write(left_chunk) + self.buffer.appendleft(right_chunk) + self._size -= remaining + break + else: + ret.write(chunk) + self._size -= chunk_length + fetched += chunk_length + + if not self.buffer: + break + + return ret.getvalue() + + def get_all(self) -> bytes: + buffer = self.buffer + if not buffer: + assert self._size == 0 + return b"" + if len(buffer) == 1: + result = buffer.pop() + else: + ret = io.BytesIO() + ret.writelines(buffer.popleft() for _ in range(len(buffer))) + result = ret.getvalue() + self._size = 0 + return result + + +class BaseHTTPResponse(io.IOBase): + CONTENT_DECODERS = ["gzip", "x-gzip", "deflate"] + if brotli is not None: + CONTENT_DECODERS += ["br"] + if zstd is not None: + CONTENT_DECODERS += ["zstd"] + REDIRECT_STATUSES = [301, 302, 303, 307, 308] + + DECODER_ERROR_CLASSES: tuple[type[Exception], ...] = (IOError, zlib.error) + if brotli is not None: + DECODER_ERROR_CLASSES += (brotli.error,) + + if zstd is not None: + DECODER_ERROR_CLASSES += (zstd.ZstdError,) + + def __init__( + self, + *, + headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None, + status: int, + version: int, + reason: str | None, + decode_content: bool, + request_url: str | None, + retries: Retry | None = None, + ) -> None: + if isinstance(headers, HTTPHeaderDict): + self.headers = headers + else: + self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type] + self.status = status + self.version = version + self.reason = reason + self.decode_content = decode_content + self._has_decoded_content = False + self._request_url: str | None = request_url + self.retries = retries + + self.chunked = False + tr_enc = self.headers.get("transfer-encoding", "").lower() + # Don't incur the penalty of creating a list and then discarding it + encodings = (enc.strip() for enc in tr_enc.split(",")) + if "chunked" in encodings: + self.chunked = True + + self._decoder: ContentDecoder | None = None + self.length_remaining: int | None + + def get_redirect_location(self) -> str | None | Literal[False]: + """ + Should we redirect and where to? + + :returns: Truthy redirect location string if we got a redirect status + code and valid location. ``None`` if redirect status and no + location. ``False`` if not a redirect status code. + """ + if self.status in self.REDIRECT_STATUSES: + return self.headers.get("location") + return False + + @property + def data(self) -> bytes: + raise NotImplementedError() + + def json(self) -> typing.Any: + """ + Parses the body of the HTTP response as JSON. + + To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder. + + This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`. + + Read more :ref:`here `. 
+ """ + data = self.data.decode("utf-8") + return _json.loads(data) + + @property + def url(self) -> str | None: + raise NotImplementedError() + + @url.setter + def url(self, url: str | None) -> None: + raise NotImplementedError() + + @property + def connection(self) -> BaseHTTPConnection | None: + raise NotImplementedError() + + @property + def retries(self) -> Retry | None: + return self._retries + + @retries.setter + def retries(self, retries: Retry | None) -> None: + # Override the request_url if retries has a redirect location. + if retries is not None and retries.history: + self.url = retries.history[-1].redirect_location + self._retries = retries + + def stream( + self, amt: int | None = 2**16, decode_content: bool | None = None + ) -> typing.Iterator[bytes]: + raise NotImplementedError() + + def read( + self, + amt: int | None = None, + decode_content: bool | None = None, + cache_content: bool = False, + ) -> bytes: + raise NotImplementedError() + + def read1( + self, + amt: int | None = None, + decode_content: bool | None = None, + ) -> bytes: + raise NotImplementedError() + + def read_chunked( + self, + amt: int | None = None, + decode_content: bool | None = None, + ) -> typing.Iterator[bytes]: + raise NotImplementedError() + + def release_conn(self) -> None: + raise NotImplementedError() + + def drain_conn(self) -> None: + raise NotImplementedError() + + def close(self) -> None: + raise NotImplementedError() + + def _init_decoder(self) -> None: + """ + Set-up the _decoder attribute if necessary. + """ + # Note: content-encoding value should be case-insensitive, per RFC 7230 + # Section 3.2 + content_encoding = self.headers.get("content-encoding", "").lower() + if self._decoder is None: + if content_encoding in self.CONTENT_DECODERS: + self._decoder = _get_decoder(content_encoding) + elif "," in content_encoding: + encodings = [ + e.strip() + for e in content_encoding.split(",") + if e.strip() in self.CONTENT_DECODERS + ] + if encodings: + self._decoder = _get_decoder(content_encoding) + + def _decode( + self, data: bytes, decode_content: bool | None, flush_decoder: bool + ) -> bytes: + """ + Decode the data passed in and potentially flush the decoder. + """ + if not decode_content: + if self._has_decoded_content: + raise RuntimeError( + "Calling read(decode_content=False) is not supported after " + "read(decode_content=True) was called." + ) + return data + + try: + if self._decoder: + data = self._decoder.decompress(data) + self._has_decoded_content = True + except self.DECODER_ERROR_CLASSES as e: + content_encoding = self.headers.get("content-encoding", "").lower() + raise DecodeError( + "Received response with content-encoding: %s, but " + "failed to decode it." % content_encoding, + e, + ) from e + if flush_decoder: + data += self._flush_decoder() + + return data + + def _flush_decoder(self) -> bytes: + """ + Flushes the decoder. Should only be called if the decoder is actually + being used. + """ + if self._decoder: + return self._decoder.decompress(b"") + self._decoder.flush() + return b"" + + # Compatibility methods for `io` module + def readinto(self, b: bytearray) -> int: + temp = self.read(len(b)) + if len(temp) == 0: + return 0 + else: + b[: len(temp)] = temp + return len(temp) + + # Compatibility methods for http.client.HTTPResponse + def getheaders(self) -> HTTPHeaderDict: + warnings.warn( + "HTTPResponse.getheaders() is deprecated and will be removed " + "in urllib3 v2.1.0. 
Instead access HTTPResponse.headers directly.", + category=DeprecationWarning, + stacklevel=2, + ) + return self.headers + + def getheader(self, name: str, default: str | None = None) -> str | None: + warnings.warn( + "HTTPResponse.getheader() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).", + category=DeprecationWarning, + stacklevel=2, + ) + return self.headers.get(name, default) + + # Compatibility method for http.cookiejar + def info(self) -> HTTPHeaderDict: + return self.headers + + def geturl(self) -> str | None: + return self.url + + +class HTTPResponse(BaseHTTPResponse): + """ + HTTP Response container. + + Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is + loaded and decoded on-demand when the ``data`` property is accessed. This + class is also compatible with the Python standard library's :mod:`io` + module, and can hence be treated as a readable object in the context of that + framework. + + Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`: + + :param preload_content: + If True, the response's body will be preloaded during construction. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param original_response: + When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse` + object, it's convenient to include the original for debug purposes. It's + otherwise unused. + + :param retries: + The retries contains the last :class:`~urllib3.util.retry.Retry` that + was used during the request. + + :param enforce_content_length: + Enforce content length checking. Body returned by server must match + value of Content-Length header, if present. Otherwise, raise error. + """ + + def __init__( + self, + body: _TYPE_BODY = "", + headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None, + status: int = 0, + version: int = 0, + reason: str | None = None, + preload_content: bool = True, + decode_content: bool = True, + original_response: _HttplibHTTPResponse | None = None, + pool: HTTPConnectionPool | None = None, + connection: HTTPConnection | None = None, + msg: _HttplibHTTPMessage | None = None, + retries: Retry | None = None, + enforce_content_length: bool = True, + request_method: str | None = None, + request_url: str | None = None, + auto_close: bool = True, + ) -> None: + super().__init__( + headers=headers, + status=status, + version=version, + reason=reason, + decode_content=decode_content, + request_url=request_url, + retries=retries, + ) + + self.enforce_content_length = enforce_content_length + self.auto_close = auto_close + + self._body = None + self._fp: _HttplibHTTPResponse | None = None + self._original_response = original_response + self._fp_bytes_read = 0 + self.msg = msg + + if body and isinstance(body, (str, bytes)): + self._body = body + + self._pool = pool + self._connection = connection + + if hasattr(body, "read"): + self._fp = body # type: ignore[assignment] + + # Are we using the chunked-style of transfer encoding? + self.chunk_left: int | None = None + + # Determine length of response + self.length_remaining = self._init_length(request_method) + + # Used to return the correct amount of bytes for partial read()s + self._decoded_buffer = BytesQueueBuffer() + + # If requested, preload the body. 
+ if preload_content and not self._body: + self._body = self.read(decode_content=decode_content) + + def release_conn(self) -> None: + if not self._pool or not self._connection: + return None + + self._pool._put_conn(self._connection) + self._connection = None + + def drain_conn(self) -> None: + """ + Read and discard any remaining HTTP response data in the response connection. + + Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. + """ + try: + self.read() + except (HTTPError, OSError, BaseSSLError, HTTPException): + pass + + @property + def data(self) -> bytes: + # For backwards-compat with urllib3 0.4 and earlier. + if self._body: + return self._body # type: ignore[return-value] + + if self._fp: + return self.read(cache_content=True) + + return None # type: ignore[return-value] + + @property + def connection(self) -> HTTPConnection | None: + return self._connection + + def isclosed(self) -> bool: + return is_fp_closed(self._fp) + + def tell(self) -> int: + """ + Obtain the number of bytes pulled over the wire so far. May differ from + the amount of content returned by :meth:`urllib3.response.HTTPResponse.read` + if bytes are encoded on the wire (e.g., compressed). + """ + return self._fp_bytes_read + + def _init_length(self, request_method: str | None) -> int | None: + """ + Set initial length value for Response content if available. + """ + length: int | None + content_length: str | None = self.headers.get("content-length") + + if content_length is not None: + if self.chunked: + # This Response will fail with an IncompleteRead if it can't be + # received as chunked. This method falls back to attempt reading + # the response before raising an exception. + log.warning( + "Received response with both Content-Length and " + "Transfer-Encoding set. This is expressly forbidden " + "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " + "attempting to process response as Transfer-Encoding: " + "chunked." + ) + return None + + try: + # RFC 7230 section 3.3.2 specifies multiple content lengths can + # be sent in a single Content-Length header + # (e.g. Content-Length: 42, 42). This line ensures the values + # are all valid ints and that as long as the `set` length is 1, + # all values are the same. Otherwise, the header is invalid. + lengths = {int(val) for val in content_length.split(",")} + if len(lengths) > 1: + raise InvalidHeader( + "Content-Length contained multiple " + "unmatching values (%s)" % content_length + ) + length = lengths.pop() + except ValueError: + length = None + else: + if length < 0: + length = None + + else: # if content_length is None + length = None + + # Convert status to int for comparison + # In some cases, httplib returns a status of "_UNKNOWN" + try: + status = int(self.status) + except ValueError: + status = 0 + + # Check for responses that shouldn't include a body + if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": + length = 0 + + return length + + @contextmanager + def _error_catcher(self) -> typing.Generator[None, None, None]: + """ + Catch low-level python exceptions, instead re-raising urllib3 + variants, so that low-level exceptions are not leaked in the + high-level api. + + On exit, release the connection back to the pool. + """ + clean_exit = False + + try: + try: + yield + + except SocketTimeout as e: + # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but + # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type] + + except BaseSSLError as e: + # FIXME: Is there a better way to differentiate between SSLErrors? + if "read operation timed out" not in str(e): + # SSL errors related to framing/MAC get wrapped and reraised here + raise SSLError(e) from e + + raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type] + + except IncompleteRead as e: + if ( + e.expected is not None + and e.partial is not None + and e.expected == -e.partial + ): + arg = "Response may not contain content." + else: + arg = f"Connection broken: {e!r}" + raise ProtocolError(arg, e) from e + + except (HTTPException, OSError) as e: + raise ProtocolError(f"Connection broken: {e!r}", e) from e + + # If no exception is thrown, we should avoid cleaning up + # unnecessarily. + clean_exit = True + finally: + # If we didn't terminate cleanly, we need to throw away our + # connection. + if not clean_exit: + # The response may not be closed but we're not going to use it + # anymore so close it now to ensure that the connection is + # released back to the pool. + if self._original_response: + self._original_response.close() + + # Closing the response may not actually be sufficient to close + # everything, so if we have a hold of the connection close that + # too. + if self._connection: + self._connection.close() + + # If we hold the original response but it's closed now, we should + # return the connection back to the pool. + if self._original_response and self._original_response.isclosed(): + self.release_conn() + + def _fp_read( + self, + amt: int | None = None, + *, + read1: bool = False, + ) -> bytes: + """ + Read a response with the thought that reading the number of bytes + larger than can fit in a 32-bit int at a time via SSL in some + known cases leads to an overflow error that has to be prevented + if `amt` or `self.length_remaining` indicate that a problem may + happen. + + The known cases: + * 3.8 <= CPython < 3.9.7 because of a bug + https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. + * urllib3 injected with pyOpenSSL-backed SSL-support. + * CPython < 3.10 only when `amt` does not fit 32-bit int. + """ + assert self._fp + c_int_max = 2**31 - 1 + if ( + (amt and amt > c_int_max) + or ( + amt is None + and self.length_remaining + and self.length_remaining > c_int_max + ) + ) and (util.IS_PYOPENSSL or sys.version_info < (3, 10)): + if read1: + return self._fp.read1(c_int_max) + buffer = io.BytesIO() + # Besides `max_chunk_amt` being a maximum chunk size, it + # affects memory overhead of reading a response by this + # method in CPython. + # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum + # chunk size that does not lead to an overflow error, but + # 256 MiB is a compromise. + max_chunk_amt = 2**28 + while amt is None or amt != 0: + if amt is not None: + chunk_amt = min(amt, max_chunk_amt) + amt -= chunk_amt + else: + chunk_amt = max_chunk_amt + data = self._fp.read(chunk_amt) + if not data: + break + buffer.write(data) + del data # to reduce peak memory usage by `max_chunk_amt`. + return buffer.getvalue() + elif read1: + return self._fp.read1(amt) if amt is not None else self._fp.read1() + else: + # StringIO doesn't like amt=None + return self._fp.read(amt) if amt is not None else self._fp.read() + + def _raw_read( + self, + amt: int | None = None, + *, + read1: bool = False, + ) -> bytes: + """ + Reads `amt` of bytes from the socket. 
+ """ + if self._fp is None: + return None # type: ignore[return-value] + + fp_closed = getattr(self._fp, "closed", False) + + with self._error_catcher(): + data = self._fp_read(amt, read1=read1) if not fp_closed else b"" + if amt is not None and amt != 0 and not data: + # Platform-specific: Buggy versions of Python. + # Close the connection when no data is returned + # + # This is redundant to what httplib/http.client _should_ + # already do. However, versions of python released before + # December 15, 2012 (http://bugs.python.org/issue16298) do + # not properly close the connection in all cases. There is + # no harm in redundantly calling close. + self._fp.close() + if ( + self.enforce_content_length + and self.length_remaining is not None + and self.length_remaining != 0 + ): + # This is an edge case that httplib failed to cover due + # to concerns of backward compatibility. We're + # addressing it here to make sure IncompleteRead is + # raised during streaming, so all calls with incorrect + # Content-Length are caught. + raise IncompleteRead(self._fp_bytes_read, self.length_remaining) + elif read1 and ( + (amt != 0 and not data) or self.length_remaining == len(data) + ): + # All data has been read, but `self._fp.read1` in + # CPython 3.12 and older doesn't always close + # `http.client.HTTPResponse`, so we close it here. + # See https://github.com/python/cpython/issues/113199 + self._fp.close() + + if data: + self._fp_bytes_read += len(data) + if self.length_remaining is not None: + self.length_remaining -= len(data) + return data + + def read( + self, + amt: int | None = None, + decode_content: bool | None = None, + cache_content: bool = False, + ) -> bytes: + """ + Similar to :meth:`http.client.HTTPResponse.read`, but with two additional + parameters: ``decode_content`` and ``cache_content``. + + :param amt: + How much of the content to read. If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param cache_content: + If True, will save the returned data such that the same result is + returned despite of the state of the underlying file object. This + is useful if you want the ``.data`` property to continue working + after having ``.read()`` the file object. (Overridden if ``amt`` is + set.) + """ + self._init_decoder() + if decode_content is None: + decode_content = self.decode_content + + if amt is not None: + cache_content = False + + if len(self._decoded_buffer) >= amt: + return self._decoded_buffer.get(amt) + + data = self._raw_read(amt) + + flush_decoder = amt is None or (amt != 0 and not data) + + if not data and len(self._decoded_buffer) == 0: + return data + + if amt is None: + data = self._decode(data, decode_content, flush_decoder) + if cache_content: + self._body = data + else: + # do not waste memory on buffer when not decoding + if not decode_content: + if self._has_decoded_content: + raise RuntimeError( + "Calling read(decode_content=False) is not supported after " + "read(decode_content=True) was called." 
+                    )
+                return data
+
+            decoded_data = self._decode(data, decode_content, flush_decoder)
+            self._decoded_buffer.put(decoded_data)
+
+            while len(self._decoded_buffer) < amt and data:
+                # TODO make sure to initially read enough data to get past the headers
+                # For example, the GZ file header takes 10 bytes, we don't want to read
+                # it one byte at a time
+                data = self._raw_read(amt)
+                decoded_data = self._decode(data, decode_content, flush_decoder)
+                self._decoded_buffer.put(decoded_data)
+            data = self._decoded_buffer.get(amt)
+
+        return data
+
+    def read1(
+        self,
+        amt: int | None = None,
+        decode_content: bool | None = None,
+    ) -> bytes:
+        """
+        Similar to ``http.client.HTTPResponse.read1`` and documented
+        in :meth:`io.BufferedReader.read1`, but with an additional parameter:
+        ``decode_content``.
+
+        :param amt:
+            How much of the content to read.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+        """
+        if decode_content is None:
+            decode_content = self.decode_content
+        # try to respond without going to the network
+        if self._has_decoded_content:
+            if not decode_content:
+                raise RuntimeError(
+                    "Calling read1(decode_content=False) is not supported after "
+                    "read1(decode_content=True) was called."
+                )
+            if len(self._decoded_buffer) > 0:
+                if amt is None:
+                    return self._decoded_buffer.get_all()
+                return self._decoded_buffer.get(amt)
+        if amt == 0:
+            return b""
+
+        # FIXME, this method's type doesn't say returning None is possible
+        data = self._raw_read(amt, read1=True)
+        if not decode_content or data is None:
+            return data
+
+        self._init_decoder()
+        while True:
+            flush_decoder = not data
+            decoded_data = self._decode(data, decode_content, flush_decoder)
+            self._decoded_buffer.put(decoded_data)
+            if decoded_data or flush_decoder:
+                break
+            data = self._raw_read(8192, read1=True)
+
+        if amt is None:
+            return self._decoded_buffer.get_all()
+        return self._decoded_buffer.get(amt)
+
+    def stream(
+        self, amt: int | None = 2**16, decode_content: bool | None = None
+    ) -> typing.Generator[bytes, None, None]:
+        """
+        A generator wrapper for the read() method. A call will block until
+        ``amt`` bytes have been read from the connection or until the
+        connection is closed.
+
+        :param amt:
+            How much of the content to read. The generator will return up to
+            this much data per iteration, but may return less. This is
+            particularly likely when using compressed data. However, an empty
+            bytestring will never be yielded.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
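+
+        A minimal usage sketch (illustrative; assumes ``resp`` is an
+        ``HTTPResponse`` obtained with ``preload_content=False``, and
+        ``handle`` is a hypothetical consumer)::
+
+            for chunk in resp.stream(2**16, decode_content=True):
+                handle(chunk)
+            resp.release_conn()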
+ """ + if self.chunked and self.supports_chunked_reads(): + yield from self.read_chunked(amt, decode_content=decode_content) + else: + while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0: + data = self.read(amt=amt, decode_content=decode_content) + + if data: + yield data + + # Overrides from io.IOBase + def readable(self) -> bool: + return True + + def close(self) -> None: + if not self.closed and self._fp: + self._fp.close() + + if self._connection: + self._connection.close() + + if not self.auto_close: + io.IOBase.close(self) + + @property + def closed(self) -> bool: + if not self.auto_close: + return io.IOBase.closed.__get__(self) # type: ignore[no-any-return] + elif self._fp is None: + return True + elif hasattr(self._fp, "isclosed"): + return self._fp.isclosed() + elif hasattr(self._fp, "closed"): + return self._fp.closed + else: + return True + + def fileno(self) -> int: + if self._fp is None: + raise OSError("HTTPResponse has no file to get a fileno from") + elif hasattr(self._fp, "fileno"): + return self._fp.fileno() + else: + raise OSError( + "The file-like object this HTTPResponse is wrapped " + "around has no file descriptor" + ) + + def flush(self) -> None: + if ( + self._fp is not None + and hasattr(self._fp, "flush") + and not getattr(self._fp, "closed", False) + ): + return self._fp.flush() + + def supports_chunked_reads(self) -> bool: + """ + Checks if the underlying file-like object looks like a + :class:`http.client.HTTPResponse` object. We do this by testing for + the fp attribute. If it is present we assume it returns raw chunks as + processed by read_chunked(). + """ + return hasattr(self._fp, "fp") + + def _update_chunk_length(self) -> None: + # First, we'll figure out length of a chunk and then + # we'll try to read it from socket. + if self.chunk_left is not None: + return None + line = self._fp.fp.readline() # type: ignore[union-attr] + line = line.split(b";", 1)[0] + try: + self.chunk_left = int(line, 16) + except ValueError: + self.close() + if line: + # Invalid chunked protocol response, abort. + raise InvalidChunkLength(self, line) from None + else: + # Truncated at start of next chunk + raise ProtocolError("Response ended prematurely") from None + + def _handle_chunk(self, amt: int | None) -> bytes: + returned_chunk = None + if amt is None: + chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr] + returned_chunk = chunk + self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk. + self.chunk_left = None + elif self.chunk_left is not None and amt < self.chunk_left: + value = self._fp._safe_read(amt) # type: ignore[union-attr] + self.chunk_left = self.chunk_left - amt + returned_chunk = value + elif amt == self.chunk_left: + value = self._fp._safe_read(amt) # type: ignore[union-attr] + self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk. + self.chunk_left = None + returned_chunk = value + else: # amt > self.chunk_left + returned_chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr] + self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk. + self.chunk_left = None + return returned_chunk # type: ignore[no-any-return] + + def read_chunked( + self, amt: int | None = None, decode_content: bool | None = None + ) -> typing.Generator[bytes, None, None]: + """ + Similar to :meth:`HTTPResponse.read`, but with an additional + parameter: ``decode_content``. + + :param amt: + How much of the content to read. 
+            because it doesn't make sense to cache partial content as the full
+            response.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+        """
+        self._init_decoder()
+        # FIXME: Rewrite this method and make it a class with better structured logic.
+        if not self.chunked:
+            raise ResponseNotChunked(
+                "Response is not chunked. "
+                "Header 'transfer-encoding: chunked' is missing."
+            )
+        if not self.supports_chunked_reads():
+            raise BodyNotHttplibCompatible(
+                "Body should be http.client.HTTPResponse like. "
+                "It should have an fp attribute which returns raw chunks."
+            )
+
+        with self._error_catcher():
+            # Don't bother reading the body of a HEAD request.
+            if self._original_response and is_response_to_head(self._original_response):
+                self._original_response.close()
+                return None
+
+            # If a response is already read and closed
+            # then return immediately.
+            if self._fp.fp is None:  # type: ignore[union-attr]
+                return None
+
+            while True:
+                self._update_chunk_length()
+                if self.chunk_left == 0:
+                    break
+                chunk = self._handle_chunk(amt)
+                decoded = self._decode(
+                    chunk, decode_content=decode_content, flush_decoder=False
+                )
+                if decoded:
+                    yield decoded
+
+            if decode_content:
+                # On CPython and PyPy, we should never need to flush the
+                # decoder. However, on Jython we *might* need to, so
+                # let's defensively do it anyway.
+                decoded = self._flush_decoder()
+                if decoded:  # Platform-specific: Jython.
+                    yield decoded
+
+            # Chunk content ends with \r\n: discard it.
+            while self._fp is not None:
+                line = self._fp.fp.readline()
+                if not line:
+                    # Some sites may not end with '\r\n'.
+                    break
+                if line == b"\r\n":
+                    break
+
+            # We read everything; close the "file".
+            if self._original_response:
+                self._original_response.close()
+
+    @property
+    def url(self) -> str | None:
+        """
+        Returns the URL that was the source of this response.
+        If the request that generated this response redirected, this method
+        will return the final redirect location.
+        """
+        return self._request_url
+
+    @url.setter
+    def url(self, url: str) -> None:
+        self._request_url = url
+
+    def __iter__(self) -> typing.Iterator[bytes]:
+        buffer: list[bytes] = []
+        for chunk in self.stream(decode_content=True):
+            if b"\n" in chunk:
+                chunks = chunk.split(b"\n")
+                yield b"".join(buffer) + chunks[0] + b"\n"
+                for x in chunks[1:-1]:
+                    yield x + b"\n"
+                if chunks[-1]:
+                    buffer = [chunks[-1]]
+                else:
+                    buffer = []
+            else:
+                buffer.append(chunk)
+        if buffer:
+            yield b"".join(buffer)
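+
+
+# A minimal end-to-end sketch (illustrative only, not part of this module):
+# it assumes network access to the example URL and uses only public urllib3
+# APIs exercised above (``PoolManager.request`` with ``preload_content=False``,
+# ``stream``, ``drain_conn``, and ``release_conn``).
+#
+#     import urllib3
+#
+#     http = urllib3.PoolManager()
+#     resp = http.request("GET", "https://example.com/", preload_content=False)
+#     try:
+#         for chunk in resp.stream(2**16):
+#             ...  # consume `chunk` (bytes)
+#     finally:
+#         resp.drain_conn()
+#         resp.release_conn()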