Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/AUTHORS.md +313 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/INSTALLER +1 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/LICENSE.txt +202 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/METADATA +67 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/README.md +50 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/RECORD +782 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/WHEEL +5 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt +3 -0
- llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/top_level.txt +1 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py +19 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py +80 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py +128 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py +161 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py +121 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py +20 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/config.py +127 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/layer.py +409 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/model.py +115 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__init__.py +37 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/gptq.py +114 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__init__.py +19 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/config.py +59 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/model.py +130 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__init__.py +20 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/config.py +89 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/layer.py +171 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/router.py +83 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/AUTHORS.md
ADDED
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit (NLTK) Authors
|
2 |
+
|
3 |
+
## Original Authors
|
4 |
+
|
5 |
+
- Steven Bird <[email protected]>
|
6 |
+
- Edward Loper <[email protected]>
|
7 |
+
- Ewan Klein <[email protected]>
|
8 |
+
|
9 |
+
## Contributors
|
10 |
+
|
11 |
+
- Tom Aarsen
|
12 |
+
- Rami Al-Rfou'
|
13 |
+
- Mark Amery
|
14 |
+
- Greg Aumann
|
15 |
+
- Ivan Barria
|
16 |
+
- Ingolf Becker
|
17 |
+
- Yonatan Becker
|
18 |
+
- Paul Bedaride
|
19 |
+
- Steven Bethard
|
20 |
+
- Robert Berwick
|
21 |
+
- Dan Blanchard
|
22 |
+
- Nathan Bodenstab
|
23 |
+
- Alexander Böhm
|
24 |
+
- Francis Bond
|
25 |
+
- Paul Bone
|
26 |
+
- Jordan Boyd-Graber
|
27 |
+
- Daniel Blanchard
|
28 |
+
- Phil Blunsom
|
29 |
+
- Lars Buitinck
|
30 |
+
- Cristian Capdevila
|
31 |
+
- Steve Cassidy
|
32 |
+
- Chen-Fu Chiang
|
33 |
+
- Dmitry Chichkov
|
34 |
+
- Jinyoung Choi
|
35 |
+
- Andrew Clausen
|
36 |
+
- Lucas Champollion
|
37 |
+
- Graham Christensen
|
38 |
+
- Trevor Cohn
|
39 |
+
- David Coles
|
40 |
+
- Tom Conroy <https://github.com/tconroy>
|
41 |
+
- Claude Coulombe
|
42 |
+
- Lucas Cooper
|
43 |
+
- Robin Cooper
|
44 |
+
- Chris Crowner
|
45 |
+
- James Curran
|
46 |
+
- Arthur Darcet
|
47 |
+
- Dariel Dato-on
|
48 |
+
- Selina Dennis
|
49 |
+
- Leon Derczynski
|
50 |
+
- Alexis Dimitriadis
|
51 |
+
- Nikhil Dinesh
|
52 |
+
- Liang Dong
|
53 |
+
- David Doukhan
|
54 |
+
- Rebecca Dridan
|
55 |
+
- Pablo Duboue
|
56 |
+
- Long Duong
|
57 |
+
- Christian Federmann
|
58 |
+
- Campion Fellin
|
59 |
+
- Michelle Fullwood
|
60 |
+
- Dan Garrette
|
61 |
+
- Maciej Gawinecki
|
62 |
+
- Jean Mark Gawron
|
63 |
+
- Sumukh Ghodke
|
64 |
+
- Yoav Goldberg
|
65 |
+
- Michael Wayne Goodman
|
66 |
+
- Dougal Graham
|
67 |
+
- Brent Gray
|
68 |
+
- Simon Greenhill
|
69 |
+
- Clark Grubb
|
70 |
+
- Eduardo Pereira Habkost
|
71 |
+
- Masato Hagiwara
|
72 |
+
- Lauri Hallila
|
73 |
+
- Michael Hansen
|
74 |
+
- Yurie Hara
|
75 |
+
- Will Hardy
|
76 |
+
- Tyler Hartley
|
77 |
+
- Peter Hawkins
|
78 |
+
- Saimadhav Heblikar
|
79 |
+
- Fredrik Hedman
|
80 |
+
- Helder
|
81 |
+
- Michael Heilman
|
82 |
+
- Ofer Helman
|
83 |
+
- Christopher Hench
|
84 |
+
- Bruce Hill
|
85 |
+
- Amy Holland
|
86 |
+
- Kristy Hollingshead
|
87 |
+
- Marcus Huderle
|
88 |
+
- Baden Hughes
|
89 |
+
- Nancy Ide
|
90 |
+
- Rebecca Ingram
|
91 |
+
- Edward Ivanovic
|
92 |
+
- Thomas Jakobsen
|
93 |
+
- Nick Johnson
|
94 |
+
- Eric Kafe
|
95 |
+
- Piotr Kasprzyk
|
96 |
+
- Angelos Katharopoulos
|
97 |
+
- Sudharshan Kaushik
|
98 |
+
- Chris Koenig
|
99 |
+
- Mikhail Korobov
|
100 |
+
- Denis Krusko
|
101 |
+
- Ilia Kurenkov
|
102 |
+
- Stefano Lattarini
|
103 |
+
- Pierre-François Laquerre
|
104 |
+
- Stefano Lattarini
|
105 |
+
- Haejoong Lee
|
106 |
+
- Jackson Lee
|
107 |
+
- Max Leonov
|
108 |
+
- Chris Liechti
|
109 |
+
- Hyuckin David Lim
|
110 |
+
- Tom Lippincott
|
111 |
+
- Peter Ljunglöf
|
112 |
+
- Alex Louden
|
113 |
+
- Joseph Lynch
|
114 |
+
- Nitin Madnani
|
115 |
+
- Felipe Madrigal
|
116 |
+
- Bjørn Mæland
|
117 |
+
- Dean Malmgren
|
118 |
+
- Christopher Maloof
|
119 |
+
- Rob Malouf
|
120 |
+
- Iker Manterola
|
121 |
+
- Carl de Marcken
|
122 |
+
- Mitch Marcus
|
123 |
+
- Torsten Marek
|
124 |
+
- Robert Marshall
|
125 |
+
- Marius Mather
|
126 |
+
- Duncan McGreggor
|
127 |
+
- David McClosky
|
128 |
+
- Xinfan Meng
|
129 |
+
- Dmitrijs Milajevs
|
130 |
+
- Margaret Mitchell
|
131 |
+
- Tomonori Nagano
|
132 |
+
- Jason Narad
|
133 |
+
- Shari A’aidil Nasruddin
|
134 |
+
- Lance Nathan
|
135 |
+
- Morten Neergaard
|
136 |
+
- David Nemeskey
|
137 |
+
- Eric Nichols
|
138 |
+
- Joel Nothman
|
139 |
+
- Alireza Nourian
|
140 |
+
- Alexander Oleynikov
|
141 |
+
- Pierpaolo Pantone
|
142 |
+
- Ted Pedersen
|
143 |
+
- Jacob Perkins
|
144 |
+
- Alberto Planas
|
145 |
+
- Ondrej Platek
|
146 |
+
- Alessandro Presta
|
147 |
+
- Qi Liu
|
148 |
+
- Martin Thorsen Ranang
|
149 |
+
- Michael Recachinas
|
150 |
+
- Brandon Rhodes
|
151 |
+
- Joshua Ritterman
|
152 |
+
- Will Roberts
|
153 |
+
- Stuart Robinson
|
154 |
+
- Carlos Rodriguez
|
155 |
+
- Lorenzo Rubio
|
156 |
+
- Alex Rudnick
|
157 |
+
- Jussi Salmela
|
158 |
+
- Geoffrey Sampson
|
159 |
+
- Kepa Sarasola
|
160 |
+
- Kevin Scannell
|
161 |
+
- Nathan Schneider
|
162 |
+
- Rico Sennrich
|
163 |
+
- Thomas Skardal
|
164 |
+
- Eric Smith
|
165 |
+
- Lynn Soe
|
166 |
+
- Rob Speer
|
167 |
+
- Peter Spiller
|
168 |
+
- Richard Sproat
|
169 |
+
- Ceri Stagg
|
170 |
+
- Peter Stahl
|
171 |
+
- Oliver Steele
|
172 |
+
- Thomas Stieglmaier
|
173 |
+
- Jan Strunk
|
174 |
+
- Liling Tan
|
175 |
+
- Claire Taylor
|
176 |
+
- Louis Tiao
|
177 |
+
- Steven Tomcavage
|
178 |
+
- Tiago Tresoldi
|
179 |
+
- Marcus Uneson
|
180 |
+
- Yu Usami
|
181 |
+
- Petro Verkhogliad
|
182 |
+
- Peter Wang
|
183 |
+
- Zhe Wang
|
184 |
+
- Charlotte Wilson
|
185 |
+
- Chuck Wooters
|
186 |
+
- Steven Xu
|
187 |
+
- Beracah Yankama
|
188 |
+
- Lei Ye (叶磊)
|
189 |
+
- Patrick Ye
|
190 |
+
- Geraldine Sim Wei Ying
|
191 |
+
- Jason Yoder
|
192 |
+
- Thomas Zieglier
|
193 |
+
- 0ssifrage
|
194 |
+
- ducki13
|
195 |
+
- kiwipi
|
196 |
+
- lade
|
197 |
+
- isnowfy
|
198 |
+
- onesandzeros
|
199 |
+
- pquentin
|
200 |
+
- wvanlint
|
201 |
+
- Álvaro Justen <https://github.com/turicas>
|
202 |
+
- bjut-hz
|
203 |
+
- Sergio Oller
|
204 |
+
- Will Monroe
|
205 |
+
- Elijah Rippeth
|
206 |
+
- Emil Manukyan
|
207 |
+
- Casper Lehmann-Strøm
|
208 |
+
- Andrew Giel
|
209 |
+
- Tanin Na Nakorn
|
210 |
+
- Linghao Zhang
|
211 |
+
- Colin Carroll
|
212 |
+
- Heguang Miao
|
213 |
+
- Hannah Aizenman (story645)
|
214 |
+
- George Berry
|
215 |
+
- Adam Nelson
|
216 |
+
- J Richard Snape
|
217 |
+
- Alex Constantin <[email protected]>
|
218 |
+
- Tsolak Ghukasyan
|
219 |
+
- Prasasto Adi
|
220 |
+
- Safwan Kamarrudin
|
221 |
+
- Arthur Tilley
|
222 |
+
- Vilhjalmur Thorsteinsson
|
223 |
+
- Jaehoon Hwang <https://github.com/jaehoonhwang>
|
224 |
+
- Chintan Shah <https://github.com/chintanshah24>
|
225 |
+
- sbagan
|
226 |
+
- Zicheng Xu
|
227 |
+
- Albert Au Yeung <https://github.com/albertauyeung>
|
228 |
+
- Shenjian Zhao
|
229 |
+
- Deng Wang <https://github.com/lmatt-bit>
|
230 |
+
- Ali Abdullah
|
231 |
+
- Stoytcho Stoytchev
|
232 |
+
- Lakhdar Benzahia
|
233 |
+
- Kheireddine Abainia <https://github.com/xprogramer>
|
234 |
+
- Yibin Lin <https://github.com/yibinlin>
|
235 |
+
- Artiem Krinitsyn
|
236 |
+
- Björn Mattsson
|
237 |
+
- Oleg Chislov
|
238 |
+
- Pavan Gururaj Joshi <https://github.com/PavanGJ>
|
239 |
+
- Ethan Hill <https://github.com/hill1303>
|
240 |
+
- Vivek Lakshmanan
|
241 |
+
- Somnath Rakshit <https://github.com/somnathrakshit>
|
242 |
+
- Anlan Du
|
243 |
+
- Pulkit Maloo <https://github.com/pulkitmaloo>
|
244 |
+
- Brandon M. Burroughs <https://github.com/brandonmburroughs>
|
245 |
+
- John Stewart <https://github.com/free-variation>
|
246 |
+
- Iaroslav Tymchenko <https://github.com/myproblemchild>
|
247 |
+
- Aleš Tamchyna
|
248 |
+
- Tim Gianitsos <https://github.com/timgianitsos>
|
249 |
+
- Philippe Partarrieu <https://github.com/ppartarr>
|
250 |
+
- Andrew Owen Martin
|
251 |
+
- Adrian Ellis <https://github.com/adrianjellis>
|
252 |
+
- Nat Quayle Nelson <https://github.com/nqnstudios>
|
253 |
+
- Yanpeng Zhao <https://github.com/zhaoyanpeng>
|
254 |
+
- Matan Rak <https://github.com/matanrak>
|
255 |
+
- Nick Ulle <https://github.com/nick-ulle>
|
256 |
+
- Uday Krishna <https://github.com/udaykrishna>
|
257 |
+
- Osman Zubair <https://github.com/okz12>
|
258 |
+
- Viresh Gupta <https://github.com/virresh>
|
259 |
+
- Ondřej Cífka <https://github.com/cifkao>
|
260 |
+
- Iris X. Zhou <https://github.com/irisxzhou>
|
261 |
+
- Devashish Lal <https://github.com/BLaZeKiLL>
|
262 |
+
- Gerhard Kremer <https://github.com/GerhardKa>
|
263 |
+
- Nicolas Darr <https://github.com/ndarr>
|
264 |
+
- Hervé Nicol <https://github.com/hervenicol>
|
265 |
+
- Alexandre H. T. Dias <https://github.com/alexandredias3d>
|
266 |
+
- Daksh Shah <https://github.com/Daksh>
|
267 |
+
- Jacob Weightman <https://github.com/jacobdweightman>
|
268 |
+
- Bonifacio de Oliveira <https://github.com/Bonifacio2>
|
269 |
+
- Armins Bagrats Stepanjans <https://github.com/ab-10>
|
270 |
+
- Vassilis Palassopoulos <https://github.com/palasso>
|
271 |
+
- Ram Rachum <https://github.com/cool-RR>
|
272 |
+
- Or Sharir <https://github.com/orsharir>
|
273 |
+
- Denali Molitor <https://github.com/dmmolitor>
|
274 |
+
- Jacob Moorman <https://github.com/jdmoorman>
|
275 |
+
- Cory Nezin <https://github.com/corynezin>
|
276 |
+
- Matt Chaput
|
277 |
+
- Danny Sepler <https://github.com/dannysepler>
|
278 |
+
- Akshita Bhagia <https://github.com/AkshitaB>
|
279 |
+
- Pratap Yadav <https://github.com/prtpydv>
|
280 |
+
- Hiroki Teranishi <https://github.com/chantera>
|
281 |
+
- Ruben Cartuyvels <https://github.com/rubencart>
|
282 |
+
- Dalton Pearson <https://github.com/daltonpearson>
|
283 |
+
- Robby Horvath <https://github.com/robbyhorvath>
|
284 |
+
- Gavish Poddar <https://github.com/gavishpoddar>
|
285 |
+
- Saibo Geng <https://github.com/Saibo-creator>
|
286 |
+
- Ahmet Yildirim <https://github.com/RnDevelover>
|
287 |
+
- Yuta Nakamura <https://github.com/yutanakamura-tky>
|
288 |
+
- Adam Hawley <https://github.com/adamjhawley>
|
289 |
+
- Panagiotis Simakis <https://github.com/sp1thas>
|
290 |
+
- Richard Wang <https://github.com/richarddwang>
|
291 |
+
- Alexandre Perez-Lebel <https://github.com/aperezlebel>
|
292 |
+
- Fernando Carranza <https://github.com/fernandocar86>
|
293 |
+
- Martin Kondratzky <https://github.com/martinkondra>
|
294 |
+
- Heungson Lee <https://github.com/heungson>
|
295 |
+
- M.K. Pawelkiewicz <https://github.com/hamiltonianflow>
|
296 |
+
- Steven Thomas Smith <https://github.com/essandess>
|
297 |
+
- Jan Lennartz <https://github.com/Madnex>
|
298 |
+
|
299 |
+
## Others whose work we've taken and included in NLTK, but who didn't directly contribute it:
|
300 |
+
|
301 |
+
### Contributors to the Porter Stemmer
|
302 |
+
|
303 |
+
- Martin Porter
|
304 |
+
- Vivake Gupta
|
305 |
+
- Barry Wilkins
|
306 |
+
- Hiranmay Ghosh
|
307 |
+
- Chris Emerson
|
308 |
+
|
309 |
+
### Authors of snowball arabic stemmer algorithm
|
310 |
+
|
311 |
+
- Assem Chelli
|
312 |
+
- Abdelkrim Aries
|
313 |
+
- Lakhdar Benzahia
|
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/LICENSE.txt
ADDED
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
Apache License
|
3 |
+
Version 2.0, January 2004
|
4 |
+
http://www.apache.org/licenses/
|
5 |
+
|
6 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
7 |
+
|
8 |
+
1. Definitions.
|
9 |
+
|
10 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
11 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
12 |
+
|
13 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
14 |
+
the copyright owner that is granting the License.
|
15 |
+
|
16 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
17 |
+
other entities that control, are controlled by, or are under common
|
18 |
+
control with that entity. For the purposes of this definition,
|
19 |
+
"control" means (i) the power, direct or indirect, to cause the
|
20 |
+
direction or management of such entity, whether by contract or
|
21 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
22 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
23 |
+
|
24 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
25 |
+
exercising permissions granted by this License.
|
26 |
+
|
27 |
+
"Source" form shall mean the preferred form for making modifications,
|
28 |
+
including but not limited to software source code, documentation
|
29 |
+
source, and configuration files.
|
30 |
+
|
31 |
+
"Object" form shall mean any form resulting from mechanical
|
32 |
+
transformation or translation of a Source form, including but
|
33 |
+
not limited to compiled object code, generated documentation,
|
34 |
+
and conversions to other media types.
|
35 |
+
|
36 |
+
"Work" shall mean the work of authorship, whether in Source or
|
37 |
+
Object form, made available under the License, as indicated by a
|
38 |
+
copyright notice that is included in or attached to the work
|
39 |
+
(an example is provided in the Appendix below).
|
40 |
+
|
41 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
42 |
+
form, that is based on (or derived from) the Work and for which the
|
43 |
+
editorial revisions, annotations, elaborations, or other modifications
|
44 |
+
represent, as a whole, an original work of authorship. For the purposes
|
45 |
+
of this License, Derivative Works shall not include works that remain
|
46 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
47 |
+
the Work and Derivative Works thereof.
|
48 |
+
|
49 |
+
"Contribution" shall mean any work of authorship, including
|
50 |
+
the original version of the Work and any modifications or additions
|
51 |
+
to that Work or Derivative Works thereof, that is intentionally
|
52 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
53 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
54 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
55 |
+
means any form of electronic, verbal, or written communication sent
|
56 |
+
to the Licensor or its representatives, including but not limited to
|
57 |
+
communication on electronic mailing lists, source code control systems,
|
58 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
59 |
+
Licensor for the purpose of discussing and improving the Work, but
|
60 |
+
excluding communication that is conspicuously marked or otherwise
|
61 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
62 |
+
|
63 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
64 |
+
on behalf of whom a Contribution has been received by Licensor and
|
65 |
+
subsequently incorporated within the Work.
|
66 |
+
|
67 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
68 |
+
this License, each Contributor hereby grants to You a perpetual,
|
69 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
70 |
+
copyright license to reproduce, prepare Derivative Works of,
|
71 |
+
publicly display, publicly perform, sublicense, and distribute the
|
72 |
+
Work and such Derivative Works in Source or Object form.
|
73 |
+
|
74 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
75 |
+
this License, each Contributor hereby grants to You a perpetual,
|
76 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
77 |
+
(except as stated in this section) patent license to make, have made,
|
78 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
79 |
+
where such license applies only to those patent claims licensable
|
80 |
+
by such Contributor that are necessarily infringed by their
|
81 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
82 |
+
with the Work to which such Contribution(s) was submitted. If You
|
83 |
+
institute patent litigation against any entity (including a
|
84 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
85 |
+
or a Contribution incorporated within the Work constitutes direct
|
86 |
+
or contributory patent infringement, then any patent licenses
|
87 |
+
granted to You under this License for that Work shall terminate
|
88 |
+
as of the date such litigation is filed.
|
89 |
+
|
90 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
91 |
+
Work or Derivative Works thereof in any medium, with or without
|
92 |
+
modifications, and in Source or Object form, provided that You
|
93 |
+
meet the following conditions:
|
94 |
+
|
95 |
+
(a) You must give any other recipients of the Work or
|
96 |
+
Derivative Works a copy of this License; and
|
97 |
+
|
98 |
+
(b) You must cause any modified files to carry prominent notices
|
99 |
+
stating that You changed the files; and
|
100 |
+
|
101 |
+
(c) You must retain, in the Source form of any Derivative Works
|
102 |
+
that You distribute, all copyright, patent, trademark, and
|
103 |
+
attribution notices from the Source form of the Work,
|
104 |
+
excluding those notices that do not pertain to any part of
|
105 |
+
the Derivative Works; and
|
106 |
+
|
107 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
108 |
+
distribution, then any Derivative Works that You distribute must
|
109 |
+
include a readable copy of the attribution notices contained
|
110 |
+
within such NOTICE file, excluding those notices that do not
|
111 |
+
pertain to any part of the Derivative Works, in at least one
|
112 |
+
of the following places: within a NOTICE text file distributed
|
113 |
+
as part of the Derivative Works; within the Source form or
|
114 |
+
documentation, if provided along with the Derivative Works; or,
|
115 |
+
within a display generated by the Derivative Works, if and
|
116 |
+
wherever such third-party notices normally appear. The contents
|
117 |
+
of the NOTICE file are for informational purposes only and
|
118 |
+
do not modify the License. You may add Your own attribution
|
119 |
+
notices within Derivative Works that You distribute, alongside
|
120 |
+
or as an addendum to the NOTICE text from the Work, provided
|
121 |
+
that such additional attribution notices cannot be construed
|
122 |
+
as modifying the License.
|
123 |
+
|
124 |
+
You may add Your own copyright statement to Your modifications and
|
125 |
+
may provide additional or different license terms and conditions
|
126 |
+
for use, reproduction, or distribution of Your modifications, or
|
127 |
+
for any such Derivative Works as a whole, provided Your use,
|
128 |
+
reproduction, and distribution of the Work otherwise complies with
|
129 |
+
the conditions stated in this License.
|
130 |
+
|
131 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
132 |
+
any Contribution intentionally submitted for inclusion in the Work
|
133 |
+
by You to the Licensor shall be under the terms and conditions of
|
134 |
+
this License, without any additional terms or conditions.
|
135 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
136 |
+
the terms of any separate license agreement you may have executed
|
137 |
+
with Licensor regarding such Contributions.
|
138 |
+
|
139 |
+
6. Trademarks. This License does not grant permission to use the trade
|
140 |
+
names, trademarks, service marks, or product names of the Licensor,
|
141 |
+
except as required for reasonable and customary use in describing the
|
142 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
143 |
+
|
144 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
145 |
+
agreed to in writing, Licensor provides the Work (and each
|
146 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
147 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
148 |
+
implied, including, without limitation, any warranties or conditions
|
149 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
150 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
151 |
+
appropriateness of using or redistributing the Work and assume any
|
152 |
+
risks associated with Your exercise of permissions under this License.
|
153 |
+
|
154 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
155 |
+
whether in tort (including negligence), contract, or otherwise,
|
156 |
+
unless required by applicable law (such as deliberate and grossly
|
157 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
158 |
+
liable to You for damages, including any direct, indirect, special,
|
159 |
+
incidental, or consequential damages of any character arising as a
|
160 |
+
result of this License or out of the use or inability to use the
|
161 |
+
Work (including but not limited to damages for loss of goodwill,
|
162 |
+
work stoppage, computer failure or malfunction, or any and all
|
163 |
+
other commercial damages or losses), even if such Contributor
|
164 |
+
has been advised of the possibility of such damages.
|
165 |
+
|
166 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
167 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
168 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
169 |
+
or other liability obligations and/or rights consistent with this
|
170 |
+
License. However, in accepting such obligations, You may act only
|
171 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
172 |
+
of any other Contributor, and only if You agree to indemnify,
|
173 |
+
defend, and hold each Contributor harmless for any liability
|
174 |
+
incurred by, or claims asserted against, such Contributor by reason
|
175 |
+
of your accepting any such warranty or additional liability.
|
176 |
+
|
177 |
+
END OF TERMS AND CONDITIONS
|
178 |
+
|
179 |
+
APPENDIX: How to apply the Apache License to your work.
|
180 |
+
|
181 |
+
To apply the Apache License to your work, attach the following
|
182 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
183 |
+
replaced with your own identifying information. (Don't include
|
184 |
+
the brackets!) The text should be enclosed in the appropriate
|
185 |
+
comment syntax for the file format. We also recommend that a
|
186 |
+
file or class name and description of purpose be included on the
|
187 |
+
same "printed page" as the copyright notice for easier
|
188 |
+
identification within third-party archives.
|
189 |
+
|
190 |
+
Copyright [yyyy] [name of copyright owner]
|
191 |
+
|
192 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
193 |
+
you may not use this file except in compliance with the License.
|
194 |
+
You may obtain a copy of the License at
|
195 |
+
|
196 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
197 |
+
|
198 |
+
Unless required by applicable law or agreed to in writing, software
|
199 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
200 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
201 |
+
See the License for the specific language governing permissions and
|
202 |
+
limitations under the License.
|
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/METADATA
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: nltk
|
3 |
+
Version: 3.8.1
|
4 |
+
Summary: Natural Language Toolkit
|
5 |
+
Home-page: https://www.nltk.org/
|
6 |
+
Author: NLTK Team
|
7 |
+
Author-email: [email protected]
|
8 |
+
Maintainer: NLTK Team
|
9 |
+
Maintainer-email: [email protected]
|
10 |
+
License: Apache License, Version 2.0
|
11 |
+
Project-URL: Documentation, https://www.nltk.org/
|
12 |
+
Project-URL: Source Code, https://github.com/nltk/nltk
|
13 |
+
Project-URL: Issue Tracker, https://github.com/nltk/nltk/issues
|
14 |
+
Keywords: NLP,CL,natural language processing,computational linguistics,parsing,tagging,tokenizing,syntax,linguistics,language,natural language,text analytics
|
15 |
+
Platform: UNKNOWN
|
16 |
+
Classifier: Development Status :: 5 - Production/Stable
|
17 |
+
Classifier: Intended Audience :: Developers
|
18 |
+
Classifier: Intended Audience :: Education
|
19 |
+
Classifier: Intended Audience :: Information Technology
|
20 |
+
Classifier: Intended Audience :: Science/Research
|
21 |
+
Classifier: License :: OSI Approved :: Apache Software License
|
22 |
+
Classifier: Operating System :: OS Independent
|
23 |
+
Classifier: Programming Language :: Python :: 3.7
|
24 |
+
Classifier: Programming Language :: Python :: 3.8
|
25 |
+
Classifier: Programming Language :: Python :: 3.9
|
26 |
+
Classifier: Programming Language :: Python :: 3.10
|
27 |
+
Classifier: Programming Language :: Python :: 3.11
|
28 |
+
Classifier: Topic :: Scientific/Engineering
|
29 |
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
30 |
+
Classifier: Topic :: Scientific/Engineering :: Human Machine Interfaces
|
31 |
+
Classifier: Topic :: Scientific/Engineering :: Information Analysis
|
32 |
+
Classifier: Topic :: Text Processing
|
33 |
+
Classifier: Topic :: Text Processing :: Filters
|
34 |
+
Classifier: Topic :: Text Processing :: General
|
35 |
+
Classifier: Topic :: Text Processing :: Indexing
|
36 |
+
Classifier: Topic :: Text Processing :: Linguistic
|
37 |
+
Requires-Python: >=3.7
|
38 |
+
Requires-Dist: click
|
39 |
+
Requires-Dist: joblib
|
40 |
+
Requires-Dist: regex (>=2021.8.3)
|
41 |
+
Requires-Dist: tqdm
|
42 |
+
Provides-Extra: all
|
43 |
+
Requires-Dist: scikit-learn ; extra == 'all'
|
44 |
+
Requires-Dist: python-crfsuite ; extra == 'all'
|
45 |
+
Requires-Dist: requests ; extra == 'all'
|
46 |
+
Requires-Dist: numpy ; extra == 'all'
|
47 |
+
Requires-Dist: pyparsing ; extra == 'all'
|
48 |
+
Requires-Dist: twython ; extra == 'all'
|
49 |
+
Requires-Dist: scipy ; extra == 'all'
|
50 |
+
Requires-Dist: matplotlib ; extra == 'all'
|
51 |
+
Provides-Extra: corenlp
|
52 |
+
Requires-Dist: requests ; extra == 'corenlp'
|
53 |
+
Provides-Extra: machine_learning
|
54 |
+
Requires-Dist: numpy ; extra == 'machine_learning'
|
55 |
+
Requires-Dist: python-crfsuite ; extra == 'machine_learning'
|
56 |
+
Requires-Dist: scikit-learn ; extra == 'machine_learning'
|
57 |
+
Requires-Dist: scipy ; extra == 'machine_learning'
|
58 |
+
Provides-Extra: plot
|
59 |
+
Requires-Dist: matplotlib ; extra == 'plot'
|
60 |
+
Provides-Extra: tgrep
|
61 |
+
Requires-Dist: pyparsing ; extra == 'tgrep'
|
62 |
+
Provides-Extra: twitter
|
63 |
+
Requires-Dist: twython ; extra == 'twitter'
|
64 |
+
|
65 |
+
The Natural Language Toolkit (NLTK) is a Python package for
|
66 |
+
natural language processing. NLTK requires Python 3.7, 3.8, 3.9, 3.10 or 3.11.
|
67 |
+
|
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/README.md
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit (NLTK)
|
2 |
+
[](https://pypi.python.org/pypi/nltk)
|
3 |
+

|
4 |
+
|
5 |
+
NLTK -- the Natural Language Toolkit -- is a suite of open source Python
|
6 |
+
modules, data sets, and tutorials supporting research and development in Natural
|
7 |
+
Language Processing. NLTK requires Python version 3.7, 3.8, 3.9, 3.10 or 3.11.
|
8 |
+
|
9 |
+
For documentation, please visit [nltk.org](https://www.nltk.org/).
|
10 |
+
|
11 |
+
|
12 |
+
## Contributing
|
13 |
+
|
14 |
+
Do you want to contribute to NLTK development? Great!
|
15 |
+
Please read [CONTRIBUTING.md](CONTRIBUTING.md) for more details.
|
16 |
+
|
17 |
+
See also [how to contribute to NLTK](https://www.nltk.org/contribute.html).
|
18 |
+
|
19 |
+
|
20 |
+
## Donate
|
21 |
+
|
22 |
+
Have you found the toolkit helpful? Please support NLTK development by donating
|
23 |
+
to the project via PayPal, using the link on the NLTK homepage.
|
24 |
+
|
25 |
+
|
26 |
+
## Citing
|
27 |
+
|
28 |
+
If you publish work that uses NLTK, please cite the NLTK book, as follows:
|
29 |
+
|
30 |
+
Bird, Steven, Edward Loper and Ewan Klein (2009).
|
31 |
+
Natural Language Processing with Python. O'Reilly Media Inc.
|
32 |
+
|
33 |
+
|
34 |
+
## Copyright
|
35 |
+
|
36 |
+
Copyright (C) 2001-2023 NLTK Project
|
37 |
+
|
38 |
+
For license information, see [LICENSE.txt](LICENSE.txt).
|
39 |
+
|
40 |
+
[AUTHORS.md](AUTHORS.md) contains a list of everyone who has contributed to NLTK.
|
41 |
+
|
42 |
+
|
43 |
+
### Redistributing
|
44 |
+
|
45 |
+
- NLTK source code is distributed under the Apache 2.0 License.
|
46 |
+
- NLTK documentation is distributed under the Creative Commons
|
47 |
+
Attribution-Noncommercial-No Derivative Works 3.0 United States license.
|
48 |
+
- NLTK corpora are provided under the terms given in the README file for each
|
49 |
+
corpus; all are redistributable and available for non-commercial use.
|
50 |
+
- NLTK may be freely redistributed, subject to the provisions of these licenses.
|
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/RECORD
ADDED
@@ -0,0 +1,782 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
../../../bin/nltk,sha256=BPORS870sJtlzIbvcsuWYIoNzWWuAW4oAPgSh-ugPCc,239
|
2 |
+
nltk-3.8.1.dist-info/AUTHORS.md,sha256=lwegiKq14iCouEfpgu85VSAWadP2X1MkLhUsgYBfPOI,7628
|
3 |
+
nltk-3.8.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
4 |
+
nltk-3.8.1.dist-info/LICENSE.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560
|
5 |
+
nltk-3.8.1.dist-info/METADATA,sha256=CUHc77qyEPWGmH6DiO6622SIsRU2fGENF_LCKUzGEOI,2847
|
6 |
+
nltk-3.8.1.dist-info/README.md,sha256=_oLlVxk8v-ARv0t4wAyrPKZ8KmLA2y1tlhJ4C3QjRk0,1789
|
7 |
+
nltk-3.8.1.dist-info/RECORD,,
|
8 |
+
nltk-3.8.1.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
|
9 |
+
nltk-3.8.1.dist-info/entry_points.txt,sha256=SK6SzMicwUtBiwUOmv5P1ZVs0h-xqey6PnRpsUGGx5c,37
|
10 |
+
nltk-3.8.1.dist-info/top_level.txt,sha256=YoQ-mwqckmTv1Qktmlk5Ylb6lDG77jg5qwoEB7c-pXo,5
|
11 |
+
nltk/VERSION,sha256=932VxvO5Mh-hWNyZVXsqpdneLGKgY0kKcA4_XVSSvyQ,7
|
12 |
+
nltk/__init__.py,sha256=RSji7RAoc5vyHzOA6pB6LPpCUxB8o68igd6CrYElOGA,6432
|
13 |
+
nltk/__pycache__/__init__.cpython-310.pyc,,
|
14 |
+
nltk/__pycache__/book.cpython-310.pyc,,
|
15 |
+
nltk/__pycache__/cli.cpython-310.pyc,,
|
16 |
+
nltk/__pycache__/collections.cpython-310.pyc,,
|
17 |
+
nltk/__pycache__/collocations.cpython-310.pyc,,
|
18 |
+
nltk/__pycache__/compat.cpython-310.pyc,,
|
19 |
+
nltk/__pycache__/data.cpython-310.pyc,,
|
20 |
+
nltk/__pycache__/decorators.cpython-310.pyc,,
|
21 |
+
nltk/__pycache__/downloader.cpython-310.pyc,,
|
22 |
+
nltk/__pycache__/featstruct.cpython-310.pyc,,
|
23 |
+
nltk/__pycache__/grammar.cpython-310.pyc,,
|
24 |
+
nltk/__pycache__/help.cpython-310.pyc,,
|
25 |
+
nltk/__pycache__/internals.cpython-310.pyc,,
|
26 |
+
nltk/__pycache__/jsontags.cpython-310.pyc,,
|
27 |
+
nltk/__pycache__/langnames.cpython-310.pyc,,
|
28 |
+
nltk/__pycache__/lazyimport.cpython-310.pyc,,
|
29 |
+
nltk/__pycache__/probability.cpython-310.pyc,,
|
30 |
+
nltk/__pycache__/text.cpython-310.pyc,,
|
31 |
+
nltk/__pycache__/tgrep.cpython-310.pyc,,
|
32 |
+
nltk/__pycache__/toolbox.cpython-310.pyc,,
|
33 |
+
nltk/__pycache__/treeprettyprinter.cpython-310.pyc,,
|
34 |
+
nltk/__pycache__/treetransforms.cpython-310.pyc,,
|
35 |
+
nltk/__pycache__/util.cpython-310.pyc,,
|
36 |
+
nltk/__pycache__/wsd.cpython-310.pyc,,
|
37 |
+
nltk/app/__init__.py,sha256=xGZbbDC3xv67XnHHusxZmCPMNqj07BM9W6ZSlkWn9eQ,1578
|
38 |
+
nltk/app/__pycache__/__init__.cpython-310.pyc,,
|
39 |
+
nltk/app/__pycache__/chartparser_app.cpython-310.pyc,,
|
40 |
+
nltk/app/__pycache__/chunkparser_app.cpython-310.pyc,,
|
41 |
+
nltk/app/__pycache__/collocations_app.cpython-310.pyc,,
|
42 |
+
nltk/app/__pycache__/concordance_app.cpython-310.pyc,,
|
43 |
+
nltk/app/__pycache__/nemo_app.cpython-310.pyc,,
|
44 |
+
nltk/app/__pycache__/rdparser_app.cpython-310.pyc,,
|
45 |
+
nltk/app/__pycache__/srparser_app.cpython-310.pyc,,
|
46 |
+
nltk/app/__pycache__/wordfreq_app.cpython-310.pyc,,
|
47 |
+
nltk/app/__pycache__/wordnet_app.cpython-310.pyc,,
|
48 |
+
nltk/app/chartparser_app.py,sha256=8FX3-eJQmB-8LT9k-lQJe_y5dSO3Ly4AD2K_wIs9FuE,88195
|
49 |
+
nltk/app/chunkparser_app.py,sha256=tbEPtYtccyTcbSCUhFhhbkjCVi_rtel4EKPSogeJOT8,58322
|
50 |
+
nltk/app/collocations_app.py,sha256=gJBWxNmkUjXQWkruqcOSE8l51Md2fcM3RmZlrJHJZK4,14664
|
51 |
+
nltk/app/concordance_app.py,sha256=HjLN9ybKbjbKqkhJNUakivwIPojvDxqvwnqb7u07BYE,24882
|
52 |
+
nltk/app/nemo_app.py,sha256=6ZBJXlJWKWoYnsrEy3Yy6IeFxcy3FNaGWK6QnMYEy4E,12305
|
53 |
+
nltk/app/rdparser_app.py,sha256=j4tMGNLnwrwkVw3MyMr3-56TXAwAIIEo-v0yWyjDKEQ,37781
|
54 |
+
nltk/app/srparser_app.py,sha256=UQwxqEPDfSYJTw22SSdp5EUX0QwlbztyH4EYHtBerw0,34401
|
55 |
+
nltk/app/wordfreq_app.py,sha256=0mzSrNosW3Wh_J_5FdJV8Bq-F_a2x5HRV3iQAI03fnQ,957
|
56 |
+
nltk/app/wordnet_app.py,sha256=Ut5VU3hzM4inRayi4c5uyNsbAz7YcGW7_q8BBmrJpPs,35574
|
57 |
+
nltk/book.py,sha256=enAPUeJxxAXY0C60vlmPHCVhUxVY2K2gx3wWPH6tU6k,3912
|
58 |
+
nltk/ccg/__init__.py,sha256=Gz2z13lWdN_wdcvn78rxJGvU23EKIN_sNm5twz_2nWw,915
|
59 |
+
nltk/ccg/__pycache__/__init__.cpython-310.pyc,,
|
60 |
+
nltk/ccg/__pycache__/api.cpython-310.pyc,,
|
61 |
+
nltk/ccg/__pycache__/chart.cpython-310.pyc,,
|
62 |
+
nltk/ccg/__pycache__/combinator.cpython-310.pyc,,
|
63 |
+
nltk/ccg/__pycache__/lexicon.cpython-310.pyc,,
|
64 |
+
nltk/ccg/__pycache__/logic.cpython-310.pyc,,
|
65 |
+
nltk/ccg/api.py,sha256=3xzrsFkp0XM_SihDIEODQEMgFh-KYBaCQs4WNStLTgU,10360
|
66 |
+
nltk/ccg/chart.py,sha256=2lyYNM8PY6AhucRmNetqgylPfKz3Pzn4faAKtkvYuFA,14147
|
67 |
+
nltk/ccg/combinator.py,sha256=1C5Tqwhp-diD7rHtUfpPbVt4v1a7oPBY0bkHPm52OD4,10633
|
68 |
+
nltk/ccg/lexicon.py,sha256=9rC11EzdzOVMybBt6TeYdVw4xj73Ufy7NS1cTqtq5sU,9863
|
69 |
+
nltk/ccg/logic.py,sha256=MEukXOQu6dX-i-irRH3Nko5D2ElpDGjVosdgHPZs8wg,1871
|
70 |
+
nltk/chat/__init__.py,sha256=4aSic0g0Zwhlxm7PC_t-0JZjChyKcPXTS0hoWTyTvLw,1556
|
71 |
+
nltk/chat/__pycache__/__init__.cpython-310.pyc,,
|
72 |
+
nltk/chat/__pycache__/eliza.cpython-310.pyc,,
|
73 |
+
nltk/chat/__pycache__/iesha.cpython-310.pyc,,
|
74 |
+
nltk/chat/__pycache__/rude.cpython-310.pyc,,
|
75 |
+
nltk/chat/__pycache__/suntsu.cpython-310.pyc,,
|
76 |
+
nltk/chat/__pycache__/util.cpython-310.pyc,,
|
77 |
+
nltk/chat/__pycache__/zen.cpython-310.pyc,,
|
78 |
+
nltk/chat/eliza.py,sha256=27GYLQfKpMzsBvPixXQnZHqZSmaI_8H3mAvuPZAUVNw,9626
|
79 |
+
nltk/chat/iesha.py,sha256=WassBbqcT2LbxZHda7vwcIBeIOfzefZ19cxUGay-NNM,4407
|
80 |
+
nltk/chat/rude.py,sha256=JMoqOg2_r30pNRwknXWG8qIi_0mm__AnI7tTM1orj2I,3289
|
81 |
+
nltk/chat/suntsu.py,sha256=dlYCRQ3INyOXbfL0qwyLaq1E-fqIVS8weRk2gOC8tq0,7185
|
82 |
+
nltk/chat/util.py,sha256=dbgxikuBJGP6YhDPFw_ZYTSsBqpqEV5HU1ipWfj21Bw,4014
|
83 |
+
nltk/chat/zen.py,sha256=KtZcUzKXlwyfL_tQpa9rtuNB12PAscwaWt2pbvk6GcM,11679
|
84 |
+
nltk/chunk/__init__.py,sha256=hIssYRWZj_6YmHQOhJe3DRlvqbehf-Y7e6kSy8Sicp0,7597
|
85 |
+
nltk/chunk/__pycache__/__init__.cpython-310.pyc,,
|
86 |
+
nltk/chunk/__pycache__/api.cpython-310.pyc,,
|
87 |
+
nltk/chunk/__pycache__/named_entity.cpython-310.pyc,,
|
88 |
+
nltk/chunk/__pycache__/regexp.cpython-310.pyc,,
|
89 |
+
nltk/chunk/__pycache__/util.cpython-310.pyc,,
|
90 |
+
nltk/chunk/api.py,sha256=-gEfVh1nv3CO-YXV3kTSMNDS4_sbuKnM3xVuTq2oc60,1946
|
91 |
+
nltk/chunk/named_entity.py,sha256=v__H3Rply3PvrzKRUM2ktkLQcMYJc_14qHFbiKqqaMo,11140
|
92 |
+
nltk/chunk/regexp.py,sha256=KXfm9-KJNqSRSJFfV5192yErXVuLH2jmOeCJbROkPRU,55980
|
93 |
+
nltk/chunk/util.py,sha256=Ll5PB0ozF7rwNJtsdM6YiA1zktVLO7MOaQtJDR2Qx4g,21311
|
94 |
+
nltk/classify/__init__.py,sha256=2s2RPR2IPix1aXumcnpzKSYJ8BzaC-VsKcpVHHZPT0E,4596
|
95 |
+
nltk/classify/__pycache__/__init__.cpython-310.pyc,,
|
96 |
+
nltk/classify/__pycache__/api.cpython-310.pyc,,
|
97 |
+
nltk/classify/__pycache__/decisiontree.cpython-310.pyc,,
|
98 |
+
nltk/classify/__pycache__/maxent.cpython-310.pyc,,
|
99 |
+
nltk/classify/__pycache__/megam.cpython-310.pyc,,
|
100 |
+
nltk/classify/__pycache__/naivebayes.cpython-310.pyc,,
|
101 |
+
nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc,,
|
102 |
+
nltk/classify/__pycache__/rte_classify.cpython-310.pyc,,
|
103 |
+
nltk/classify/__pycache__/scikitlearn.cpython-310.pyc,,
|
104 |
+
nltk/classify/__pycache__/senna.cpython-310.pyc,,
|
105 |
+
nltk/classify/__pycache__/svm.cpython-310.pyc,,
|
106 |
+
nltk/classify/__pycache__/tadm.cpython-310.pyc,,
|
107 |
+
nltk/classify/__pycache__/textcat.cpython-310.pyc,,
|
108 |
+
nltk/classify/__pycache__/util.cpython-310.pyc,,
|
109 |
+
nltk/classify/__pycache__/weka.cpython-310.pyc,,
|
110 |
+
nltk/classify/api.py,sha256=PN1b_jw2InZWMNuzMaPSs2PP-f9_7IZfokohkKd0Xro,6625
|
111 |
+
nltk/classify/decisiontree.py,sha256=HL-V9gcFYX2uYaonc3glQq_CAEqyCxKTb1FnKxkpx8U,13083
|
112 |
+
nltk/classify/maxent.py,sha256=pJZFnshxF4jfYlY-8zgf3N8P5jVczqxTOCAI6HrVTqA,60921
|
113 |
+
nltk/classify/megam.py,sha256=4d2NlMAyrXca2TB_phpff41-qY8YZdqx6LrYLL5s0jI,6396
|
114 |
+
nltk/classify/naivebayes.py,sha256=fahYSKoSAMisUKOjXxkIDsRRU7swLyFMSAloL8mToNU,10713
|
115 |
+
nltk/classify/positivenaivebayes.py,sha256=WckMp6Olu6x6Ku__NCRVPO2n6WY_AI2yr1y46cy-IgU,7412
|
116 |
+
nltk/classify/rte_classify.py,sha256=d7BhvcXp-j1ovcbFs0jz2I22nZtn1pBvfT79kpc1xnY,6301
|
117 |
+
nltk/classify/scikitlearn.py,sha256=_D3TQC-jxEn-eq3Y7Ydc1OczkhIAbednxDpcFpbj99U,5548
|
118 |
+
nltk/classify/senna.py,sha256=WGne67HygHBl85t4DKqTjWgjILTVOaXoDrQgV7odLm8,6931
|
119 |
+
nltk/classify/svm.py,sha256=Izn33z8jQhQ70hJdbli-HUc_dly9O2sxMso0v1MZ5dY,525
|
120 |
+
nltk/classify/tadm.py,sha256=jGR9ga8n1rQUCoRg49kkSyZkJ7thteHc0TAApdtVaVU,3555
|
121 |
+
nltk/classify/textcat.py,sha256=BeHyxtRXdqAGJtnipjORVrCIjWTHVa0OJm1-WOoMybI,6035
|
122 |
+
nltk/classify/util.py,sha256=1Puz0ks5SrYXYQ8eJbcJpqRtGdwS1Hji0TMQk70ZCy4,12461
|
123 |
+
nltk/classify/weka.py,sha256=em2Rij5vMKo5LFZbHWBsPZlDkBb-QOLMMEGIPjSgBoI,12938
|
124 |
+
nltk/cli.py,sha256=ZdakKKRjRmDn_b3e4TL1UNqaTF4VsLSMgQ4juVWstEM,1897
|
125 |
+
nltk/cluster/__init__.py,sha256=1mPkvd-mjaRXe0Aha9qx5Gn_Dr39BRovq-qT74bUi54,4361
|
126 |
+
nltk/cluster/__pycache__/__init__.cpython-310.pyc,,
|
127 |
+
nltk/cluster/__pycache__/api.cpython-310.pyc,,
|
128 |
+
nltk/cluster/__pycache__/em.cpython-310.pyc,,
|
129 |
+
nltk/cluster/__pycache__/gaac.cpython-310.pyc,,
|
130 |
+
nltk/cluster/__pycache__/kmeans.cpython-310.pyc,,
|
131 |
+
nltk/cluster/__pycache__/util.cpython-310.pyc,,
|
132 |
+
nltk/cluster/api.py,sha256=sranVby2NHrTr3vmefGkZgREzzOjlE94MLCUbO63rlU,2162
|
133 |
+
nltk/cluster/em.py,sha256=uxd6qQ0T1PSE5e_3q41yk66OlkPMX0ws4L0J7ciq1YM,8419
|
134 |
+
nltk/cluster/gaac.py,sha256=c_2ewAkcLdWLhW0WjUedESoV7I1UPGlNeY_BNhOTQqY,5921
|
135 |
+
nltk/cluster/kmeans.py,sha256=1Ik_3_pIjCfpgDag0LmP4hi2SQiRIwy8sQqtXFSuHYA,8592
|
136 |
+
nltk/cluster/util.py,sha256=TVZtWob_8SoZOwG6NtsBPk4fOD40ZVjxnHj9oPa6eC8,10039
|
137 |
+
nltk/collections.py,sha256=DiSo-vicLp7UQLpDiDTljFwdDLdQVUi7UVAdgublO8A,23673
|
138 |
+
nltk/collocations.py,sha256=NWC5upNNulRI_FYmCHX0rNeZCkCQxqVXMlLnOC_bwa8,14964
|
139 |
+
nltk/compat.py,sha256=7f0Eg2_MbidKae8brT_oCuqDSHcfmOskS88Y6-lycmw,1307
|
140 |
+
nltk/corpus/__init__.py,sha256=qGkuNZ2GIP4qDOR6mMVtJHnC7oMV3kRxiNr8aGFHhfs,17359
|
141 |
+
nltk/corpus/__pycache__/__init__.cpython-310.pyc,,
|
142 |
+
nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc,,
|
143 |
+
nltk/corpus/__pycache__/util.cpython-310.pyc,,
|
144 |
+
nltk/corpus/europarl_raw.py,sha256=aXMKViytBRbry4J0FrO0P20JTOV2bgjuJQ5hOxFkJ-0,1896
|
145 |
+
nltk/corpus/reader/__init__.py,sha256=urxkSILuhBlGI9qvsIlhQap6nFKSfkKrYi-rb4LCV5U,6677
|
146 |
+
nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc,,
|
147 |
+
nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc,,
|
148 |
+
nltk/corpus/reader/__pycache__/api.cpython-310.pyc,,
|
149 |
+
nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc,,
|
150 |
+
nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc,,
|
151 |
+
nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc,,
|
152 |
+
nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc,,
|
153 |
+
nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc,,
|
154 |
+
nltk/corpus/reader/__pycache__/childes.cpython-310.pyc,,
|
155 |
+
nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc,,
|
156 |
+
nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc,,
|
157 |
+
nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc,,
|
158 |
+
nltk/corpus/reader/__pycache__/conll.cpython-310.pyc,,
|
159 |
+
nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc,,
|
160 |
+
nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc,,
|
161 |
+
nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc,,
|
162 |
+
nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc,,
|
163 |
+
nltk/corpus/reader/__pycache__/indian.cpython-310.pyc,,
|
164 |
+
nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc,,
|
165 |
+
nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc,,
|
166 |
+
nltk/corpus/reader/__pycache__/lin.cpython-310.pyc,,
|
167 |
+
nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc,,
|
168 |
+
nltk/corpus/reader/__pycache__/mte.cpython-310.pyc,,
|
169 |
+
nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc,,
|
170 |
+
nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc,,
|
171 |
+
nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc,,
|
172 |
+
nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc,,
|
173 |
+
nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc,,
|
174 |
+
nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc,,
|
175 |
+
nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc,,
|
176 |
+
nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc,,
|
177 |
+
nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc,,
|
178 |
+
nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc,,
|
179 |
+
nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc,,
|
180 |
+
nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc,,
|
181 |
+
nltk/corpus/reader/__pycache__/rte.cpython-310.pyc,,
|
182 |
+
nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc,,
|
183 |
+
nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc,,
|
184 |
+
nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc,,
|
185 |
+
nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc,,
|
186 |
+
nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc,,
|
187 |
+
nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc,,
|
188 |
+
nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc,,
|
189 |
+
nltk/corpus/reader/__pycache__/timit.cpython-310.pyc,,
|
190 |
+
nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc,,
|
191 |
+
nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc,,
|
192 |
+
nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc,,
|
193 |
+
nltk/corpus/reader/__pycache__/util.cpython-310.pyc,,
|
194 |
+
nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc,,
|
195 |
+
nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc,,
|
196 |
+
nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc,,
|
197 |
+
nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc,,
|
198 |
+
nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc,,
|
199 |
+
nltk/corpus/reader/aligned.py,sha256=OJUpm8HyzqR0e8hPHd5CrAmH1bfQfXV2hZ4KCak0Zzw,5005
|
200 |
+
nltk/corpus/reader/api.py,sha256=Dhu491gmTJnWSilm6lWdQBN59RM1aIT410mrbiDwD1k,19671
|
201 |
+
nltk/corpus/reader/bcp47.py,sha256=I27Lj4hMW2IRM62RyXeK9OX-kSeAvEEkoXVWN1HwEiY,8534
|
202 |
+
nltk/corpus/reader/bnc.py,sha256=AhFjLzhCpgijSGvhhHiB5AT5vmAPRP6LO-bHEXOEXas,9716
|
203 |
+
nltk/corpus/reader/bracket_parse.py,sha256=1EfRuNmCU9DESWjwalyiDZhQuOwb3IYDjL-aAXlId_U,9619
[... RECORD entries 205-782: the remaining nltk corpus readers, the draw, inference, lm, metrics, misc, parse, sem, sentiment, stem, tag, tbl, test, tokenize, translate, tree and twitter packages, top-level modules such as nltk/data.py, nltk/downloader.py, nltk/probability.py, nltk/text.py, nltk/util.py and nltk/wsd.py, and their __pycache__/*.pyc files, each recorded as path,sha256=<digest>,size (pyc rows carry no hash or size) ...]
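Each RECORD row above follows the wheel manifest convention path,sha256=<urlsafe-base64 digest>,size, with empty hash and size fields for generated files such as the __pycache__/*.pyc entries. The following is a minimal, hedged sketch (not part of nltk or of this environment) of how such a manifest can be checked; the SITE path is only an assumption taken from the directory layout shown in this diff.

import base64
import csv
import hashlib
from pathlib import Path

# Assumed location, copied from the paths shown in this diff.
SITE = Path("llmeval-env/lib/python3.10/site-packages")


def verify_record(record_path: Path) -> None:
    """Check every 'path,sha256=<digest>,size' row of a wheel RECORD manifest."""
    with open(record_path, newline="") as fh:
        for path, hash_spec, size in csv.reader(fh):
            if not hash_spec:  # RECORD itself and *.pyc rows are listed without a hash
                continue
            algo, _, expected = hash_spec.partition("=")
            data = (record_path.parent.parent / path).read_bytes()
            digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest()).rstrip(b"=").decode()
            if digest != expected or len(data) != int(size):
                raise ValueError(f"RECORD mismatch for {path}")


verify_record(SITE / "nltk-3.8.1.dist-info" / "RECORD")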
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.37.0)
Root-Is-Purelib: true
Tag: py3-none-any
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt
ADDED
@@ -0,0 +1,3 @@
[console_scripts]
nltk=nltk.cli:cli
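The console_scripts entry above is what makes pip generate an nltk executable that calls nltk.cli:cli. As a small sketch of how that mapping can be resolved programmatically on Python 3.10 (it assumes the nltk dist-info shown here is actually installed in the active environment):

from importlib.metadata import entry_points

# Look up the console script declared in entry_points.txt and load its target callable.
(ep,) = entry_points(group="console_scripts", name="nltk")
cli = ep.load()   # equivalent to "from nltk.cli import cli"
print(ep.value)   # -> "nltk.cli:cli"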
llmeval-env/lib/python3.10/site-packages/nltk-3.8.1.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
nltk
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__init__.py
ADDED
@@ -0,0 +1,19 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .config import AdaptionPromptConfig
from .layer import AdaptedAttention
from .model import AdaptionPromptModel


__all__ = ["AdaptionPromptConfig", "AdaptedAttention", "AdaptionPromptModel"]
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (382 Bytes).
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc
ADDED
Binary file (2.12 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc
ADDED
Binary file (3.29 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc
ADDED
Binary file (5.56 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (3.56 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/config.py
ADDED
@@ -0,0 +1,80 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import namedtuple
from dataclasses import dataclass, field

from peft.config import PeftConfig
from peft.utils import PeftType

from .utils import llama_compute_query_states


@dataclass
class AdaptionPromptConfig(PeftConfig):
    """Stores the configuration of an [`AdaptionPromptModel`]."""

    target_modules: str = field(
        default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."}
    )
    adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"})
    adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"})

    def __post_init__(self):
        self.peft_type = PeftType.ADAPTION_PROMPT

    @property
    def is_adaption_prompt(self) -> bool:
        """Return True if this is an adaption prompt config."""
        return True


# Contains the config that is specific to a transformers model type.
ModelTypeConfig = namedtuple(
    "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"]
)

# Mapping of transformers model types to their specific configuration.
TRANSFORMERS_MODEL_CONFIG = {
    "llama": ModelTypeConfig(
        compute_query_states=llama_compute_query_states,
        target_modules="self_attn",
        k_proj_layer="k_proj",
        v_proj_layer="v_proj",
        o_proj_layer="o_proj",
    ),
    "mistral": ModelTypeConfig(  # same as llama,
        compute_query_states=llama_compute_query_states,
        target_modules="self_attn",
        k_proj_layer="k_proj",
        v_proj_layer="v_proj",
        o_proj_layer="o_proj",
    ),
}


def prepare_config(
    peft_config: AdaptionPromptConfig,
    model,
) -> AdaptionPromptConfig:
    """Prepare the config based on the llama model type."""
    if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
        raise ValueError(f"Unsupported model type for adaption prompt: '{model.config.model_type}'.")

    model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]

    if peft_config.target_modules is None:
        peft_config.target_modules = model_config.target_modules

    return peft_config
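prepare_config only fills in target_modules when the user leaves it unset, and rejects model types missing from TRANSFORMERS_MODEL_CONFIG. A minimal, hedged sketch of that behaviour (dummy_llama is purely illustrative, not part of peft; running it requires peft and its dependencies to be installed):

from types import SimpleNamespace

from peft.tuners.adaption_prompt.config import AdaptionPromptConfig, prepare_config

# Stand-in for a transformers model: prepare_config only reads .config.model_type.
dummy_llama = SimpleNamespace(config=SimpleNamespace(model_type="llama"))

cfg = AdaptionPromptConfig(adapter_len=10, adapter_layers=30)
cfg = prepare_config(cfg, dummy_llama)
print(cfg.target_modules)  # -> "self_attn", taken from TRANSFORMERS_MODEL_CONFIG["llama"]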
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/layer.py
ADDED
@@ -0,0 +1,128 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from .config import TRANSFORMERS_MODEL_CONFIG


class AdaptedAttention(nn.Module):
    """This module wraps a LLamaAttention module and injects adaption prompts."""

    def __init__(self, model_type: str, adapter_len: int, model):
        """
        Initialize object.

        Args:
            model_type: The transformer model type. This is used to retrieve the right method to
                compute query states.
            adapter_len: The length of the adaption prompt to insert.
            model: The original transformer attention module that is being wrapped.
        """
        assert not isinstance(model, AdaptedAttention)
        super().__init__()
        self.model_type = model_type
        self.model = model
        self.adapter_len = adapter_len
        # Assume all parameters of the attention model we are wrapping are on the same device.
        device = next(model.parameters()).device
        # Don't think this was specified in the paper, but we follow the official repo which used an Embedding
        # which initializes the tokens with standard normal values.
        # https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L234
        # (bsz, adapter_len, hidden_size)
        target_dtype = (
            model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32
        )
        self.adaption_prompt = nn.Parameter(
            torch.empty(1, adapter_len, self.model.hidden_size, device=device, dtype=target_dtype).normal_()
        )
        # Initialize the gate to 0 as this is "zero-init".
        self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype))

    def forward(self, **kwargs):
        """
        Forward pass for the adapter which wraps the original LlamaAttention module.

        "Official" paper implementation:
        https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L141

        Args:
            kwargs: See the original LlamaAttention module.
        """
        if kwargs.get("output_attention", False):
            raise NotImplementedError("output_attention is not currently supported.")

        output, _, past_key_value = self.model(**kwargs)
        bsz = output.shape[0]
        q_len = output.shape[1]
        embed_dim = output.shape[2]
        k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer
        v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer
        o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer
        factor = (
            self.model.k_proj.in_features // self.model.k_proj.out_features
        )  # Mistral has different input and output dimension for k_proj and v_proj layers

        if k_proj_layer == v_proj_layer:
            _, key, value = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2)
        else:
            key = getattr(self.model, k_proj_layer)(self.adaption_prompt)
            value = getattr(self.model, v_proj_layer)(self.adaption_prompt)

        # (bsz, num_key_value_heads, adapter_len, head_dim)
        adapter_k = (
            key.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim)
            .repeat(bsz, 1, 1, 1)
            .transpose(1, 2)
        )
        adapter_v = (
            value.view(1, self.adapter_len, (self.model.num_heads // factor), self.model.head_dim)
            .repeat(bsz, 1, 1, 1)
            .transpose(1, 2)
        )
        # Below is taken from https://github.com/huggingface/transformers/blob/e547458c43dfdbbb8f6a7757237e234c44e20a8f/src/transformers/models/mistral/modeling_mistral.py#L181
        # (bsz, num_heads, adapter_len, head_dim)
        adapter_k = torch.repeat_interleave(adapter_k, repeats=factor, dim=1)
        adapter_v = torch.repeat_interleave(adapter_v, repeats=factor, dim=1)
        # Recompute query states.
        compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states
        # (bsz, num_heads, q_len, head_dim)
        query_states = compute_query_states(model=self.model, **kwargs)

        previous_dtype = query_states.dtype

        # (bsz, num_heads, q_len, adapter_len)
        scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt(
            self.model.head_dim
        )
        # Upcast attention to fp32
        # (bsz, num_heads, q_len, adapter_len)
        scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype)
        # (bsz, q_len, num_heads * head_dim)
        adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)

        # (bsz, q_len, hidden_size)
        if o_proj_layer is not None:
            adapter_output = getattr(self.model, o_proj_layer)(adapter_output)

        # Add adaption prompt output to original output.
        output = output + adapter_output

        # Restore original dtype.
        output = output.to(previous_dtype)
        return output, None, past_key_value
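The forward pass above boils down to a gated cross-attention from the recomputed query states to the learned prompt: the scores have shape (bsz, num_heads, q_len, adapter_len), and the weighted values are folded back to (bsz, q_len, hidden_size) before being added to the wrapped module's output. The standalone sketch below reproduces just that shape bookkeeping with random tensors; all sizes are made-up examples, not values from any real model.

import math

import torch
import torch.nn.functional as F

bsz, num_heads, head_dim, q_len, adapter_len = 2, 8, 64, 16, 10
hidden_size = num_heads * head_dim

query_states = torch.randn(bsz, num_heads, q_len, head_dim)
adapter_k = torch.randn(bsz, num_heads, adapter_len, head_dim)
adapter_v = torch.randn(bsz, num_heads, adapter_len, head_dim)
adaption_gate = torch.zeros(1)  # zero-init: the adapter starts out as a no-op

scores = torch.matmul(query_states, adapter_k.transpose(2, 3)) / math.sqrt(head_dim)
scores = adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32)
adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)

assert scores.shape == (bsz, num_heads, q_len, adapter_len)
assert adapter_output.shape == (bsz, q_len, hidden_size)
assert torch.all(adapter_output == 0)  # with a zero gate the adapter contributes nothing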
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/model.py
ADDED
@@ -0,0 +1,161 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict, List

import torch.nn as nn

from peft.utils import _freeze_adapter, _get_submodules

from .config import AdaptionPromptConfig, prepare_config
from .layer import AdaptedAttention
from .utils import is_adaption_prompt_trainable


class AdaptionPromptModel(nn.Module):
    """
    Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.

    The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert
    trainable prompts with gates (for zero init).

    Notes on the multi-adapter pattern:
    - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter
      name.
    - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them
      in the dictionary, and replace them with the modules of the new adapter.
    - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the
      dictionary.
    - Disabling the adapter would also result in the modules being removed from the model.
    """

    def __init__(self, model, configs: Dict, adapter_name: str):
        super().__init__()
        self.model = model
        # Store adapter configs by name.
        self.peft_config: Dict[str, AdaptionPromptConfig] = {}
        # Store lists of the parents of the affected attention modules by adapter name.
        # We keep references to the parents so we can swap the adapters in-and-out of the model.
        self._parents: Dict[str, List[nn.Module]] = {}
        # Store lists of cached AdaptedAttention modules by name.
        self._cached_adapters: Dict[str, List] = {}
        # The name of the currently active adapter.
        self._active_adapter = None
        # Whether the adapter is enabled.
        self._enabled = True
        self.forward = self.model.forward
        self.add_adapter(adapter_name, configs[adapter_name])
        self._mark_only_adaption_prompts_as_trainable(self.model)

    def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:
        """Add an adapter with the given name and config."""
        config = prepare_config(config, self.model)
        if adapter_name in self.peft_config:
            raise ValueError(f"Adapter with name '{adapter_name}' already exists.")

        parents = []
        for name, _ in self.model.named_modules():
            if name.endswith(config.target_modules):
                par, _, _ = _get_submodules(self.model, name)
                parents.append(par)
        if len(parents) < config.adapter_layers:
            raise ValueError(
                f"Config specifies more adapter layers '{config.adapter_layers}'"
                f" than the model has '{len(parents)}'."
            )
        # Note that if the target modules are not in Sequential, ModuleList, or
        # some other PyTorch ordered container, the behavior is undefined as we
        # assume here that the order of the modules is the same as the order of
        # the transformer decoder layers.
        parents = parents[-config.adapter_layers :]
        self._parents[adapter_name] = parents

        # It is only None during initialization.
        # If it is disabled, we don't have to remove the modules.
        if self._active_adapter is not None and self._enabled:
            self._remove_adapted_attentions(self._active_adapter)
        self._active_adapter = adapter_name
        self.peft_config[adapter_name] = config
        self._create_adapted_attentions(config, parents)
        if not self._enabled:
            self._remove_adapted_attentions(self._active_adapter)

        if config.inference_mode:
            _freeze_adapter(self.model, adapter_name)

    def set_adapter(self, adapter_name: str) -> None:
        """Set the model to use the adapter with the given name."""
        if self._active_adapter == adapter_name:
            return
        if adapter_name not in self.peft_config:
            raise ValueError(f"Adapter with name '{adapter_name}' does not exist.")

        if self._enabled:
            self._remove_adapted_attentions(self._active_adapter)
            self._set_adapted_attentions(adapter_name)

        self._active_adapter = adapter_name

    def enable_adapter_layers(self):
        """Enable adapter layers by swapping in cached AdaptedAttention modules."""
        self._enabled = True
        self._set_adapted_attentions(self._active_adapter)

    def disable_adapter_layers(self):
        """Disable adapter layers by swapping out AdaptedAttention modules."""
        self._enabled = False
        self._remove_adapted_attentions(self._active_adapter)

    def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:
        """Wrap LlamaAttention modules with newly created AdaptedAttention modules."""
        for par in parents:
            attn = AdaptedAttention(
                model_type=self.model.config.model_type,
                adapter_len=config.adapter_len,
                model=getattr(par, config.target_modules),
            )
            setattr(par, config.target_modules, attn)

    def _set_adapted_attentions(self, adapter_name: str) -> None:
        """Replace LlamaAttention modules with cached AdaptedAttention modules."""
        cached = self._cached_adapters[adapter_name]
        del self._cached_adapters[adapter_name]
        config = self.peft_config[adapter_name]
        for i, par in enumerate(self._parents[adapter_name]):
            setattr(par, config.target_modules, cached[i])

    def _remove_adapted_attentions(self, adapter_name: str) -> None:
        """Remove AdaptedAttention modules from the model and store them in the cache."""
        config = self.peft_config[adapter_name]
        adapted_attentions = []
        for par in self._parents[adapter_name]:
            attn = getattr(par, config.target_modules)
            adapted_attentions.append(attn)
            setattr(par, config.target_modules, attn.model)
        self._cached_adapters[adapter_name] = adapted_attentions

    def _mark_only_adaption_prompts_as_trainable(self, model: nn.Module) -> None:
        """Freeze all parameters of the model except the adaption prompts."""
        for n, p in model.named_parameters():
            if not is_adaption_prompt_trainable(n):
                p.requires_grad = False

    def __getattr__(self, name: str):
        """Forward missing attributes to the wrapped module."""
        try:
            return super().__getattr__(name)  # defer to nn.Module's logic
        except AttributeError:
            # This is necessary as e.g. causal models have various methods that we
            # don't want to re-implement here.
            return getattr(self.model, name)
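The docstring above describes the swap-in/swap-out multi-adapter pattern: only the active adapter's AdaptedAttention wrappers live inside the model at any time. A minimal usage sketch, assuming the public peft entry points (`AdaptionPromptConfig`, `get_peft_model`) and an illustrative Llama checkpoint name and layer counts:

```python
# Sketch only: checkpoint name and adapter_len/adapter_layers values are illustrative.
from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
config = AdaptionPromptConfig(adapter_len=10, adapter_layers=30, task_type="CAUSAL_LM")
model = get_peft_model(base, config)  # wraps the top attention modules with AdaptedAttention
model.print_trainable_parameters()    # only adaption_prompt / adaption_gate tensors remain trainable
```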
llmeval-env/lib/python3.10/site-packages/peft/tuners/adaption_prompt/utils.py
ADDED
@@ -0,0 +1,121 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect

import torch
import torch.nn as nn


def llama_rotate_half(x: torch.Tensor) -> torch.Tensor:
    """
    Rotate half the hidden dims of the input.

    This function was duplicated verbatim from:
    https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126

    This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other
    functions were also adapted from the transformers implementation but were modified.
    """
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def llama_apply_rotary_pos_emb(q, cos, sin, position_ids):
    """
    Apply rotary position embedding to query states in the Llama model.

    This function was adapted from:
    https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133

    It was modified to remove unnecessary processing of key states. The method is compatible with transformers <=
    4.34.2 and also with the latest version (>=4.35).
    """
    # In previous transformers version cos/sin cached had a shape of 4D
    if len(cos.shape) == 4:
        gather_indices = position_ids[:, None, :, None]  # [bs, 1, seq_len, 1]
        gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
        cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
        sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    # In the new version, it is 2D so we fall back to the new implementation
    # https://github.com/huggingface/transformers/blame/eef7ea98c31a333bacdc7ae7a2372bde772be8e4/src/transformers/models/llama/modeling_llama.py#L222-L226
    else:
        cos = cos[position_ids].unsqueeze(1)
        sin = sin[position_ids].unsqueeze(1)
    q_embed = (q * cos) + (llama_rotate_half(q) * sin)
    return q_embed


def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor:
    """
    Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the
    original LlamaModel in the transformers library does not return them. See the related discussion in the PR:
    https://github.com/huggingface/peft/pull/268
    """
    hidden_states = kwargs.get("hidden_states")
    position_ids = kwargs.get("position_ids")
    past_key_value = kwargs.get("past_key_value")
    bsz, q_len, _ = hidden_states.size()
    query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2)

    factor = model.k_proj.in_features // model.k_proj.out_features
    value_states = (
        model.v_proj(hidden_states).view(bsz, q_len, (model.num_heads // factor), model.head_dim).transpose(1, 2)
    )

    seq_len = q_len

    if past_key_value is not None:
        if isinstance(past_key_value, tuple):
            # for transformers <= 4.35
            seq_len += past_key_value[0].shape[-2]
        else:
            # since transformers 4.36, this is a DynamicCache instance
            seq_len += past_key_value.get_seq_length(model.layer_idx)

    # For transformers > 4.37.2 `position_ids` became a required arguments in the rotary embedding's forward pass.
    if "position_ids" not in inspect.signature(model.rotary_emb.forward).parameters:
        # TODO we assume that position_ids is not None here, not sure if that is safe but the old code also did that
        cos, sin = model.rotary_emb(value_states, seq_len=seq_len)
        return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids)

    past_seen_tokens = 0
    if position_ids is None:
        # Compute position_ids, since they are required for transformers > 4.37.2
        if past_key_value is None:
            new_cache_positions = torch.arange(q_len, q_len + q_len, device=value_states.device)
        else:
            past_seen_tokens = past_key_value.get_usable_length(q_len, model.layer_idx)
            new_cache_positions = torch.arange(past_seen_tokens, past_seen_tokens + q_len, device=value_states.device)
        position_ids = new_cache_positions.unsqueeze(0)

    rotary_emb_kwargs = {"position_ids": position_ids}
    # The `seq_len` argument has been officially removed in transformers >= 4.39.0
    if "seq_len" in inspect.signature(model.rotary_emb.forward).parameters:
        rotary_emb_kwargs["seq_len"] = q_len + past_seen_tokens

    cos, sin = model.rotary_emb(value_states, **rotary_emb_kwargs)

    # For batched inference unsqueeze it on the correct dim
    # since: https://github.com/huggingface/transformers/pull/29109
    if len(cos.shape) == 3:
        cos = cos.unsqueeze(1)
        sin = sin.unsqueeze(1)

    return (query_states * cos) + (llama_rotate_half(query_states) * sin)


def is_adaption_prompt_trainable(params: str) -> bool:
    """Return True if module is trainable under adaption prompt fine-tuning."""
    return params.split(".")[-1].startswith("adaption_")
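A small self-contained check of the rotary helper defined above, assuming the module path from this listing is importable from the installed package. It only exercises `llama_rotate_half`, which negates and swaps the two halves of the last dimension so that `(q * cos) + (rotate_half(q) * sin)` realizes the rotary position embedding:

```python
import torch
from peft.tuners.adaption_prompt.utils import llama_rotate_half

# Dummy query states with shape (bsz, num_heads, q_len, head_dim).
q = torch.arange(24, dtype=torch.float32).reshape(1, 2, 3, 4)
rotated = llama_rotate_half(q)

assert rotated.shape == q.shape
# First half of the last dim is the negated second half of the input, and vice versa.
assert torch.equal(rotated[..., :2], -q[..., 2:])
assert torch.equal(rotated[..., 2:], q[..., :2])
```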
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__init__.py
ADDED
@@ -0,0 +1,20 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .config import LoKrConfig
from .layer import Conv2d, Linear, LoKrLayer
from .model import LoKrModel


__all__ = ["LoKrConfig", "LoKrModel", "Conv2d", "Linear", "LoKrLayer"]
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (388 Bytes).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/config.cpython-310.pyc
ADDED
Binary file (5.65 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc
ADDED
Binary file (10.8 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/__pycache__/model.cpython-310.pyc
ADDED
Binary file (3.96 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/config.py
ADDED
@@ -0,0 +1,127 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import List, Optional, Union

from peft.tuners.lycoris_utils import LycorisConfig
from peft.utils import PeftType


@dataclass
class LoKrConfig(LycorisConfig):
    """
    Configuration class of [`LoKrModel`].

    Args:
        r (`int`):
            LoKr rank.
        alpha (`int`):
            The alpha parameter for LoKr scaling.
        rank_dropout (`float`):
            The dropout probability for rank dimension during training.
        module_dropout (`float`):
            The dropout probability for disabling LoKr modules during training.
        use_effective_conv2d (`bool`):
            Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
        decompose_both (`bool`):
            Perform rank decomposition of left kronecker product matrix.
        decompose_factor (`int`):
            Kronecker product decomposition factor.
        target_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
            names will be replaced. When passing a string, a regex match will be performed. When passing a list of
            strings, either an exact match will be performed or it is checked if the name of the module ends with any
            of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
            excluding the output layer. If this is not specified, modules will be chosen according to the model
            architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
            the target modules manually.
        init_weights (`bool`):
            Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
            discouraged.
        layers_to_transform (`Union[List[int], int]`):
            The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
            that are specified in this list. If a single integer is passed, it will apply the transformations on the
            layer at this index.
        layers_pattern (`str`):
            The layer pattern name, used only if `layers_to_transform` is different from `None`.
        rank_pattern (`dict`):
            The mapping from layer names or regexp expression to ranks which are different from the default rank
            specified by `r`.
        alpha_pattern (`dict`):
            The mapping from layer names or regexp expression to alphas which are different from the default alpha
            specified by `alpha`.
        modules_to_save (`Optional[List[str]]`):
            List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
    """

    r: int = field(default=8, metadata={"help": "LoKr rank"})
    alpha: int = field(default=8, metadata={"help": "LoKr alpha"})
    rank_dropout: float = field(
        default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
    )
    module_dropout: float = field(
        default=0.0, metadata={"help": "The dropout probability for disabling LoKr modules during training"}
    )
    use_effective_conv2d: bool = field(
        default=False,
        metadata={
            "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
        },
    )
    decompose_both: bool = field(
        default=False,
        metadata={"help": "Perform rank decomposition of left kronecker product matrix."},
    )
    decompose_factor: int = field(default=-1, metadata={"help": "Kronecker product decomposition factor."})
    target_modules: Optional[Union[List[str], str]] = field(
        default=None,
        metadata={
            "help": "List of module names or regex expression of the module names to replace with LoKr."
            "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
            "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
        },
    )
    init_weights: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to initialize the weights of the LoKr layers with their default initialization. Don't change "
                "this setting, except if you know exactly what you're doing."
            ),
        },
    )
    layers_to_transform: Optional[Union[List[int], int]] = field(
        default=None,
        metadata={
            "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
        },
    )
    layers_pattern: Optional[str] = field(
        default=None,
        metadata={
            "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
        },
    )
    modules_to_save: Optional[List[str]] = field(
        default=None,
        metadata={
            "help": "List of modules apart from LoKr layers to be set as trainable and saved in the final checkpoint. "
            "For example, in Sequence Classification or Token Classification tasks, "
            "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
        },
    )

    def __post_init__(self):
        self.peft_type = PeftType.LOKR
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/layer.py
ADDED
@@ -0,0 +1,409 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Any, Optional, Set, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from peft.tuners.lycoris_utils import LycorisLayer


class LoKrLayer(nn.Module, LycorisLayer):
    # All names of layers that may contain adapter weights
    adapter_layer_names = (
        "lokr_w1",
        "lokr_w1_a",
        "lokr_w1_b",
        "lokr_w2",
        "lokr_w2_a",
        "lokr_w2_b",
        "lokr_t2",
    )
    # other_param_names is defined on parent class

    def __init__(self, base_layer: nn.Module) -> None:
        super().__init__()
        LycorisLayer.__init__(self, base_layer)

        # LoKr info
        self.lokr_w1 = nn.ParameterDict({})
        self.lokr_w1_a = nn.ParameterDict({})
        self.lokr_w1_b = nn.ParameterDict({})
        self.lokr_w2 = nn.ParameterDict({})
        self.lokr_w2_a = nn.ParameterDict({})
        self.lokr_w2_b = nn.ParameterDict({})
        self.lokr_t2 = nn.ParameterDict({})

    @property
    def _available_adapters(self) -> Set[str]:
        return {
            *self.lokr_w1,
            *self.lokr_w1_a,
            *self.lokr_w1_b,
            *self.lokr_w2,
            *self.lokr_w2_a,
            *self.lokr_w2_b,
            *self.lokr_t2,
        }

    def create_adapter_parameters(
        self,
        adapter_name: str,
        r: int,
        shape,
        use_w1: bool,
        use_w2: bool,
        use_effective_conv2d: bool,
    ):
        if use_w1:
            self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0]))
        else:
            self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r))
            self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0]))

        if len(shape) == 4:
            # Conv2d
            if use_w2:
                self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:]))
            elif use_effective_conv2d:
                self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
                self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1]))  # b, 1-mode
                self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))  # d, 2-mode
            else:
                self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
                self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3]))
        else:
            # Linear
            if use_w2:
                self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1]))
            else:
                self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
                self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))

    def reset_adapter_parameters(self, adapter_name: str):
        if adapter_name in self.lokr_w1:
            nn.init.zeros_(self.lokr_w1[adapter_name])
        else:
            nn.init.zeros_(self.lokr_w1_a[adapter_name])
            nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))

        if adapter_name in self.lokr_w2:
            nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
        else:
            nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))

        if adapter_name in self.lokr_t2:
            nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))

    def reset_adapter_parameters_random(self, adapter_name: str):
        if adapter_name in self.lokr_w1:
            nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
        else:
            nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))

        if adapter_name in self.lokr_w2:
            nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
        else:
            nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))

        if adapter_name in self.lokr_t2:
            nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))

    def update_layer(
        self,
        adapter_name: str,
        r: int,
        alpha: float,
        rank_dropout: float,
        module_dropout: float,
        init_weights: bool,
        use_effective_conv2d: bool,
        decompose_both: bool,
        decompose_factor: int,
        **kwargs,
    ) -> None:
        """Internal function to create lokr adapter

        Args:
            adapter_name (`str`): Name for the adapter to add.
            r (`int`): Rank for the added adapter.
            alpha (`float`): Alpha for the added adapter.
            rank_dropout (`float`): The dropout probability for rank dimension during training
            module_dropout (`float`): The dropout probability for disabling adapter during training.
            init_weights (`bool`): Whether to initialize adapter weights.
            use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1.
            decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix.
            decompose_factor (`int`): Kronecker product decomposition factor.
        """
        if r <= 0:
            raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")

        self.r[adapter_name] = r
        self.alpha[adapter_name] = alpha
        self.scaling[adapter_name] = alpha / r
        self.rank_dropout[adapter_name] = rank_dropout
        self.module_dropout[adapter_name] = module_dropout
        base_layer = self.get_base_layer()

        # Determine shape of LoKr weights
        if isinstance(base_layer, nn.Linear):
            in_dim, out_dim = base_layer.in_features, base_layer.out_features

            in_m, in_n = factorization(in_dim, decompose_factor)
            out_l, out_k = factorization(out_dim, decompose_factor)
            shape = ((out_l, out_k), (in_m, in_n))  # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d

            use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
            use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2)
            use_effective_conv2d = False
        elif isinstance(base_layer, nn.Conv2d):
            in_dim, out_dim = base_layer.in_channels, base_layer.out_channels
            k_size = base_layer.kernel_size

            in_m, in_n = factorization(in_dim, decompose_factor)
            out_l, out_k = factorization(out_dim, decompose_factor)
            shape = ((out_l, out_k), (in_m, in_n), *k_size)  # ((a, b), (c, d), *k_size)

            use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
            use_w2 = r >= max(shape[0][1], shape[1][1]) / 2
            use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
        else:
            raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}")

        # Create weights with provided shape
        self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d)

        # Initialize weights
        if init_weights:
            self.reset_adapter_parameters(adapter_name)
        else:
            self.reset_adapter_parameters_random(adapter_name)

        # Move new weights to device
        weight = getattr(self.get_base_layer(), "weight", None)
        if weight is not None:
            # the layer is already completely initialized, this is an update
            if weight.dtype.is_floating_point or weight.dtype.is_complex:
                self.to(weight.device, dtype=weight.dtype)
            else:
                self.to(weight.device)
        self.set_adapter(self.active_adapters)

    def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
        # https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224
        if adapter_name in self.lokr_w1:
            w1 = self.lokr_w1[adapter_name]
        else:
            w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name]

        if adapter_name in self.lokr_w2:
            w2 = self.lokr_w2[adapter_name]
        elif adapter_name in self.lokr_t2:
            w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name])
        else:
            w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name]

        # Make weights with Kronecker product
        weight = make_kron(w1, w2)
        weight = weight.reshape(self.get_base_layer().weight.shape)

        # Perform rank dropout during training - drop rows of addition weights
        rank_dropout = self.rank_dropout[adapter_name]
        if self.training and rank_dropout:
            drop = (torch.rand(weight.size(0)) > rank_dropout).float()
            drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
            drop /= drop.mean()
            weight *= drop

        return weight

    def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        previous_dtype = x.dtype

        if self.disable_adapters:
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif self.merged:
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)

            # Execute all the adapters
            for active_adapter in self.active_adapters:
                if active_adapter not in self._available_adapters:
                    continue

                module_dropout = self.module_dropout[active_adapter]

                # Modify current execution weights
                if (not self.training) or (self.training and torch.rand(1) > module_dropout):
                    result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)

        result = result.to(previous_dtype)
        return result


class Linear(LoKrLayer):
    """LoKr implemented in Linear layer"""

    def __init__(
        self,
        base_layer: nn.Module,
        device: Optional[Union[str, torch.device]] = None,
        dtype: Optional[torch.dtype] = None,
        adapter_name: str = "default",
        r: int = 0,
        alpha: float = 0.0,
        rank_dropout: float = 0.0,
        module_dropout: float = 0.0,
        init_weights: bool = True,
        **kwargs,
    ):
        super().__init__(base_layer)

        # Create adapter and set it active
        self._active_adapter = adapter_name
        self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)

    def _get_delta_activations(
        self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
    ) -> torch.Tensor:
        delta_weight = self.get_delta_weight(adapter_name)
        # don't add bias here, because the bias is already included in the output of the base_layer
        return F.linear(input, delta_weight)

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lokr." + rep


class Conv2d(LoKrLayer):
    """LoKr implemented in Conv2d layer"""

    def __init__(
        self,
        base_layer: nn.Module,
        device: Optional[Union[str, torch.device]] = None,
        dtype: Optional[torch.dtype] = None,
        adapter_name: str = "default",
        r: int = 0,
        alpha: float = 0.0,
        rank_dropout: float = 0.0,
        module_dropout: float = 0.0,
        use_effective_conv2d: bool = False,
        init_weights: bool = True,
        **kwargs,
    ):
        super().__init__(base_layer)

        # Create adapter and set it active
        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
        )

    def _get_delta_activations(
        self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
    ) -> torch.Tensor:
        delta_weight = self.get_delta_weight(adapter_name)
        # don't add bias here, because the bias is already included in the output of the base_layer
        base_layer = self.get_base_layer()
        return F.conv2d(
            input,
            delta_weight,
            stride=base_layer.stride,
            padding=base_layer.padding,
            dilation=base_layer.dilation,
            groups=base_layer.groups,
        )

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lokr." + rep


# Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11


def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]:
    """Factorizes the provided number into the product of two numbers

    Args:
        dimension (`int`): The number that needs to be factorized.
        factor (`int`, optional):
            Factorization divider. The algorithm will try to output two numbers, one of each will be as close to the
            factor as possible. If -1 is provided, the decomposition algorithm would try to search dividers near the
            square root of the dimension. Defaults to -1.

    Returns:
        Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is
        always less than or equal to the second.

    Example:
        ```py
        >>> factorization(256, factor=-1)
        (16, 16)

        >>> factorization(128, factor=-1)
        (8, 16)

        >>> factorization(127, factor=-1)
        (1, 127)

        >>> factorization(128, factor=4)
        (4, 32)
        ```
    """

    if factor > 0 and (dimension % factor) == 0:
        m = factor
        n = dimension // factor
        return m, n
    if factor == -1:
        factor = dimension
    m, n = 1, dimension
    length = m + n
    while m < n:
        new_m = m + 1
        while dimension % new_m != 0:
            new_m += 1
        new_n = dimension // new_m
        if new_m + new_n > length or new_m > factor:
            break
        else:
            m, n = new_m, new_n
    if m > n:
        n, m = m, n
    return m, n


def make_weight_cp(t, wa, wb):
    rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb)  # [c, d, k1, k2]
    return rebuild2


def make_kron(w1, w2, scale=1.0):
    if len(w2.shape) == 4:
        w1 = w1.unsqueeze(2).unsqueeze(2)
    w2 = w2.contiguous()
    rebuild = torch.kron(w1, w2)

    return rebuild * scale
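A short sketch of how `factorization` and `make_kron` above compose into a LoKr delta weight for a square linear layer. The dimensions are illustrative; only `torch` and the helpers from this file are assumed:

```python
import torch
from peft.tuners.lokr.layer import factorization, make_kron

out_dim, in_dim, r = 768, 768, 8
out_l, out_k = factorization(out_dim)  # e.g. 768 -> (24, 32)
in_m, in_n = factorization(in_dim)     # e.g. 768 -> (24, 32)

w1 = torch.randn(out_l, in_m)                       # small Kronecker factor
w2 = torch.randn(out_k, r) @ torch.randn(r, in_n)   # low-rank second factor, like w2_a @ w2_b
delta = make_kron(w1, w2)                            # Kronecker product rebuilds the full weight

assert delta.shape == (out_dim, in_dim)
```

This mirrors `get_delta_weight`: the full-size update is never stored directly, only the two (optionally low-rank) Kronecker factors are.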
llmeval-env/lib/python3.10/site-packages/peft/tuners/lokr/model.py
ADDED
@@ -0,0 +1,115 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from itertools import chain
from typing import Dict, Type, Union

import torch
from torch import nn

from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner

from .layer import Conv2d, Linear, LoKrLayer


class LoKrModel(LycorisTuner):
    """
    Creates Low-Rank Kronecker Product model from a pretrained model. The original method is partially described in
    https://arxiv.org/abs/2108.06098 and in https://arxiv.org/abs/2309.14859 Current implementation heavily borrows
    from
    https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py

    Args:
        model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
        config ([`LoKrConfig`]): The configuration of the LoKr model.
        adapter_name (`str`): The name of the adapter, defaults to `"default"`.

    Returns:
        `torch.nn.Module`: The LoKr model.

    Example:
        ```py
        >>> from diffusers import StableDiffusionPipeline
        >>> from peft import LoKrModel, LoKrConfig

        >>> config_te = LoKrConfig(
        ...     r=8,
        ...     lora_alpha=32,
        ...     target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
        ...     rank_dropout=0.0,
        ...     module_dropout=0.0,
        ...     init_weights=True,
        ... )
        >>> config_unet = LoKrConfig(
        ...     r=8,
        ...     lora_alpha=32,
        ...     target_modules=[
        ...         "proj_in",
        ...         "proj_out",
        ...         "to_k",
        ...         "to_q",
        ...         "to_v",
        ...         "to_out.0",
        ...         "ff.net.0.proj",
        ...         "ff.net.2",
        ...     ],
        ...     rank_dropout=0.0,
        ...     module_dropout=0.0,
        ...     init_weights=True,
        ...     use_effective_conv2d=True,
        ... )

        >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> model.text_encoder = LoKrModel(model.text_encoder, config_te, "default")
        >>> model.unet = LoKrModel(model.unet, config_unet, "default")
        ```

    **Attributes**:
        - **model** ([`~torch.nn.Module`]) -- The model to be adapted.
        - **peft_config** ([`LoKrConfig`]): The configuration of the LoKr model.
    """

    prefix: str = "lokr_"
    layers_mapping: Dict[Type[torch.nn.Module], Type[LoKrLayer]] = {
        torch.nn.Conv2d: Conv2d,
        torch.nn.Linear: Linear,
    }

    def _create_and_replace(
        self,
        config: LycorisConfig,
        adapter_name: str,
        target: Union[LoKrLayer, nn.Module],
        target_name: str,
        parent: nn.Module,
        current_key: str,
    ) -> None:
        """
        A private method to create and replace the target module with the adapter module.
        """

        # Regexp matching - Find key which matches current target_name in patterns provided
        pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
        target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name)

        kwargs = config.to_dict()
        kwargs["r"] = config.rank_pattern.get(target_name_key, config.r)
        kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha)

        if isinstance(target, LoKrLayer):
            target.update_layer(adapter_name, **kwargs)
        else:
            new_module = self._create_new_module(config, adapter_name, target, **kwargs)
            self._replace_module(parent, target_name, new_module, target)
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__init__.py
ADDED
@@ -0,0 +1,37 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from peft.import_utils import is_bnb_4bit_available, is_bnb_available

from .config import LoftQConfig, LoraConfig
from .gptq import QuantLinear
from .layer import Conv2d, Embedding, Linear, LoraLayer
from .model import LoraModel


__all__ = ["LoraConfig", "LoftQConfig", "Conv2d", "Embedding", "LoraLayer", "Linear", "LoraModel", "QuantLinear"]


def __getattr__(name):
    if (name == "Linear8bitLt") and is_bnb_available():
        from .bnb import Linear8bitLt

        return Linear8bitLt

    if (name == "Linear4bit") and is_bnb_4bit_available():
        from .bnb import Linear4bit

        return Linear4bit

    raise AttributeError(f"module {__name__} has no attribute {name}")
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (909 Bytes).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc
ADDED
Binary file (2.3 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/awq.cpython-310.pyc
ADDED
Binary file (2.71 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc
ADDED
Binary file (11 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc
ADDED
Binary file (2.51 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/layer.cpython-310.pyc
ADDED
Binary file (22.9 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/model.cpython-310.pyc
ADDED
Binary file (25.8 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc
ADDED
Binary file (5.04 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/lora/gptq.py
ADDED
@@ -0,0 +1,114 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Optional

import torch

from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import get_auto_gptq_quant_linear


class QuantLinear(torch.nn.Module, LoraLayer):
    def __init__(
        self,
        base_layer,
        adapter_name: str,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        init_lora_weights: bool = True,
        use_rslora: bool = False,
        use_dora: bool = False,
        **kwargs,
    ):
        super().__init__()
        LoraLayer.__init__(self, base_layer)

        if use_dora:
            raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")

        # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
        # for backwards compatibility
        self.quant_linear_module = base_layer
        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            init_lora_weights=init_lora_weights,
            use_rslora=use_rslora,
            use_dora=use_dora,
        )

    def forward(self, x: torch.Tensor):
        # note: logic differs from default Linear because merging is not supported
        result = self.quant_linear_module(x)

        if self.disable_adapters:
            return result

        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]

            requires_conversion = not torch.is_autocast_enabled()
            if requires_conversion:
                expected_dtype = result.dtype
                x = x.to(lora_A.weight.dtype)

            output = lora_B(lora_A(dropout(x)))
            if requires_conversion:
                output = output.to(expected_dtype)
            output = output * scaling
            result += output
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lora." + rep

    # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
    # def reset_lora_parameters(self, adapter_name):
    #     if adapter_name in self.lora_A.keys():
    #         torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
    #         torch.nn.init.zeros_(self.lora_B[adapter_name].weight)


def dispatch_gptq(
    target: torch.nn.Module,
    adapter_name: str,
    **kwargs: Any,
) -> Optional[torch.nn.Module]:
    new_module = None

    if isinstance(target, BaseTunerLayer):
        target_base_layer = target.get_base_layer()
    else:
        target_base_layer = target

    gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
    AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)

    if AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear):
        new_module = QuantLinear(target, adapter_name, **kwargs)
        target.qweight = target_base_layer.qweight

    return new_module
llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__init__.py
ADDED
@@ -0,0 +1,19 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .config import PromptEncoderConfig, PromptEncoderReparameterizationType
from .model import PromptEncoder


__all__ = ["PromptEncoder", "PromptEncoderConfig", "PromptEncoderReparameterizationType"]
llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (368 Bytes).
llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc
ADDED
Binary file (1.98 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc
ADDED
Binary file (3.9 kB).
llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/config.py
ADDED
@@ -0,0 +1,59 @@
1 |
+
# Copyright 2023-present the HuggingFace Inc. team.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import enum
|
16 |
+
from dataclasses import dataclass, field
|
17 |
+
from typing import Union
|
18 |
+
|
19 |
+
from peft.config import PromptLearningConfig
|
20 |
+
from peft.utils import PeftType
|
21 |
+
|
22 |
+
|
23 |
+
class PromptEncoderReparameterizationType(str, enum.Enum):
|
24 |
+
MLP = "MLP"
|
25 |
+
LSTM = "LSTM"
|
26 |
+
|
27 |
+
|
28 |
+
@dataclass
|
29 |
+
class PromptEncoderConfig(PromptLearningConfig):
|
30 |
+
"""
|
31 |
+
This is the configuration class to store the configuration of a [`PromptEncoder`].
|
32 |
+
|
33 |
+
Args:
|
34 |
+
encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]):
|
35 |
+
The type of reparameterization to use.
|
36 |
+
encoder_hidden_size (`int`): The hidden size of the prompt encoder.
|
37 |
+
encoder_num_layers (`int`): The number of layers of the prompt encoder.
|
38 |
+
encoder_dropout (`float`): The dropout probability of the prompt encoder.
|
39 |
+
"""
|
40 |
+
|
41 |
+
encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field(
|
42 |
+
default=PromptEncoderReparameterizationType.MLP,
|
43 |
+
metadata={"help": "How to reparameterize the prompt encoder"},
|
44 |
+
)
|
45 |
+
encoder_hidden_size: int = field(
|
46 |
+
default=None,
|
47 |
+
metadata={"help": "The hidden size of the prompt encoder"},
|
48 |
+
)
|
49 |
+
encoder_num_layers: int = field(
|
50 |
+
default=2,
|
51 |
+
metadata={"help": "The number of layers of the prompt encoder"},
|
52 |
+
)
|
53 |
+
encoder_dropout: float = field(
|
54 |
+
default=0.0,
|
55 |
+
metadata={"help": "The dropout of the prompt encoder"},
|
56 |
+
)
|
57 |
+
|
58 |
+
def __post_init__(self):
|
59 |
+
self.peft_type = PeftType.P_TUNING
|
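A minimal usage sketch for `PromptEncoderConfig` (illustrative, not part of the added file), assuming `transformers` and the `t5-small` checkpoint are available. Only `task_type`, `num_virtual_tokens`, and `encoder_hidden_size` are set; the remaining fields keep the defaults defined above.

```py
from peft import PromptEncoderConfig, get_peft_model
from transformers import AutoModelForSeq2SeqLM

config = PromptEncoderConfig(
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=20,
    encoder_hidden_size=128,  # hidden size of the MLP reparameterization
)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = get_peft_model(model, config)
model.print_trainable_parameters()
```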
llmeval-env/lib/python3.10/site-packages/peft/tuners/p_tuning/model.py
ADDED
@@ -0,0 +1,130 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Based on https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/prompt_encoder.py
# with some refactor
import warnings

import torch

from .config import PromptEncoderConfig, PromptEncoderReparameterizationType


class PromptEncoder(torch.nn.Module):
    """
    The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.

    Args:
        config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.

    Example:

    ```py
    >>> from peft import PromptEncoder, PromptEncoderConfig

    >>> config = PromptEncoderConfig(
    ...     peft_type="P_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     encoder_reparameterization_type="MLP",
    ...     encoder_hidden_size=768,
    ... )

    >>> prompt_encoder = PromptEncoder(config)
    ```

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.
        - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.
        - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and
          `encoder_reparameterization_type="LSTM"`.
        - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.
        - **input_size** (`int`) -- The input size of the prompt encoder.
        - **output_size** (`int`) -- The output size of the prompt encoder.
        - **hidden_size** (`int`) -- The hidden size of the prompt encoder.
        - **total_virtual_tokens** (`int`): The total number of virtual tokens of the
          prompt encoder.
        - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt
          encoder.


    Input shape: (`batch_size`, `total_virtual_tokens`)

    Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
    """

    def __init__(self, config):
        super().__init__()
        self.token_dim = config.token_dim
        self.input_size = self.token_dim
        self.output_size = self.token_dim
        self.hidden_size = config.encoder_hidden_size
        self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
        self.encoder_type = config.encoder_reparameterization_type

        # embedding
        self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)
        if not config.inference_mode:
            if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
                lstm_dropout = config.encoder_dropout
                num_layers = config.encoder_num_layers
                # LSTM
                self.lstm_head = torch.nn.LSTM(
                    input_size=self.input_size,
                    hidden_size=self.hidden_size,
                    num_layers=num_layers,
                    dropout=lstm_dropout,
                    bidirectional=True,
                    batch_first=True,
                )

                self.mlp_head = torch.nn.Sequential(
                    torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size * 2, self.output_size),
                )

            elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
                encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers
                if config.encoder_num_layers != encoder_num_layers_default:
                    warnings.warn(
                        f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. "
                        f"Exactly {encoder_num_layers_default} MLP layers are used."
                    )
                layers = [
                    torch.nn.Linear(self.input_size, self.hidden_size),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size, self.hidden_size),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size, self.output_size),
                ]
                self.mlp_head = torch.nn.Sequential(*layers)

            else:
                raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")

    def forward(self, indices):
        input_embeds = self.embedding(indices)
        if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
            output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])
        elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
            output_embeds = self.mlp_head(input_embeds)
        else:
            raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")

        return output_embeds
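A short sketch (illustrative, not part of the added file) of the input/output contract stated in the docstring above, reusing the docstring's example configuration: indices of shape `(batch_size, total_virtual_tokens)` map to embeddings of shape `(batch_size, total_virtual_tokens, token_dim)`.

```py
import torch
from peft import PromptEncoder, PromptEncoderConfig

config = PromptEncoderConfig(
    peft_type="P_TUNING",
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=20,
    token_dim=768,
    num_transformer_submodules=1,
    num_attention_heads=12,
    num_layers=12,
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=768,
)
encoder = PromptEncoder(config)
indices = torch.arange(encoder.total_virtual_tokens).unsqueeze(0)  # (1, 20)
print(encoder(indices).shape)  # torch.Size([1, 20, 768])
```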
llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/__init__.py
ADDED
@@ -0,0 +1,20 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .config import PolyConfig
from .layer import Linear, PolyLayer
from .model import PolyModel


__all__ = ["Linear", "PolyConfig", "PolyLayer", "PolyModel"]
llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/config.py
ADDED
@@ -0,0 +1,89 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import List, Literal, Optional, Union

from peft.config import PeftConfig
from peft.utils import PeftType


@dataclass
class PolyConfig(PeftConfig):
    """
    This is the configuration class to store the configuration of a [`PolyModel`].
    - [Polytropon (Poly)](https://arxiv.org/abs/2202.13914)
    - [Multi-Head Routing (MHR)](https://arxiv.org/abs/2211.03831)

    Args:
        r (`int`): Attention dimension of each Lora in Poly.
        target_modules (`Union[List[str],str]`): The names of the modules to apply Poly to.
        modules_to_save (`List[str]`): List of modules apart from Poly layers to be set as trainable
            and saved in the final checkpoint.
        init_weights (bool): Whether to perform initialization of Poly weights.
        poly_type (`Literal["poly"]`): The variant of the Poly module to use. Currently, only "poly"
            is supported.
        n_tasks (`int`): The number of tasks in a multitasking scenario.
        n_skills (`int`): The number of skills (LoRA) in each Poly layer.
        n_splits (`int`): The number of splits within each LoRA of a Poly layer. A value greater
            than 1 indicates the use of Multi-Head Routing (MHR).
    """

    r: int = field(default=8, metadata={"help": "Lora attention dimension"})
    target_modules: Optional[Union[List[str], str]] = field(
        default=None,
        metadata={
            "help": "List of module names or regex expression of the module names to replace with Poly."
            "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
        },
    )
    modules_to_save: Optional[List[str]] = field(
        default=None,
        metadata={
            "help": "List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. "
            "For example, in Sequence Classification or Token Classification tasks, "
            "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
        },
    )
    init_weights: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to initialize the weights of the Poly layers with their default initialization. Don't change "
                "this setting, except if you know exactly what you're doing."
            ),
        },
    )
    poly_type: Literal["poly"] = field(
        default="poly",
        metadata={"help": 'Type of Poly modules to be used. Currently only "poly" is supported.'},
    )
    n_tasks: int = field(
        default=1,
        metadata={"help": "Number of tasks in multitasking scenario."},
    )
    n_skills: int = field(
        default=4,
        metadata={"help": "Number of skills (LoRA) in each Poly layer."},
    )
    n_splits: int = field(
        default=1,
        metadata={"help": "Number of splits within each LoRA of a Poly layer."},
    )

    def __post_init__(self):
        self.peft_type = PeftType.POLY
        self.target_modules = (
            set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
        )
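A minimal sketch (illustrative, not part of the added file) of wrapping a model with `PolyConfig`, assuming `transformers` and the `t5-small` checkpoint are available. The field values are purely illustrative; with `n_splits=1` this is plain Poly routing, while `n_splits > 1` would enable MHR.

```py
from peft import PolyConfig, get_peft_model
from transformers import AutoModelForSeq2SeqLM

config = PolyConfig(
    task_type="SEQ_2_SEQ_LM",
    r=8,
    n_tasks=4,   # tasks the router can index into
    n_skills=8,  # LoRA "skills" shared across tasks
    n_splits=1,
)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = get_peft_model(model, config)
# Poly layers route per example, so forward passes also take `task_ids`.
```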
llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/layer.py
ADDED
@@ -0,0 +1,171 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Any

import torch
import torch.nn as nn

from peft.tuners.tuners_utils import BaseTunerLayer

from .config import PolyConfig
from .router import get_router


class PolyLayer(BaseTunerLayer):
    # All names of layers that may contain (trainable) adapter weights
    adapter_layer_names = ("poly_lora_A", "poly_lora_B", "poly_router")
    # All names of other parameters that may contain adapter-related parameters
    other_param_names = ("r", "n_tasks", "n_skills", "n_splits")

    def __init__(self, base_layer: nn.Module, **kwargs):
        self.base_layer = base_layer
        self.r = {}
        self.n_tasks = {}
        self.n_skills = {}
        self.n_splits = {}
        self.poly_type = {}
        self.poly_router = nn.ModuleDict()
        self.poly_lora_A = nn.ParameterDict()
        self.poly_lora_B = nn.ParameterDict()
        self.kwargs = kwargs

        base_layer = self.get_base_layer()
        if isinstance(base_layer, nn.Linear):
            in_features, out_features = base_layer.in_features, base_layer.out_features
        else:
            raise ValueError(f"Unsupported layer type {type(base_layer)}")

        self.in_features = in_features
        self.out_features = out_features

    def update_layer(self, adapter_name, poly_config):
        if poly_config.r <= 0:
            raise ValueError(f"`r` should be a positive integer value but the value passed is {poly_config.r}")

        self.r[adapter_name] = poly_config.r
        self.n_tasks[adapter_name] = poly_config.n_tasks
        self.n_skills[adapter_name] = poly_config.n_skills
        self.n_splits[adapter_name] = poly_config.n_splits
        self.poly_type[adapter_name] = poly_config.poly_type

        self.poly_lora_A[adapter_name] = nn.Parameter(
            torch.empty(
                poly_config.n_splits,
                poly_config.n_skills,
                self.in_features // poly_config.n_splits,
                poly_config.r,
            )
        )
        self.poly_lora_B[adapter_name] = nn.Parameter(
            torch.empty(
                poly_config.n_splits,
                poly_config.n_skills,
                poly_config.r,
                self.out_features // poly_config.n_splits,
            )
        )
        self.poly_router[adapter_name] = get_router(poly_config)

        self.reset_poly_parameters(adapter_name, init_weights=poly_config.init_weights)

        weight = getattr(self.get_base_layer(), "weight", None)
        if weight is not None:
            # the layer is already completely initialized, this is an update
            if weight.dtype.is_floating_point or weight.dtype.is_complex:
                self.to(weight.device, dtype=weight.dtype)
            else:
                self.to(weight.device)
        self.set_adapter(self.active_adapters)

    def reset_poly_parameters(self, adapter_name, init_weights):
        if adapter_name in self.poly_lora_A.keys():
            # initialize A the same way as the default for nn.Linear
            # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L269
            n_splits, n_skills, d, r = self.poly_lora_A[adapter_name].shape
            for skill in range(n_skills):
                for split in range(n_splits):
                    param = torch.empty((r, d))
                    torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5))
                    self.poly_lora_A[adapter_name].data[split, skill, :, :] = param.T

            if init_weights:
                # initialize B to zero
                torch.nn.init.zeros_(self.poly_lora_B[adapter_name])
            else:
                # initialize B the same way as the default for nn.Linear
                n_splits, n_skills, r, d = self.poly_lora_B[adapter_name].shape
                for skill in range(n_skills):
                    for split in range(n_splits):
                        param = torch.empty((d, r))
                        torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5))
                        self.poly_lora_B[adapter_name].data[split, skill, :, :] = param.T

            # initialized router
            self.poly_router[adapter_name].reset()


class Linear(nn.Module, PolyLayer):
    # Lora implemented in a dense layer
    def __init__(
        self,
        base_layer,
        adapter_name: str,
        poly_config: PolyConfig,
        **kwargs,
    ) -> None:
        super().__init__()
        PolyLayer.__init__(self, base_layer, **kwargs)

        self._active_adapter = adapter_name
        self.update_layer(adapter_name, poly_config)

    def forward(self, x: torch.Tensor, *args: Any, task_ids: torch.Tensor = None, **kwargs: Any) -> torch.Tensor:
        previous_dtype = x.dtype
        if self.disable_adapters:
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)
            for active_adapter in self.active_adapters:
                if active_adapter not in self.poly_lora_A.keys():
                    continue

                r = self.r[active_adapter]
                poly_router = self.poly_router[active_adapter]
                poly_lora_A = self.poly_lora_A[active_adapter]
                poly_lora_B = self.poly_lora_B[active_adapter]

                # Combine the output of LoRAs
                # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L293
                mixing_weights = poly_router(task_ids=task_ids, input_ids=x)
                bs, n_splits, n_skills = mixing_weights.size()

                # A is n_splits, n_skills, D // n_splits, rank
                # we want bs, n_splits, D // n_splits, rank
                A = torch.einsum("bqs,qsdr->bqdr", (mixing_weights, poly_lora_A))
                B = torch.einsum("bqs,qsrd->bqrd", (mixing_weights, poly_lora_B))

                A = A.reshape(bs, self.in_features, r)
                B = B.transpose(1, 2).reshape(bs, r, self.out_features)

                x = x.to(A.dtype)
                result += x.bmm(A).bmm(B) / r

        result = result.to(previous_dtype)
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "poly." + rep
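A small sketch (illustrative, not part of the added file) of the `Linear` wrapper defined above applied to a toy `nn.Linear`. Shapes follow the comments in `forward`: the router mixes the `(n_splits, n_skills, d, r)` LoRA tensors per example, and the output keeps the base layer's shape. All sizes here are made up for illustration.

```py
import torch
from torch import nn
from peft import PolyConfig
from peft.tuners.poly.layer import Linear as PolyLinear

base = nn.Linear(32, 32)
poly_linear = PolyLinear(
    base, adapter_name="default", poly_config=PolyConfig(r=4, n_tasks=2, n_skills=4, n_splits=1)
)
x = torch.randn(3, 32)
task_ids = torch.tensor([0, 1, 0])  # one task id per example in the batch
print(poly_linear(x, task_ids=task_ids).shape)  # torch.Size([3, 32])
```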
llmeval-env/lib/python3.10/site-packages/peft/tuners/poly/router.py
ADDED
@@ -0,0 +1,83 @@
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod

import torch
from torch import nn
from torch.distributions.relaxed_bernoulli import RelaxedBernoulli

from .config import PolyConfig


EPS = 1e-12


def get_router(poly_config: PolyConfig) -> nn.Module:
    if poly_config.poly_type == "poly":
        return PolyRouter(poly_config)
    else:
        raise ValueError(
            f"Unsupported poly_type: {poly_config.poly_type}. "
            "Currently, only the following types are supported: "
            "`poly`."
        )


class Router(nn.Module, ABC):
    @abstractmethod
    def reset(self):
        ...

    @abstractmethod
    def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor):
        ...


class PolyRouter(Router):
    # It's a simplified implementation of
    # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L138
    def __init__(self, poly_config: PolyConfig):
        super().__init__()

        self.poly_type = poly_config.poly_type
        self.n_tasks = poly_config.n_tasks
        self.n_skills = poly_config.n_skills
        self.n_splits = poly_config.n_splits

        self.module_logits = nn.Parameter(torch.empty((self.n_tasks, self.n_splits * self.n_skills)))

    def reset(self):
        torch.nn.init.uniform_(self.module_logits, -1e-3, 1e-3)

    def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor):
        if task_ids is None:
            raise ValueError("task_ids should not be None.")
        if task_ids.max().item() >= self.n_tasks:
            raise ValueError(f"Only {self.n_tasks} tasks available. Found task id = {task_ids.max().item()}")

        # move task id to input's device
        task_ids = task_ids.to(self.module_logits.device)

        module_logits = self.module_logits[task_ids]
        module_logits = module_logits.view(-1, self.n_splits, self.n_skills)

        if self.training:
            module_logits = RelaxedBernoulli(temperature=1.0, logits=module_logits).rsample()
        else:
            module_logits = torch.sigmoid(module_logits)

        module_weights = module_logits / (module_logits.sum(dim=-1, keepdim=True) + EPS)

        return module_weights
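A sketch (illustrative, not part of the added file) of the routing computation in isolation: per-(task, split) logits are converted into normalized mixing weights over the `n_skills` LoRA modules, and `eval()` selects the deterministic sigmoid branch instead of Relaxed Bernoulli sampling. The configuration values are illustrative.

```py
import torch
from peft import PolyConfig
from peft.tuners.poly.router import PolyRouter

router = PolyRouter(PolyConfig(n_tasks=4, n_skills=8, n_splits=1))
router.reset()  # module_logits start uninitialized (torch.empty)
router.eval()
task_ids = torch.tensor([0, 3])
weights = router(task_ids=task_ids, input_ids=None)
print(weights.shape)    # torch.Size([2, 1, 8]) -> (batch, n_splits, n_skills)
print(weights.sum(-1))  # each row sums to ~1
```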
llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (355 Bytes)
llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc
ADDED
Binary file (2.77 kB)
llmeval-env/lib/python3.10/site-packages/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc
ADDED
Binary file (2.83 kB)