applied-ai-018 committed
Commit 0851258 · verified · 1 Parent(s): 9109735

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/nltk/corpus/__init__.py +529 -0
  17. llmeval-env/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py +56 -0
  18. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py +186 -0
  19. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py +154 -0
  20. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py +158 -0
  21. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py +273 -0
  22. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py +88 -0
  23. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/conll.py +579 -0
  24. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py +106 -0
  25. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py +115 -0
  26. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py +0 -0
  27. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py +116 -0
  28. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py +356 -0
  29. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/knbc.py +188 -0
  30. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/lin.py +183 -0
  31. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py +342 -0
  32. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/mte.py +397 -0
  33. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py +487 -0
  34. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py +174 -0
  35. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py +95 -0
  36. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py +227 -0
  37. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py +95 -0
  38. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py +133 -0
  39. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py +331 -0
  40. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/rte.py +146 -0
  41. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py +296 -0
  42. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py +136 -0
  43. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py +75 -0
  44. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py +56 -0
  45. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py +125 -0
  46. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py +354 -0
  47. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/timit.py +510 -0
  48. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/util.py +867 -0
  49. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py +629 -0
  50. llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py +2489 -0
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc ADDED
Binary file (15 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc ADDED
Binary file (1.15 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc ADDED
Binary file (61.4 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc ADDED
Binary file (2.34 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc ADDED
Binary file (28.4 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc ADDED
Binary file (15.9 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc ADDED
Binary file (966 Bytes)
 
llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc ADDED
Binary file (5.01 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.33 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc ADDED
Binary file (62.5 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc ADDED
Binary file (33.7 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc ADDED
Binary file (14.8 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc ADDED
Binary file (22.6 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc ADDED
Binary file (22.1 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc ADDED
Binary file (1.48 kB)
 
llmeval-env/lib/python3.10/site-packages/nltk/corpus/__init__.py ADDED
@@ -0,0 +1,529 @@
1
+ # Natural Language Toolkit: Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # TODO this docstring isn't up-to-date!
9
+ """
10
+ NLTK corpus readers. The modules in this package provide functions
11
+ that can be used to read corpus files in a variety of formats. These
12
+ functions can be used to read both the corpus files that are
13
+ distributed in the NLTK corpus package, and corpus files that are part
14
+ of external corpora.
15
+
16
+ Available Corpora
17
+ =================
18
+
19
+ Please see https://www.nltk.org/nltk_data/ for a complete list.
20
+ Install corpora using nltk.download().
21
+
22
+ Corpus Reader Functions
23
+ =======================
24
+ Each corpus module defines one or more "corpus reader functions",
25
+ which can be used to read documents from that corpus. These functions
26
+ take an argument, ``item``, which is used to indicate which document
27
+ should be read from the corpus:
28
+
29
+ - If ``item`` is one of the unique identifiers listed in the corpus
30
+ module's ``items`` variable, then the corresponding document will
31
+ be loaded from the NLTK corpus package.
32
+ - If ``item`` is a filename, then that file will be read.
33
+
34
+ Additionally, corpus reader functions can be given lists of item
35
+ names; in which case, they will return a concatenation of the
36
+ corresponding documents.
37
+
38
+ Corpus reader functions are named based on the type of information
39
+ they return. Some common examples, and their return types, are:
40
+
41
+ - words(): list of str
42
+ - sents(): list of (list of str)
43
+ - paras(): list of (list of (list of str))
44
+ - tagged_words(): list of (str,str) tuple
45
+ - tagged_sents(): list of (list of (str,str))
46
+ - tagged_paras(): list of (list of (list of (str,str)))
47
+ - chunked_sents(): list of (Tree w/ (str,str) leaves)
48
+ - parsed_sents(): list of (Tree with str leaves)
49
+ - parsed_paras(): list of (list of (Tree with str leaves))
50
+ - xml(): A single xml ElementTree
51
+ - raw(): unprocessed corpus contents
52
+
53
+ For example, to read a list of the words in the Brown Corpus, use
54
+ ``nltk.corpus.brown.words()``:
55
+
56
+ >>> from nltk.corpus import brown
57
+ >>> print(", ".join(brown.words())) # doctest: +ELLIPSIS
58
+ The, Fulton, County, Grand, Jury, said, ...
59
+
60
+ """
61
+
62
+ import re
63
+
64
+ from nltk.corpus.reader import *
65
+ from nltk.corpus.util import LazyCorpusLoader
66
+ from nltk.tokenize import RegexpTokenizer
67
+
68
+ abc: PlaintextCorpusReader = LazyCorpusLoader(
69
+ "abc",
70
+ PlaintextCorpusReader,
71
+ r"(?!\.).*\.txt",
72
+ encoding=[("science", "latin_1"), ("rural", "utf8")],
73
+ )
74
+ alpino: AlpinoCorpusReader = LazyCorpusLoader(
75
+ "alpino", AlpinoCorpusReader, tagset="alpino"
76
+ )
77
+ bcp47: BCP47CorpusReader = LazyCorpusLoader(
78
+ "bcp47", BCP47CorpusReader, r"(cldr|iana)/*"
79
+ )
80
+ brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
81
+ "brown",
82
+ CategorizedTaggedCorpusReader,
83
+ r"c[a-z]\d\d",
84
+ cat_file="cats.txt",
85
+ tagset="brown",
86
+ encoding="ascii",
87
+ )
88
+ cess_cat: BracketParseCorpusReader = LazyCorpusLoader(
89
+ "cess_cat",
90
+ BracketParseCorpusReader,
91
+ r"(?!\.).*\.tbf",
92
+ tagset="unknown",
93
+ encoding="ISO-8859-15",
94
+ )
95
+ cess_esp: BracketParseCorpusReader = LazyCorpusLoader(
96
+ "cess_esp",
97
+ BracketParseCorpusReader,
98
+ r"(?!\.).*\.tbf",
99
+ tagset="unknown",
100
+ encoding="ISO-8859-15",
101
+ )
102
+ cmudict: CMUDictCorpusReader = LazyCorpusLoader(
103
+ "cmudict", CMUDictCorpusReader, ["cmudict"]
104
+ )
105
+ comtrans: AlignedCorpusReader = LazyCorpusLoader(
106
+ "comtrans", AlignedCorpusReader, r"(?!\.).*\.txt"
107
+ )
108
+ comparative_sentences: ComparativeSentencesCorpusReader = LazyCorpusLoader(
109
+ "comparative_sentences",
110
+ ComparativeSentencesCorpusReader,
111
+ r"labeledSentences\.txt",
112
+ encoding="latin-1",
113
+ )
114
+ conll2000: ConllChunkCorpusReader = LazyCorpusLoader(
115
+ "conll2000",
116
+ ConllChunkCorpusReader,
117
+ ["train.txt", "test.txt"],
118
+ ("NP", "VP", "PP"),
119
+ tagset="wsj",
120
+ encoding="ascii",
121
+ )
122
+ conll2002: ConllChunkCorpusReader = LazyCorpusLoader(
123
+ "conll2002",
124
+ ConllChunkCorpusReader,
125
+ r".*\.(test|train).*",
126
+ ("LOC", "PER", "ORG", "MISC"),
127
+ encoding="utf-8",
128
+ )
129
+ conll2007: DependencyCorpusReader = LazyCorpusLoader(
130
+ "conll2007",
131
+ DependencyCorpusReader,
132
+ r".*\.(test|train).*",
133
+ encoding=[("eus", "ISO-8859-2"), ("esp", "utf8")],
134
+ )
135
+ crubadan: CrubadanCorpusReader = LazyCorpusLoader(
136
+ "crubadan", CrubadanCorpusReader, r".*\.txt"
137
+ )
138
+ dependency_treebank: DependencyCorpusReader = LazyCorpusLoader(
139
+ "dependency_treebank", DependencyCorpusReader, r".*\.dp", encoding="ascii"
140
+ )
141
+ extended_omw: CorpusReader = LazyCorpusLoader(
142
+ "extended_omw", CorpusReader, r".*/wn-[a-z\-]*\.tab", encoding="utf8"
143
+ )
144
+ floresta: BracketParseCorpusReader = LazyCorpusLoader(
145
+ "floresta",
146
+ BracketParseCorpusReader,
147
+ r"(?!\.).*\.ptb",
148
+ "#",
149
+ tagset="unknown",
150
+ encoding="ISO-8859-15",
151
+ )
152
+ framenet15: FramenetCorpusReader = LazyCorpusLoader(
153
+ "framenet_v15",
154
+ FramenetCorpusReader,
155
+ [
156
+ "frRelation.xml",
157
+ "frameIndex.xml",
158
+ "fulltextIndex.xml",
159
+ "luIndex.xml",
160
+ "semTypes.xml",
161
+ ],
162
+ )
163
+ framenet: FramenetCorpusReader = LazyCorpusLoader(
164
+ "framenet_v17",
165
+ FramenetCorpusReader,
166
+ [
167
+ "frRelation.xml",
168
+ "frameIndex.xml",
169
+ "fulltextIndex.xml",
170
+ "luIndex.xml",
171
+ "semTypes.xml",
172
+ ],
173
+ )
174
+ gazetteers: WordListCorpusReader = LazyCorpusLoader(
175
+ "gazetteers", WordListCorpusReader, r"(?!LICENSE|\.).*\.txt", encoding="ISO-8859-2"
176
+ )
177
+ genesis: PlaintextCorpusReader = LazyCorpusLoader(
178
+ "genesis",
179
+ PlaintextCorpusReader,
180
+ r"(?!\.).*\.txt",
181
+ encoding=[
182
+ ("finnish|french|german", "latin_1"),
183
+ ("swedish", "cp865"),
184
+ (".*", "utf_8"),
185
+ ],
186
+ )
187
+ gutenberg: PlaintextCorpusReader = LazyCorpusLoader(
188
+ "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
189
+ )
190
+ ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*")
191
+ inaugural: PlaintextCorpusReader = LazyCorpusLoader(
192
+ "inaugural", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
193
+ )
194
+ # [XX] This should probably just use TaggedCorpusReader:
195
+ indian: IndianCorpusReader = LazyCorpusLoader(
196
+ "indian", IndianCorpusReader, r"(?!\.).*\.pos", tagset="unknown", encoding="utf8"
197
+ )
198
+
199
+ jeita: ChasenCorpusReader = LazyCorpusLoader(
200
+ "jeita", ChasenCorpusReader, r".*\.chasen", encoding="utf-8"
201
+ )
202
+ knbc: KNBCorpusReader = LazyCorpusLoader(
203
+ "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp"
204
+ )
205
+ lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader(
206
+ "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp"
207
+ )
208
+ mac_morpho: MacMorphoCorpusReader = LazyCorpusLoader(
209
+ "mac_morpho",
210
+ MacMorphoCorpusReader,
211
+ r"(?!\.).*\.txt",
212
+ tagset="unknown",
213
+ encoding="latin-1",
214
+ )
215
+ machado: PortugueseCategorizedPlaintextCorpusReader = LazyCorpusLoader(
216
+ "machado",
217
+ PortugueseCategorizedPlaintextCorpusReader,
218
+ r"(?!\.).*\.txt",
219
+ cat_pattern=r"([a-z]*)/.*",
220
+ encoding="latin-1",
221
+ )
222
+ masc_tagged: CategorizedTaggedCorpusReader = LazyCorpusLoader(
223
+ "masc_tagged",
224
+ CategorizedTaggedCorpusReader,
225
+ r"(spoken|written)/.*\.txt",
226
+ cat_file="categories.txt",
227
+ tagset="wsj",
228
+ encoding="utf-8",
229
+ sep="_",
230
+ )
231
+ movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
232
+ "movie_reviews",
233
+ CategorizedPlaintextCorpusReader,
234
+ r"(?!\.).*\.txt",
235
+ cat_pattern=r"(neg|pos)/.*",
236
+ encoding="ascii",
237
+ )
238
+ multext_east: MTECorpusReader = LazyCorpusLoader(
239
+ "mte_teip5", MTECorpusReader, r"(oana).*\.xml", encoding="utf-8"
240
+ )
241
+ names: WordListCorpusReader = LazyCorpusLoader(
242
+ "names", WordListCorpusReader, r"(?!\.).*\.txt", encoding="ascii"
243
+ )
244
+ nps_chat: NPSChatCorpusReader = LazyCorpusLoader(
245
+ "nps_chat", NPSChatCorpusReader, r"(?!README|\.).*\.xml", tagset="wsj"
246
+ )
247
+ opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader(
248
+ "opinion_lexicon",
249
+ OpinionLexiconCorpusReader,
250
+ r"(\w+)\-words\.txt",
251
+ encoding="ISO-8859-2",
252
+ )
253
+ ppattach: PPAttachmentCorpusReader = LazyCorpusLoader(
254
+ "ppattach", PPAttachmentCorpusReader, ["training", "test", "devset"]
255
+ )
256
+ product_reviews_1: ReviewsCorpusReader = LazyCorpusLoader(
257
+ "product_reviews_1", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
258
+ )
259
+ product_reviews_2: ReviewsCorpusReader = LazyCorpusLoader(
260
+ "product_reviews_2", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
261
+ )
262
+ pros_cons: ProsConsCorpusReader = LazyCorpusLoader(
263
+ "pros_cons",
264
+ ProsConsCorpusReader,
265
+ r"Integrated(Cons|Pros)\.txt",
266
+ cat_pattern=r"Integrated(Cons|Pros)\.txt",
267
+ encoding="ISO-8859-2",
268
+ )
269
+ ptb: CategorizedBracketParseCorpusReader = (
270
+ LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions
271
+ "ptb",
272
+ CategorizedBracketParseCorpusReader,
273
+ r"(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG",
274
+ cat_file="allcats.txt",
275
+ tagset="wsj",
276
+ )
277
+ )
278
+ qc: StringCategoryCorpusReader = LazyCorpusLoader(
279
+ "qc", StringCategoryCorpusReader, ["train.txt", "test.txt"], encoding="ISO-8859-2"
280
+ )
281
+ reuters: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
282
+ "reuters",
283
+ CategorizedPlaintextCorpusReader,
284
+ "(training|test).*",
285
+ cat_file="cats.txt",
286
+ encoding="ISO-8859-2",
287
+ )
288
+ rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml")
289
+ senseval: SensevalCorpusReader = LazyCorpusLoader(
290
+ "senseval", SensevalCorpusReader, r"(?!\.).*\.pos"
291
+ )
292
+ sentence_polarity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
293
+ "sentence_polarity",
294
+ CategorizedSentencesCorpusReader,
295
+ r"rt-polarity\.(neg|pos)",
296
+ cat_pattern=r"rt-polarity\.(neg|pos)",
297
+ encoding="utf-8",
298
+ )
299
+ sentiwordnet: SentiWordNetCorpusReader = LazyCorpusLoader(
300
+ "sentiwordnet", SentiWordNetCorpusReader, "SentiWordNet_3.0.0.txt", encoding="utf-8"
301
+ )
302
+ shakespeare: XMLCorpusReader = LazyCorpusLoader(
303
+ "shakespeare", XMLCorpusReader, r"(?!\.).*\.xml"
304
+ )
305
+ sinica_treebank: SinicaTreebankCorpusReader = LazyCorpusLoader(
306
+ "sinica_treebank",
307
+ SinicaTreebankCorpusReader,
308
+ ["parsed"],
309
+ tagset="unknown",
310
+ encoding="utf-8",
311
+ )
312
+ state_union: PlaintextCorpusReader = LazyCorpusLoader(
313
+ "state_union", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="ISO-8859-2"
314
+ )
315
+ stopwords: WordListCorpusReader = LazyCorpusLoader(
316
+ "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8"
317
+ )
318
+ subjectivity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
319
+ "subjectivity",
320
+ CategorizedSentencesCorpusReader,
321
+ r"(quote.tok.gt9|plot.tok.gt9)\.5000",
322
+ cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]},
323
+ encoding="latin-1",
324
+ )
325
+ swadesh: SwadeshCorpusReader = LazyCorpusLoader(
326
+ "swadesh", SwadeshCorpusReader, r"(?!README|\.).*", encoding="utf8"
327
+ )
328
+ swadesh110: PanlexSwadeshCorpusReader = LazyCorpusLoader(
329
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh110/.*\.txt", encoding="utf8"
330
+ )
331
+ swadesh207: PanlexSwadeshCorpusReader = LazyCorpusLoader(
332
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh207/.*\.txt", encoding="utf8"
333
+ )
334
+ switchboard: SwitchboardCorpusReader = LazyCorpusLoader(
335
+ "switchboard", SwitchboardCorpusReader, tagset="wsj"
336
+ )
337
+ timit: TimitCorpusReader = LazyCorpusLoader("timit", TimitCorpusReader)
338
+ timit_tagged: TimitTaggedCorpusReader = LazyCorpusLoader(
339
+ "timit", TimitTaggedCorpusReader, r".+\.tags", tagset="wsj", encoding="ascii"
340
+ )
341
+ toolbox: ToolboxCorpusReader = LazyCorpusLoader(
342
+ "toolbox", ToolboxCorpusReader, r"(?!.*(README|\.)).*\.(dic|txt)"
343
+ )
344
+ treebank: BracketParseCorpusReader = LazyCorpusLoader(
345
+ "treebank/combined",
346
+ BracketParseCorpusReader,
347
+ r"wsj_.*\.mrg",
348
+ tagset="wsj",
349
+ encoding="ascii",
350
+ )
351
+ treebank_chunk: ChunkedCorpusReader = LazyCorpusLoader(
352
+ "treebank/tagged",
353
+ ChunkedCorpusReader,
354
+ r"wsj_.*\.pos",
355
+ sent_tokenizer=RegexpTokenizer(r"(?<=/\.)\s*(?![^\[]*\])", gaps=True),
356
+ para_block_reader=tagged_treebank_para_block_reader,
357
+ tagset="wsj",
358
+ encoding="ascii",
359
+ )
360
+ treebank_raw: PlaintextCorpusReader = LazyCorpusLoader(
361
+ "treebank/raw", PlaintextCorpusReader, r"wsj_.*", encoding="ISO-8859-2"
362
+ )
363
+ twitter_samples: TwitterCorpusReader = LazyCorpusLoader(
364
+ "twitter_samples", TwitterCorpusReader, r".*\.json"
365
+ )
366
+ udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader)
367
+ udhr2: PlaintextCorpusReader = LazyCorpusLoader(
368
+ "udhr2", PlaintextCorpusReader, r".*\.txt", encoding="utf8"
369
+ )
370
+ universal_treebanks: ConllCorpusReader = LazyCorpusLoader(
371
+ "universal_treebanks_v20",
372
+ ConllCorpusReader,
373
+ r".*\.conll",
374
+ columntypes=(
375
+ "ignore",
376
+ "words",
377
+ "ignore",
378
+ "ignore",
379
+ "pos",
380
+ "ignore",
381
+ "ignore",
382
+ "ignore",
383
+ "ignore",
384
+ "ignore",
385
+ ),
386
+ )
387
+ verbnet: VerbnetCorpusReader = LazyCorpusLoader(
388
+ "verbnet", VerbnetCorpusReader, r"(?!\.).*\.xml"
389
+ )
390
+ webtext: PlaintextCorpusReader = LazyCorpusLoader(
391
+ "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2"
392
+ )
393
+ wordnet: WordNetCorpusReader = LazyCorpusLoader(
394
+ "wordnet",
395
+ WordNetCorpusReader,
396
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
397
+ )
398
+ wordnet31: WordNetCorpusReader = LazyCorpusLoader(
399
+ "wordnet31",
400
+ WordNetCorpusReader,
401
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
402
+ )
403
+ wordnet2021: WordNetCorpusReader = LazyCorpusLoader(
404
+ "wordnet2021",
405
+ WordNetCorpusReader,
406
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
407
+ )
408
+ wordnet_ic: WordNetICCorpusReader = LazyCorpusLoader(
409
+ "wordnet_ic", WordNetICCorpusReader, r".*\.dat"
410
+ )
411
+ words: WordListCorpusReader = LazyCorpusLoader(
412
+ "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii"
413
+ )
414
+
415
+ # defined after treebank
416
+ propbank: PropbankCorpusReader = LazyCorpusLoader(
417
+ "propbank",
418
+ PropbankCorpusReader,
419
+ "prop.txt",
420
+ r"frames/.*\.xml",
421
+ "verbs.txt",
422
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
423
+ treebank,
424
+ ) # Must be defined *after* treebank corpus.
425
+ nombank: NombankCorpusReader = LazyCorpusLoader(
426
+ "nombank.1.0",
427
+ NombankCorpusReader,
428
+ "nombank.1.0",
429
+ r"frames/.*\.xml",
430
+ "nombank.1.0.words",
431
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
432
+ treebank,
433
+ ) # Must be defined *after* treebank corpus.
434
+ propbank_ptb: PropbankCorpusReader = LazyCorpusLoader(
435
+ "propbank",
436
+ PropbankCorpusReader,
437
+ "prop.txt",
438
+ r"frames/.*\.xml",
439
+ "verbs.txt",
440
+ lambda filename: filename.upper(),
441
+ ptb,
442
+ ) # Must be defined *after* ptb corpus.
443
+ nombank_ptb: NombankCorpusReader = LazyCorpusLoader(
444
+ "nombank.1.0",
445
+ NombankCorpusReader,
446
+ "nombank.1.0",
447
+ r"frames/.*\.xml",
448
+ "nombank.1.0.words",
449
+ lambda filename: filename.upper(),
450
+ ptb,
451
+ ) # Must be defined *after* ptb corpus.
452
+ semcor: SemcorCorpusReader = LazyCorpusLoader(
453
+ "semcor", SemcorCorpusReader, r"brown./tagfiles/br-.*\.xml", wordnet
454
+ ) # Must be defined *after* wordnet corpus.
455
+
456
+ nonbreaking_prefixes: NonbreakingPrefixesCorpusReader = LazyCorpusLoader(
457
+ "nonbreaking_prefixes",
458
+ NonbreakingPrefixesCorpusReader,
459
+ r"(?!README|\.).*",
460
+ encoding="utf8",
461
+ )
462
+ perluniprops: UnicharsCorpusReader = LazyCorpusLoader(
463
+ "perluniprops",
464
+ UnicharsCorpusReader,
465
+ r"(?!README|\.).*",
466
+ nltk_data_subdir="misc",
467
+ encoding="utf8",
468
+ )
469
+
470
+ # mwa_ppdb = LazyCorpusLoader(
471
+ # 'mwa_ppdb', MWAPPDBCorpusReader, r'(?!README|\.).*', nltk_data_subdir='misc', encoding='utf8')
472
+
473
+ # See https://github.com/nltk/nltk/issues/1579
474
+ # and https://github.com/nltk/nltk/issues/1716
475
+ #
476
+ # pl196x = LazyCorpusLoader(
477
+ # 'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml',
478
+ # cat_file='cats.txt', textid_file='textids.txt', encoding='utf8')
479
+ #
480
+ # ipipan = LazyCorpusLoader(
481
+ # 'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml')
482
+ #
483
+ # nkjp = LazyCorpusLoader(
484
+ # 'nkjp', NKJPCorpusReader, r'', encoding='utf8')
485
+ #
486
+ # panlex_lite = LazyCorpusLoader(
487
+ # 'panlex_lite', PanLexLiteCorpusReader)
488
+ #
489
+ # ycoe = LazyCorpusLoader(
490
+ # 'ycoe', YCOECorpusReader)
491
+ #
492
+ # corpus not available with NLTK; these lines caused help(nltk.corpus) to break
493
+ # hebrew_treebank = LazyCorpusLoader(
494
+ # 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt')
495
+
496
+ # FIXME: override any imported demo from various corpora, see https://github.com/nltk/nltk/issues/2116
497
+ def demo():
498
+ # This is out-of-date:
499
+ abc.demo()
500
+ brown.demo()
501
+ # chat80.demo()
502
+ cmudict.demo()
503
+ conll2000.demo()
504
+ conll2002.demo()
505
+ genesis.demo()
506
+ gutenberg.demo()
507
+ ieer.demo()
508
+ inaugural.demo()
509
+ indian.demo()
510
+ names.demo()
511
+ ppattach.demo()
512
+ senseval.demo()
513
+ shakespeare.demo()
514
+ sinica_treebank.demo()
515
+ state_union.demo()
516
+ stopwords.demo()
517
+ timit.demo()
518
+ toolbox.demo()
519
+ treebank.demo()
520
+ udhr.demo()
521
+ webtext.demo()
522
+ words.demo()
523
+
524
+
525
+ # ycoe.demo()
526
+
527
+ if __name__ == "__main__":
528
+ # demo()
529
+ pass
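All of the names defined in this file are LazyCorpusLoader proxies: nothing is read from disk until a reader method is first called, at which point the loader swaps itself for the real corpus reader. A minimal usage sketch (illustrative only, assuming the "brown" and "treebank" data packages have already been installed with nltk.download()):

# Illustrative usage of the lazy loaders defined above; assumes
# nltk.download("brown") and nltk.download("treebank") have been run.
from nltk.corpus import brown, treebank

print(brown.words()[:10])          # first ten word tokens of the Brown Corpus
print(brown.tagged_sents()[0])     # first sentence as (word, tag) pairs
print(treebank.parsed_sents()[0])  # first parse tree from the combined Treebank sample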
llmeval-env/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py ADDED
@@ -0,0 +1,56 @@
+ # Natural Language Toolkit: Europarl Corpus Readers
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Nitin Madnani <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ import re
+
+ from nltk.corpus.reader import *
+ from nltk.corpus.util import LazyCorpusLoader
+
+ # Create a new corpus reader instance for each European language
+ danish: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/danish", EuroparlCorpusReader, r"ep-.*\.da", encoding="utf-8"
+ )
+
+ dutch: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/dutch", EuroparlCorpusReader, r"ep-.*\.nl", encoding="utf-8"
+ )
+
+ english: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/english", EuroparlCorpusReader, r"ep-.*\.en", encoding="utf-8"
+ )
+
+ finnish: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/finnish", EuroparlCorpusReader, r"ep-.*\.fi", encoding="utf-8"
+ )
+
+ french: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/french", EuroparlCorpusReader, r"ep-.*\.fr", encoding="utf-8"
+ )
+
+ german: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/german", EuroparlCorpusReader, r"ep-.*\.de", encoding="utf-8"
+ )
+
+ greek: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/greek", EuroparlCorpusReader, r"ep-.*\.el", encoding="utf-8"
+ )
+
+ italian: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/italian", EuroparlCorpusReader, r"ep-.*\.it", encoding="utf-8"
+ )
+
+ portuguese: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/portuguese", EuroparlCorpusReader, r"ep-.*\.pt", encoding="utf-8"
+ )
+
+ spanish: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/spanish", EuroparlCorpusReader, r"ep-.*\.es", encoding="utf-8"
+ )
+
+ swedish: EuroparlCorpusReader = LazyCorpusLoader(
+     "europarl_raw/swedish", EuroparlCorpusReader, r"ep-.*\.sv", encoding="utf-8"
+ )
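Each loader above exposes the usual plaintext-reader interface for one language of the raw Europarl sample. A small usage sketch (illustrative only, assuming the "europarl_raw" data package has been fetched with nltk.download("europarl_raw")):

# Illustrative only; requires nltk.download("europarl_raw").
from nltk.corpus import europarl_raw

print(europarl_raw.english.words()[:10])  # first word tokens of the English section
print(europarl_raw.french.sents()[0])     # first French sentence as a token list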
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py ADDED
@@ -0,0 +1,186 @@
1
+ # Natural Language Toolkit: Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ NLTK corpus readers. The modules in this package provide functions
11
+ that can be used to read corpus fileids in a variety of formats. These
12
+ functions can be used to read both the corpus fileids that are
13
+ distributed in the NLTK corpus package, and corpus fileids that are part
14
+ of external corpora.
15
+
16
+ Corpus Reader Functions
17
+ =======================
18
+ Each corpus module defines one or more "corpus reader functions",
19
+ which can be used to read documents from that corpus. These functions
20
+ take an argument, ``item``, which is used to indicate which document
21
+ should be read from the corpus:
22
+
23
+ - If ``item`` is one of the unique identifiers listed in the corpus
24
+ module's ``items`` variable, then the corresponding document will
25
+ be loaded from the NLTK corpus package.
26
+ - If ``item`` is a fileid, then that file will be read.
27
+
28
+ Additionally, corpus reader functions can be given lists of item
29
+ names; in which case, they will return a concatenation of the
30
+ corresponding documents.
31
+
32
+ Corpus reader functions are named based on the type of information
33
+ they return. Some common examples, and their return types, are:
34
+
35
+ - words(): list of str
36
+ - sents(): list of (list of str)
37
+ - paras(): list of (list of (list of str))
38
+ - tagged_words(): list of (str,str) tuple
39
+ - tagged_sents(): list of (list of (str,str))
40
+ - tagged_paras(): list of (list of (list of (str,str)))
41
+ - chunked_sents(): list of (Tree w/ (str,str) leaves)
42
+ - parsed_sents(): list of (Tree with str leaves)
43
+ - parsed_paras(): list of (list of (Tree with str leaves))
44
+ - xml(): A single xml ElementTree
45
+ - raw(): unprocessed corpus contents
46
+
47
+ For example, to read a list of the words in the Brown Corpus, use
48
+ ``nltk.corpus.brown.words()``:
49
+
50
+ >>> from nltk.corpus import brown
51
+ >>> print(", ".join(brown.words()[:6])) # only first 6 words
52
+ The, Fulton, County, Grand, Jury, said
53
+
54
+ isort:skip_file
55
+ """
56
+
57
+ from nltk.corpus.reader.plaintext import *
58
+ from nltk.corpus.reader.util import *
59
+ from nltk.corpus.reader.api import *
60
+ from nltk.corpus.reader.tagged import *
61
+ from nltk.corpus.reader.cmudict import *
62
+ from nltk.corpus.reader.conll import *
63
+ from nltk.corpus.reader.chunked import *
64
+ from nltk.corpus.reader.wordlist import *
65
+ from nltk.corpus.reader.xmldocs import *
66
+ from nltk.corpus.reader.ppattach import *
67
+ from nltk.corpus.reader.senseval import *
68
+ from nltk.corpus.reader.ieer import *
69
+ from nltk.corpus.reader.sinica_treebank import *
70
+ from nltk.corpus.reader.bracket_parse import *
71
+ from nltk.corpus.reader.indian import *
72
+ from nltk.corpus.reader.toolbox import *
73
+ from nltk.corpus.reader.timit import *
74
+ from nltk.corpus.reader.ycoe import *
75
+ from nltk.corpus.reader.rte import *
76
+ from nltk.corpus.reader.string_category import *
77
+ from nltk.corpus.reader.propbank import *
78
+ from nltk.corpus.reader.verbnet import *
79
+ from nltk.corpus.reader.bnc import *
80
+ from nltk.corpus.reader.nps_chat import *
81
+ from nltk.corpus.reader.wordnet import *
82
+ from nltk.corpus.reader.switchboard import *
83
+ from nltk.corpus.reader.dependency import *
84
+ from nltk.corpus.reader.nombank import *
85
+ from nltk.corpus.reader.ipipan import *
86
+ from nltk.corpus.reader.pl196x import *
87
+ from nltk.corpus.reader.knbc import *
88
+ from nltk.corpus.reader.chasen import *
89
+ from nltk.corpus.reader.childes import *
90
+ from nltk.corpus.reader.aligned import *
91
+ from nltk.corpus.reader.lin import *
92
+ from nltk.corpus.reader.semcor import *
93
+ from nltk.corpus.reader.framenet import *
94
+ from nltk.corpus.reader.udhr import *
95
+ from nltk.corpus.reader.bnc import *
96
+ from nltk.corpus.reader.sentiwordnet import *
97
+ from nltk.corpus.reader.twitter import *
98
+ from nltk.corpus.reader.nkjp import *
99
+ from nltk.corpus.reader.crubadan import *
100
+ from nltk.corpus.reader.mte import *
101
+ from nltk.corpus.reader.reviews import *
102
+ from nltk.corpus.reader.opinion_lexicon import *
103
+ from nltk.corpus.reader.pros_cons import *
104
+ from nltk.corpus.reader.categorized_sents import *
105
+ from nltk.corpus.reader.comparative_sents import *
106
+ from nltk.corpus.reader.panlex_lite import *
107
+ from nltk.corpus.reader.panlex_swadesh import *
108
+ from nltk.corpus.reader.bcp47 import *
109
+
110
+ # Make sure that nltk.corpus.reader.bracket_parse gives the module, not
111
+ # the function bracket_parse() defined in nltk.tree:
112
+ from nltk.corpus.reader import bracket_parse
113
+
114
+ __all__ = [
115
+ "CorpusReader",
116
+ "CategorizedCorpusReader",
117
+ "PlaintextCorpusReader",
118
+ "find_corpus_fileids",
119
+ "TaggedCorpusReader",
120
+ "CMUDictCorpusReader",
121
+ "ConllChunkCorpusReader",
122
+ "WordListCorpusReader",
123
+ "PPAttachmentCorpusReader",
124
+ "SensevalCorpusReader",
125
+ "IEERCorpusReader",
126
+ "ChunkedCorpusReader",
127
+ "SinicaTreebankCorpusReader",
128
+ "BracketParseCorpusReader",
129
+ "IndianCorpusReader",
130
+ "ToolboxCorpusReader",
131
+ "TimitCorpusReader",
132
+ "YCOECorpusReader",
133
+ "MacMorphoCorpusReader",
134
+ "SyntaxCorpusReader",
135
+ "AlpinoCorpusReader",
136
+ "RTECorpusReader",
137
+ "StringCategoryCorpusReader",
138
+ "EuroparlCorpusReader",
139
+ "CategorizedBracketParseCorpusReader",
140
+ "CategorizedTaggedCorpusReader",
141
+ "CategorizedPlaintextCorpusReader",
142
+ "PortugueseCategorizedPlaintextCorpusReader",
143
+ "tagged_treebank_para_block_reader",
144
+ "PropbankCorpusReader",
145
+ "VerbnetCorpusReader",
146
+ "BNCCorpusReader",
147
+ "ConllCorpusReader",
148
+ "XMLCorpusReader",
149
+ "NPSChatCorpusReader",
150
+ "SwadeshCorpusReader",
151
+ "WordNetCorpusReader",
152
+ "WordNetICCorpusReader",
153
+ "SwitchboardCorpusReader",
154
+ "DependencyCorpusReader",
155
+ "NombankCorpusReader",
156
+ "IPIPANCorpusReader",
157
+ "Pl196xCorpusReader",
158
+ "TEICorpusView",
159
+ "KNBCorpusReader",
160
+ "ChasenCorpusReader",
161
+ "CHILDESCorpusReader",
162
+ "AlignedCorpusReader",
163
+ "TimitTaggedCorpusReader",
164
+ "LinThesaurusCorpusReader",
165
+ "SemcorCorpusReader",
166
+ "FramenetCorpusReader",
167
+ "UdhrCorpusReader",
168
+ "BNCCorpusReader",
169
+ "SentiWordNetCorpusReader",
170
+ "SentiSynset",
171
+ "TwitterCorpusReader",
172
+ "NKJPCorpusReader",
173
+ "CrubadanCorpusReader",
174
+ "MTECorpusReader",
175
+ "ReviewsCorpusReader",
176
+ "OpinionLexiconCorpusReader",
177
+ "ProsConsCorpusReader",
178
+ "CategorizedSentencesCorpusReader",
179
+ "ComparativeSentencesCorpusReader",
180
+ "PanLexLiteCorpusReader",
181
+ "NonbreakingPrefixesCorpusReader",
182
+ "UnicharsCorpusReader",
183
+ "MWAPPDBCorpusReader",
184
+ "PanlexSwadeshCorpusReader",
185
+ "BCP47CorpusReader",
186
+ ]
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py ADDED
@@ -0,0 +1,154 @@
1
+ # Natural Language Toolkit: Aligned Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # URL: <https://www.nltk.org/>
5
+ # Author: Steven Bird <[email protected]>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.corpus.reader.api import CorpusReader
9
+ from nltk.corpus.reader.util import (
10
+ StreamBackedCorpusView,
11
+ concat,
12
+ read_alignedsent_block,
13
+ )
14
+ from nltk.tokenize import RegexpTokenizer, WhitespaceTokenizer
15
+ from nltk.translate import AlignedSent, Alignment
16
+
17
+
18
+ class AlignedCorpusReader(CorpusReader):
19
+ """
20
+ Reader for corpora of word-aligned sentences. Tokens are assumed
21
+ to be separated by whitespace. Sentences begin on separate lines.
22
+ """
23
+
24
+ def __init__(
25
+ self,
26
+ root,
27
+ fileids,
28
+ sep="/",
29
+ word_tokenizer=WhitespaceTokenizer(),
30
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
31
+ alignedsent_block_reader=read_alignedsent_block,
32
+ encoding="latin1",
33
+ ):
34
+ """
35
+ Construct a new Aligned Corpus reader for a set of documents
36
+ located at the given root directory. Example usage:
37
+
38
+ >>> root = '/...path to corpus.../'
39
+ >>> reader = AlignedCorpusReader(root, '.*', '.txt') # doctest: +SKIP
40
+
41
+ :param root: The root directory for this corpus.
42
+ :param fileids: A list or regexp specifying the fileids in this corpus.
43
+ """
44
+ CorpusReader.__init__(self, root, fileids, encoding)
45
+ self._sep = sep
46
+ self._word_tokenizer = word_tokenizer
47
+ self._sent_tokenizer = sent_tokenizer
48
+ self._alignedsent_block_reader = alignedsent_block_reader
49
+
50
+ def words(self, fileids=None):
51
+ """
52
+ :return: the given file(s) as a list of words
53
+ and punctuation symbols.
54
+ :rtype: list(str)
55
+ """
56
+ return concat(
57
+ [
58
+ AlignedSentCorpusView(
59
+ fileid,
60
+ enc,
61
+ False,
62
+ False,
63
+ self._word_tokenizer,
64
+ self._sent_tokenizer,
65
+ self._alignedsent_block_reader,
66
+ )
67
+ for (fileid, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+ def sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of
74
+ sentences or utterances, each encoded as a list of word
75
+ strings.
76
+ :rtype: list(list(str))
77
+ """
78
+ return concat(
79
+ [
80
+ AlignedSentCorpusView(
81
+ fileid,
82
+ enc,
83
+ False,
84
+ True,
85
+ self._word_tokenizer,
86
+ self._sent_tokenizer,
87
+ self._alignedsent_block_reader,
88
+ )
89
+ for (fileid, enc) in self.abspaths(fileids, True)
90
+ ]
91
+ )
92
+
93
+ def aligned_sents(self, fileids=None):
94
+ """
95
+ :return: the given file(s) as a list of AlignedSent objects.
96
+ :rtype: list(AlignedSent)
97
+ """
98
+ return concat(
99
+ [
100
+ AlignedSentCorpusView(
101
+ fileid,
102
+ enc,
103
+ True,
104
+ True,
105
+ self._word_tokenizer,
106
+ self._sent_tokenizer,
107
+ self._alignedsent_block_reader,
108
+ )
109
+ for (fileid, enc) in self.abspaths(fileids, True)
110
+ ]
111
+ )
112
+
113
+
114
+ class AlignedSentCorpusView(StreamBackedCorpusView):
115
+ """
116
+ A specialized corpus view for aligned sentences.
117
+ ``AlignedSentCorpusView`` objects are typically created by
118
+ ``AlignedCorpusReader`` (not directly by nltk users).
119
+ """
120
+
121
+ def __init__(
122
+ self,
123
+ corpus_file,
124
+ encoding,
125
+ aligned,
126
+ group_by_sent,
127
+ word_tokenizer,
128
+ sent_tokenizer,
129
+ alignedsent_block_reader,
130
+ ):
131
+ self._aligned = aligned
132
+ self._group_by_sent = group_by_sent
133
+ self._word_tokenizer = word_tokenizer
134
+ self._sent_tokenizer = sent_tokenizer
135
+ self._alignedsent_block_reader = alignedsent_block_reader
136
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
137
+
138
+ def read_block(self, stream):
139
+ block = [
140
+ self._word_tokenizer.tokenize(sent_str)
141
+ for alignedsent_str in self._alignedsent_block_reader(stream)
142
+ for sent_str in self._sent_tokenizer.tokenize(alignedsent_str)
143
+ ]
144
+ if self._aligned:
145
+ block[2] = Alignment.fromstring(
146
+ " ".join(block[2])
147
+ ) # kludge; we shouldn't have tokenized the alignment string
148
+ block = [AlignedSent(*block)]
149
+ elif self._group_by_sent:
150
+ block = [block[0]]
151
+ else:
152
+ block = block[0]
153
+
154
+ return block
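The stock comtrans loader in nltk/corpus/__init__.py above is wired to this reader, so it is the easiest way to see its three access levels (words, sents, aligned_sents). A usage sketch, assuming nltk.download("comtrans") has been run:

# Illustrative only; requires nltk.download("comtrans").
from nltk.corpus import comtrans

asent = comtrans.aligned_sents()[0]
print(asent.words)      # source-language tokens
print(asent.mots)       # target-language tokens
print(asent.alignment)  # Alignment of (source index, target index) pairs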
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py ADDED
@@ -0,0 +1,158 @@
1
+ #
2
+ # Copyright (C) 2001-2023 NLTK Project
3
+ # Author: Masato Hagiwara <[email protected]>
4
+ # URL: <https://www.nltk.org/>
5
+ # For license information, see LICENSE.TXT
6
+
7
+ import sys
8
+
9
+ from nltk.corpus.reader import util
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+
13
+
14
+ class ChasenCorpusReader(CorpusReader):
15
+ def __init__(self, root, fileids, encoding="utf8", sent_splitter=None):
16
+ self._sent_splitter = sent_splitter
17
+ CorpusReader.__init__(self, root, fileids, encoding)
18
+
19
+ def words(self, fileids=None):
20
+ return concat(
21
+ [
22
+ ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter)
23
+ for (fileid, enc) in self.abspaths(fileids, True)
24
+ ]
25
+ )
26
+
27
+ def tagged_words(self, fileids=None):
28
+ return concat(
29
+ [
30
+ ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter)
31
+ for (fileid, enc) in self.abspaths(fileids, True)
32
+ ]
33
+ )
34
+
35
+ def sents(self, fileids=None):
36
+ return concat(
37
+ [
38
+ ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter)
39
+ for (fileid, enc) in self.abspaths(fileids, True)
40
+ ]
41
+ )
42
+
43
+ def tagged_sents(self, fileids=None):
44
+ return concat(
45
+ [
46
+ ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def paras(self, fileids=None):
52
+ return concat(
53
+ [
54
+ ChasenCorpusView(fileid, enc, False, True, True, self._sent_splitter)
55
+ for (fileid, enc) in self.abspaths(fileids, True)
56
+ ]
57
+ )
58
+
59
+ def tagged_paras(self, fileids=None):
60
+ return concat(
61
+ [
62
+ ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter)
63
+ for (fileid, enc) in self.abspaths(fileids, True)
64
+ ]
65
+ )
66
+
67
+
68
+ class ChasenCorpusView(StreamBackedCorpusView):
69
+ """
70
+ A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``,
71
+ but this'll use fixed sets of word and sentence tokenizer.
72
+ """
73
+
74
+ def __init__(
75
+ self,
76
+ corpus_file,
77
+ encoding,
78
+ tagged,
79
+ group_by_sent,
80
+ group_by_para,
81
+ sent_splitter=None,
82
+ ):
83
+ self._tagged = tagged
84
+ self._group_by_sent = group_by_sent
85
+ self._group_by_para = group_by_para
86
+ self._sent_splitter = sent_splitter
87
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
88
+
89
+ def read_block(self, stream):
90
+ """Reads one paragraph at a time."""
91
+ block = []
92
+ for para_str in read_regexp_block(stream, r".", r"^EOS\n"):
93
+
94
+ para = []
95
+
96
+ sent = []
97
+ for line in para_str.splitlines():
98
+
99
+ _eos = line.strip() == "EOS"
100
+ _cells = line.split("\t")
101
+ w = (_cells[0], "\t".join(_cells[1:]))
102
+ if not _eos:
103
+ sent.append(w)
104
+
105
+ if _eos or (self._sent_splitter and self._sent_splitter(w)):
106
+ if not self._tagged:
107
+ sent = [w for (w, t) in sent]
108
+ if self._group_by_sent:
109
+ para.append(sent)
110
+ else:
111
+ para.extend(sent)
112
+ sent = []
113
+
114
+ if len(sent) > 0:
115
+ if not self._tagged:
116
+ sent = [w for (w, t) in sent]
117
+
118
+ if self._group_by_sent:
119
+ para.append(sent)
120
+ else:
121
+ para.extend(sent)
122
+
123
+ if self._group_by_para:
124
+ block.append(para)
125
+ else:
126
+ block.extend(para)
127
+
128
+ return block
129
+
130
+
131
+ def demo():
132
+
133
+ import nltk
134
+ from nltk.corpus.util import LazyCorpusLoader
135
+
136
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
137
+ print("/".join(jeita.words()[22100:22140]))
138
+
139
+ print(
140
+ "\nEOS\n".join(
141
+ "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent)
142
+ for sent in jeita.tagged_sents()[2170:2173]
143
+ )
144
+ )
145
+
146
+
147
+ def test():
148
+
149
+ from nltk.corpus.util import LazyCorpusLoader
150
+
151
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
152
+
153
+ assert isinstance(jeita.tagged_words()[0][1], str)
154
+
155
+
156
+ if __name__ == "__main__":
157
+ demo()
158
+ test()
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py ADDED
@@ -0,0 +1,273 @@
1
+ # Natural Language Toolkit: Chunked Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A reader for corpora that contain chunked (and optionally tagged)
11
+ documents.
12
+ """
13
+
14
+ import codecs
15
+ import os.path
16
+
17
+ import nltk
18
+ from nltk.chunk import tagstr2tree
19
+ from nltk.corpus.reader.api import *
20
+ from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
21
+ from nltk.corpus.reader.util import *
22
+ from nltk.tokenize import *
23
+ from nltk.tree import Tree
24
+
25
+
26
+ class ChunkedCorpusReader(CorpusReader):
27
+ """
28
+ Reader for chunked (and optionally tagged) corpora. Paragraphs
29
+ are split using a block reader. They are then tokenized into
30
+ sentences using a sentence tokenizer. Finally, these sentences
31
+ are parsed into chunk trees using a string-to-chunktree conversion
32
+ function. Each of these steps can be performed using a default
33
+ function or a custom function. By default, paragraphs are split
34
+ on blank lines; sentences are listed one per line; and sentences
35
+ are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ root,
41
+ fileids,
42
+ extension="",
43
+ str2chunktree=tagstr2tree,
44
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
45
+ para_block_reader=read_blankline_block,
46
+ encoding="utf8",
47
+ tagset=None,
48
+ ):
49
+ """
50
+ :param root: The root directory for this corpus.
51
+ :param fileids: A list or regexp specifying the fileids in this corpus.
52
+ """
53
+ CorpusReader.__init__(self, root, fileids, encoding)
54
+ self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader, tagset)
55
+ """Arguments for corpus views generated by this corpus: a tuple
56
+ (str2chunktree, sent_tokenizer, para_block_tokenizer)"""
57
+
58
+ def words(self, fileids=None):
59
+ """
60
+ :return: the given file(s) as a list of words
61
+ and punctuation symbols.
62
+ :rtype: list(str)
63
+ """
64
+ return concat(
65
+ [
66
+ ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
67
+ for (f, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+ def sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of
74
+ sentences or utterances, each encoded as a list of word
75
+ strings.
76
+ :rtype: list(list(str))
77
+ """
78
+ return concat(
79
+ [
80
+ ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
81
+ for (f, enc) in self.abspaths(fileids, True)
82
+ ]
83
+ )
84
+
85
+ def paras(self, fileids=None):
86
+ """
87
+ :return: the given file(s) as a list of
88
+ paragraphs, each encoded as a list of sentences, which are
89
+ in turn encoded as lists of word strings.
90
+ :rtype: list(list(list(str)))
91
+ """
92
+ return concat(
93
+ [
94
+ ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
95
+ for (f, enc) in self.abspaths(fileids, True)
96
+ ]
97
+ )
98
+
99
+ def tagged_words(self, fileids=None, tagset=None):
100
+ """
101
+ :return: the given file(s) as a list of tagged
102
+ words and punctuation symbols, encoded as tuples
103
+ ``(word,tag)``.
104
+ :rtype: list(tuple(str,str))
105
+ """
106
+ return concat(
107
+ [
108
+ ChunkedCorpusView(
109
+ f, enc, 1, 0, 0, 0, *self._cv_args, target_tagset=tagset
110
+ )
111
+ for (f, enc) in self.abspaths(fileids, True)
112
+ ]
113
+ )
114
+
115
+ def tagged_sents(self, fileids=None, tagset=None):
116
+ """
117
+ :return: the given file(s) as a list of
118
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
119
+
120
+ :rtype: list(list(tuple(str,str)))
121
+ """
122
+ return concat(
123
+ [
124
+ ChunkedCorpusView(
125
+ f, enc, 1, 1, 0, 0, *self._cv_args, target_tagset=tagset
126
+ )
127
+ for (f, enc) in self.abspaths(fileids, True)
128
+ ]
129
+ )
130
+
131
+ def tagged_paras(self, fileids=None, tagset=None):
132
+ """
133
+ :return: the given file(s) as a list of
134
+ paragraphs, each encoded as a list of sentences, which are
135
+ in turn encoded as lists of ``(word,tag)`` tuples.
136
+ :rtype: list(list(list(tuple(str,str))))
137
+ """
138
+ return concat(
139
+ [
140
+ ChunkedCorpusView(
141
+ f, enc, 1, 1, 1, 0, *self._cv_args, target_tagset=tagset
142
+ )
143
+ for (f, enc) in self.abspaths(fileids, True)
144
+ ]
145
+ )
146
+
147
+ def chunked_words(self, fileids=None, tagset=None):
148
+ """
149
+ :return: the given file(s) as a list of tagged
150
+ words and chunks. Words are encoded as ``(word, tag)``
151
+ tuples (if the corpus has tags) or word strings (if the
152
+ corpus has no tags). Chunks are encoded as depth-one
153
+ trees over ``(word,tag)`` tuples or word strings.
154
+ :rtype: list(tuple(str,str) and Tree)
155
+ """
156
+ return concat(
157
+ [
158
+ ChunkedCorpusView(
159
+ f, enc, 1, 0, 0, 1, *self._cv_args, target_tagset=tagset
160
+ )
161
+ for (f, enc) in self.abspaths(fileids, True)
162
+ ]
163
+ )
164
+
165
+ def chunked_sents(self, fileids=None, tagset=None):
166
+ """
167
+ :return: the given file(s) as a list of
168
+ sentences, each encoded as a shallow Tree. The leaves
169
+ of these trees are encoded as ``(word, tag)`` tuples (if
170
+ the corpus has tags) or word strings (if the corpus has no
171
+ tags).
172
+ :rtype: list(Tree)
173
+ """
174
+ return concat(
175
+ [
176
+ ChunkedCorpusView(
177
+ f, enc, 1, 1, 0, 1, *self._cv_args, target_tagset=tagset
178
+ )
179
+ for (f, enc) in self.abspaths(fileids, True)
180
+ ]
181
+ )
182
+
183
+ def chunked_paras(self, fileids=None, tagset=None):
184
+ """
185
+ :return: the given file(s) as a list of
186
+ paragraphs, each encoded as a list of sentences, which are
187
+ in turn encoded as a shallow Tree. The leaves of these
188
+ trees are encoded as ``(word, tag)`` tuples (if the corpus
189
+ has tags) or word strings (if the corpus has no tags).
190
+ :rtype: list(list(Tree))
191
+ """
192
+ return concat(
193
+ [
194
+ ChunkedCorpusView(
195
+ f, enc, 1, 1, 1, 1, *self._cv_args, target_tagset=tagset
196
+ )
197
+ for (f, enc) in self.abspaths(fileids, True)
198
+ ]
199
+ )
200
+
201
+ def _read_block(self, stream):
202
+ return [tagstr2tree(t) for t in read_blankline_block(stream)]
203
+
204
+
205
+ class ChunkedCorpusView(StreamBackedCorpusView):
206
+ def __init__(
207
+ self,
208
+ fileid,
209
+ encoding,
210
+ tagged,
211
+ group_by_sent,
212
+ group_by_para,
213
+ chunked,
214
+ str2chunktree,
215
+ sent_tokenizer,
216
+ para_block_reader,
217
+ source_tagset=None,
218
+ target_tagset=None,
219
+ ):
220
+ StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
221
+ self._tagged = tagged
222
+ self._group_by_sent = group_by_sent
223
+ self._group_by_para = group_by_para
224
+ self._chunked = chunked
225
+ self._str2chunktree = str2chunktree
226
+ self._sent_tokenizer = sent_tokenizer
227
+ self._para_block_reader = para_block_reader
228
+ self._source_tagset = source_tagset
229
+ self._target_tagset = target_tagset
230
+
231
+ def read_block(self, stream):
232
+ block = []
233
+ for para_str in self._para_block_reader(stream):
234
+ para = []
235
+ for sent_str in self._sent_tokenizer.tokenize(para_str):
236
+ sent = self._str2chunktree(
237
+ sent_str,
238
+ source_tagset=self._source_tagset,
239
+ target_tagset=self._target_tagset,
240
+ )
241
+
242
+ # If requested, throw away the tags.
243
+ if not self._tagged:
244
+ sent = self._untag(sent)
245
+
246
+ # If requested, throw away the chunks.
247
+ if not self._chunked:
248
+ sent = sent.leaves()
249
+
250
+ # Add the sentence to `para`.
251
+ if self._group_by_sent:
252
+ para.append(sent)
253
+ else:
254
+ para.extend(sent)
255
+
256
+ # Add the paragraph to `block`.
257
+ if self._group_by_para:
258
+ block.append(para)
259
+ else:
260
+ block.extend(para)
261
+
262
+ # Return the block
263
+ return block
264
+
265
+ def _untag(self, tree):
266
+ for i, child in enumerate(tree):
267
+ if isinstance(child, Tree):
268
+ self._untag(child)
269
+ elif isinstance(child, tuple):
270
+ tree[i] = child[0]
271
+ else:
272
+ raise ValueError("expected child to be Tree or tuple")
273
+ return tree
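In the stock NLTK setup the treebank_chunk loader defined in nltk/corpus/__init__.py above uses this reader, which makes for a quick smoke test. A sketch, assuming nltk.download("treebank") has been run:

# Illustrative only; requires nltk.download("treebank").
from nltk.corpus import treebank_chunk

tree = treebank_chunk.chunked_sents()[0]
print(tree)                               # shallow Tree with NP chunks over (word, tag) leaves
print(treebank_chunk.tagged_words()[:5])  # flat list of (word, tag) pairs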
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py ADDED
@@ -0,0 +1,88 @@
1
+ # Natural Language Toolkit: Carnegie Mellon Pronouncing Dictionary Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6]
10
+ ftp://ftp.cs.cmu.edu/project/speech/dict/
11
+ Copyright 1998 Carnegie Mellon University
12
+
13
+ File Format: Each line consists of an uppercased word, a counter
14
+ (for alternative pronunciations), and a transcription. Vowels are
15
+ marked for stress (1=primary, 2=secondary, 0=no stress). E.g.:
16
+ NATURAL 1 N AE1 CH ER0 AH0 L
17
+
18
+ The dictionary contains 127069 entries. Of these, 119400 words are assigned
19
+ a unique pronunciation, 6830 words have two pronunciations, and 839 words have
20
+ three or more pronunciations. Many of these are fast-speech variants.
21
+
22
+ Phonemes: There are 39 phonemes, as shown below:
23
+
24
+ Phoneme Example Translation Phoneme Example Translation
25
+ ------- ------- ----------- ------- ------- -----------
26
+ AA odd AA D AE at AE T
27
+ AH hut HH AH T AO ought AO T
28
+ AW cow K AW AY hide HH AY D
29
+ B be B IY CH cheese CH IY Z
30
+ D dee D IY DH thee DH IY
31
+ EH Ed EH D ER hurt HH ER T
32
+ EY ate EY T F fee F IY
33
+ G green G R IY N HH he HH IY
34
+ IH it IH T IY eat IY T
35
+ JH gee JH IY K key K IY
36
+ L lee L IY M me M IY
37
+ N knee N IY NG ping P IH NG
38
+ OW oat OW T OY toy T OY
39
+ P pee P IY R read R IY D
40
+ S sea S IY SH she SH IY
41
+ T tea T IY TH theta TH EY T AH
42
+ UH hood HH UH D UW two T UW
43
+ V vee V IY W we W IY
44
+ Y yield Y IY L D Z zee Z IY
45
+ ZH seizure S IY ZH ER
46
+ """
47
+
48
+ from nltk.corpus.reader.api import *
49
+ from nltk.corpus.reader.util import *
50
+ from nltk.util import Index
51
+
52
+
53
+ class CMUDictCorpusReader(CorpusReader):
54
+ def entries(self):
55
+ """
56
+ :return: the cmudict lexicon as a list of entries
57
+ containing (word, transcriptions) tuples.
58
+ """
59
+ return concat(
60
+ [
61
+ StreamBackedCorpusView(fileid, read_cmudict_block, encoding=enc)
62
+ for fileid, enc in self.abspaths(None, True)
63
+ ]
64
+ )
65
+
66
+ def words(self):
67
+ """
68
+ :return: a list of all words defined in the cmudict lexicon.
69
+ """
70
+ return [word.lower() for (word, _) in self.entries()]
71
+
72
+ def dict(self):
73
+ """
74
+ :return: the cmudict lexicon as a dictionary, whose keys are
75
+ lowercase words and whose values are lists of pronunciations.
76
+ """
77
+ return dict(Index(self.entries()))
78
+
79
+
80
+ def read_cmudict_block(stream):
81
+ entries = []
82
+ while len(entries) < 100: # Read 100 at a time.
83
+ line = stream.readline()
84
+ if line == "":
85
+ return entries # end of file.
86
+ pieces = line.split()
87
+ entries.append((pieces[0].lower(), pieces[2:]))
88
+ return entries
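A short usage sketch for the reader above, via the cmudict corpus bundled with NLTK (assumes the corpus has been fetched with nltk.download('cmudict'); the printed values are illustrative):

    import nltk

    nltk.download("cmudict", quiet=True)   # no-op if the corpus is already installed
    from nltk.corpus import cmudict

    pron = cmudict.dict()                  # lowercase word -> list of transcriptions
    print(pron["natural"][0])              # e.g. ['N', 'AE1', 'CH', 'ER0', 'AH0', 'L']
    print(len(cmudict.entries()))          # number of entries (about 127k per the docstring above)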
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/conll.py ADDED
@@ -0,0 +1,579 @@
1
+ # Natural Language Toolkit: CONLL Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Read CoNLL-style chunk fileids.
11
+ """
12
+
13
+ import textwrap
14
+
15
+ from nltk.corpus.reader.api import *
16
+ from nltk.corpus.reader.util import *
17
+ from nltk.tag import map_tag
18
+ from nltk.tree import Tree
19
+ from nltk.util import LazyConcatenation, LazyMap
20
+
21
+
22
+ class ConllCorpusReader(CorpusReader):
23
+ """
24
+ A corpus reader for CoNLL-style files. These files consist of a
25
+ series of sentences, separated by blank lines. Each sentence is
26
+ encoded using a table (or "grid") of values, where each line
27
+ corresponds to a single word, and each column corresponds to an
28
+ annotation type. The set of columns used by CoNLL-style files can
29
+ vary from corpus to corpus; the ``ConllCorpusReader`` constructor
30
+ therefore takes an argument, ``columntypes``, which is used to
31
+ specify the columns that are used by a given corpus. By default,
32
+ columns are split on consecutive whitespace; with the
33
+ ``separator`` argument you can set a string to split by (e.g.
34
+ ``\'\t\'``).
35
+
36
+
37
+ @todo: Add support for reading from corpora where different
38
+ parallel files contain different columns.
39
+ @todo: Possibly add caching of the grid corpus view? This would
40
+ allow the same grid view to be used by different data access
41
+ methods (eg words() and parsed_sents() could both share the
42
+ same grid corpus view object).
43
+ @todo: Better support for -DOCSTART-. Currently, we just ignore
44
+ it, but it could be used to define methods that retrieve a
45
+ document at a time (eg parsed_documents()).
46
+ """
47
+
48
+ # /////////////////////////////////////////////////////////////////
49
+ # Column Types
50
+ # /////////////////////////////////////////////////////////////////
51
+
52
+ WORDS = "words" #: column type for words
53
+ POS = "pos" #: column type for part-of-speech tags
54
+ TREE = "tree" #: column type for parse trees
55
+ CHUNK = "chunk" #: column type for chunk structures
56
+ NE = "ne" #: column type for named entities
57
+ SRL = "srl" #: column type for semantic role labels
58
+ IGNORE = "ignore" #: column type for column that should be ignored
59
+
60
+ #: A list of all column types supported by the conll corpus reader.
61
+ COLUMN_TYPES = (WORDS, POS, TREE, CHUNK, NE, SRL, IGNORE)
62
+
63
+ # /////////////////////////////////////////////////////////////////
64
+ # Constructor
65
+ # /////////////////////////////////////////////////////////////////
66
+
67
+ def __init__(
68
+ self,
69
+ root,
70
+ fileids,
71
+ columntypes,
72
+ chunk_types=None,
73
+ root_label="S",
74
+ pos_in_tree=False,
75
+ srl_includes_roleset=True,
76
+ encoding="utf8",
77
+ tree_class=Tree,
78
+ tagset=None,
79
+ separator=None,
80
+ ):
81
+ for columntype in columntypes:
82
+ if columntype not in self.COLUMN_TYPES:
83
+ raise ValueError("Bad column type %r" % columntype)
84
+ if isinstance(chunk_types, str):
85
+ chunk_types = [chunk_types]
86
+ self._chunk_types = chunk_types
87
+ self._colmap = {c: i for (i, c) in enumerate(columntypes)}
88
+ self._pos_in_tree = pos_in_tree
89
+ self._root_label = root_label # for chunks
90
+ self._srl_includes_roleset = srl_includes_roleset
91
+ self._tree_class = tree_class
92
+ CorpusReader.__init__(self, root, fileids, encoding)
93
+ self._tagset = tagset
94
+ self.sep = separator
95
+
96
+ # /////////////////////////////////////////////////////////////////
97
+ # Data Access Methods
98
+ # /////////////////////////////////////////////////////////////////
99
+
100
+ def words(self, fileids=None):
101
+ self._require(self.WORDS)
102
+ return LazyConcatenation(LazyMap(self._get_words, self._grids(fileids)))
103
+
104
+ def sents(self, fileids=None):
105
+ self._require(self.WORDS)
106
+ return LazyMap(self._get_words, self._grids(fileids))
107
+
108
+ def tagged_words(self, fileids=None, tagset=None):
109
+ self._require(self.WORDS, self.POS)
110
+
111
+ def get_tagged_words(grid):
112
+ return self._get_tagged_words(grid, tagset)
113
+
114
+ return LazyConcatenation(LazyMap(get_tagged_words, self._grids(fileids)))
115
+
116
+ def tagged_sents(self, fileids=None, tagset=None):
117
+ self._require(self.WORDS, self.POS)
118
+
119
+ def get_tagged_words(grid):
120
+ return self._get_tagged_words(grid, tagset)
121
+
122
+ return LazyMap(get_tagged_words, self._grids(fileids))
123
+
124
+ def chunked_words(self, fileids=None, chunk_types=None, tagset=None):
125
+ self._require(self.WORDS, self.POS, self.CHUNK)
126
+ if chunk_types is None:
127
+ chunk_types = self._chunk_types
128
+
129
+ def get_chunked_words(grid): # capture chunk_types as local var
130
+ return self._get_chunked_words(grid, chunk_types, tagset)
131
+
132
+ return LazyConcatenation(LazyMap(get_chunked_words, self._grids(fileids)))
133
+
134
+ def chunked_sents(self, fileids=None, chunk_types=None, tagset=None):
135
+ self._require(self.WORDS, self.POS, self.CHUNK)
136
+ if chunk_types is None:
137
+ chunk_types = self._chunk_types
138
+
139
+ def get_chunked_words(grid): # capture chunk_types as local var
140
+ return self._get_chunked_words(grid, chunk_types, tagset)
141
+
142
+ return LazyMap(get_chunked_words, self._grids(fileids))
143
+
144
+ def parsed_sents(self, fileids=None, pos_in_tree=None, tagset=None):
145
+ self._require(self.WORDS, self.POS, self.TREE)
146
+ if pos_in_tree is None:
147
+ pos_in_tree = self._pos_in_tree
148
+
149
+ def get_parsed_sent(grid): # capture pos_in_tree as local var
150
+ return self._get_parsed_sent(grid, pos_in_tree, tagset)
151
+
152
+ return LazyMap(get_parsed_sent, self._grids(fileids))
153
+
154
+ def srl_spans(self, fileids=None):
155
+ self._require(self.SRL)
156
+ return LazyMap(self._get_srl_spans, self._grids(fileids))
157
+
158
+ def srl_instances(self, fileids=None, pos_in_tree=None, flatten=True):
159
+ self._require(self.WORDS, self.POS, self.TREE, self.SRL)
160
+ if pos_in_tree is None:
161
+ pos_in_tree = self._pos_in_tree
162
+
163
+ def get_srl_instances(grid): # capture pos_in_tree as local var
164
+ return self._get_srl_instances(grid, pos_in_tree)
165
+
166
+ result = LazyMap(get_srl_instances, self._grids(fileids))
167
+ if flatten:
168
+ result = LazyConcatenation(result)
169
+ return result
170
+
171
+ def iob_words(self, fileids=None, tagset=None):
172
+ """
173
+ :return: a list of word/tag/IOB tuples
174
+ :rtype: list(tuple)
175
+ :param fileids: the list of fileids that make up this corpus
176
+ :type fileids: None or str or list
177
+ """
178
+ self._require(self.WORDS, self.POS, self.CHUNK)
179
+
180
+ def get_iob_words(grid):
181
+ return self._get_iob_words(grid, tagset)
182
+
183
+ return LazyConcatenation(LazyMap(get_iob_words, self._grids(fileids)))
184
+
185
+ def iob_sents(self, fileids=None, tagset=None):
186
+ """
187
+ :return: a list of lists of word/tag/IOB tuples
188
+ :rtype: list(list)
189
+ :param fileids: the list of fileids that make up this corpus
190
+ :type fileids: None or str or list
191
+ """
192
+ self._require(self.WORDS, self.POS, self.CHUNK)
193
+
194
+ def get_iob_words(grid):
195
+ return self._get_iob_words(grid, tagset)
196
+
197
+ return LazyMap(get_iob_words, self._grids(fileids))
198
+
199
+ # /////////////////////////////////////////////////////////////////
200
+ # Grid Reading
201
+ # /////////////////////////////////////////////////////////////////
202
+
203
+ def _grids(self, fileids=None):
204
+ # n.b.: we could cache the object returned here (keyed on
205
+ # fileids), which would let us reuse the same corpus view for
206
+ # different things (eg srl and parse trees).
207
+ return concat(
208
+ [
209
+ StreamBackedCorpusView(fileid, self._read_grid_block, encoding=enc)
210
+ for (fileid, enc) in self.abspaths(fileids, True)
211
+ ]
212
+ )
213
+
214
+ def _read_grid_block(self, stream):
215
+ grids = []
216
+ for block in read_blankline_block(stream):
217
+ block = block.strip()
218
+ if not block:
219
+ continue
220
+
221
+ grid = [line.split(self.sep) for line in block.split("\n")]
222
+
223
+ # If there's a docstart row, then discard. ([xx] eventually it
224
+ # would be good to actually use it)
225
+ if grid[0][self._colmap.get("words", 0)] == "-DOCSTART-":
226
+ del grid[0]
227
+
228
+ # Check that the grid is consistent.
229
+ for row in grid:
230
+ if len(row) != len(grid[0]):
231
+ raise ValueError("Inconsistent number of columns:\n%s" % block)
232
+ grids.append(grid)
233
+ return grids
234
+
235
+ # /////////////////////////////////////////////////////////////////
236
+ # Transforms
237
+ # /////////////////////////////////////////////////////////////////
238
+ # given a grid, transform it into some representation (e.g.,
239
+ # a list of words or a parse tree).
240
+
241
+ def _get_words(self, grid):
242
+ return self._get_column(grid, self._colmap["words"])
243
+
244
+ def _get_tagged_words(self, grid, tagset=None):
245
+ pos_tags = self._get_column(grid, self._colmap["pos"])
246
+ if tagset and tagset != self._tagset:
247
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
248
+ return list(zip(self._get_column(grid, self._colmap["words"]), pos_tags))
249
+
250
+ def _get_iob_words(self, grid, tagset=None):
251
+ pos_tags = self._get_column(grid, self._colmap["pos"])
252
+ if tagset and tagset != self._tagset:
253
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
254
+ return list(
255
+ zip(
256
+ self._get_column(grid, self._colmap["words"]),
257
+ pos_tags,
258
+ self._get_column(grid, self._colmap["chunk"]),
259
+ )
260
+ )
261
+
262
+ def _get_chunked_words(self, grid, chunk_types, tagset=None):
263
+ # n.b.: this method is very similar to conllstr2tree.
264
+ words = self._get_column(grid, self._colmap["words"])
265
+ pos_tags = self._get_column(grid, self._colmap["pos"])
266
+ if tagset and tagset != self._tagset:
267
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
268
+ chunk_tags = self._get_column(grid, self._colmap["chunk"])
269
+
270
+ stack = [Tree(self._root_label, [])]
271
+
272
+ for (word, pos_tag, chunk_tag) in zip(words, pos_tags, chunk_tags):
273
+ if chunk_tag == "O":
274
+ state, chunk_type = "O", ""
275
+ else:
276
+ (state, chunk_type) = chunk_tag.split("-")
277
+ # If it's a chunk we don't care about, treat it as O.
278
+ if chunk_types is not None and chunk_type not in chunk_types:
279
+ state = "O"
280
+ # Treat a mismatching I like a B.
281
+ if state == "I" and chunk_type != stack[-1].label():
282
+ state = "B"
283
+ # For B or I: close any open chunks
284
+ if state in "BO" and len(stack) == 2:
285
+ stack.pop()
286
+ # For B: start a new chunk.
287
+ if state == "B":
288
+ new_chunk = Tree(chunk_type, [])
289
+ stack[-1].append(new_chunk)
290
+ stack.append(new_chunk)
291
+ # Add the word token.
292
+ stack[-1].append((word, pos_tag))
293
+
294
+ return stack[0]
295
+
296
+ def _get_parsed_sent(self, grid, pos_in_tree, tagset=None):
297
+ words = self._get_column(grid, self._colmap["words"])
298
+ pos_tags = self._get_column(grid, self._colmap["pos"])
299
+ if tagset and tagset != self._tagset:
300
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
301
+ parse_tags = self._get_column(grid, self._colmap["tree"])
302
+
303
+ treestr = ""
304
+ for (word, pos_tag, parse_tag) in zip(words, pos_tags, parse_tags):
305
+ if word == "(":
306
+ word = "-LRB-"
307
+ if word == ")":
308
+ word = "-RRB-"
309
+ if pos_tag == "(":
310
+ pos_tag = "-LRB-"
311
+ if pos_tag == ")":
312
+ pos_tag = "-RRB-"
313
+ (left, right) = parse_tag.split("*")
314
+ right = right.count(")") * ")" # only keep ')'.
315
+ treestr += f"{left} ({pos_tag} {word}) {right}"
316
+ try:
317
+ tree = self._tree_class.fromstring(treestr)
318
+ except (ValueError, IndexError):
319
+ tree = self._tree_class.fromstring(f"({self._root_label} {treestr})")
320
+
321
+ if not pos_in_tree:
322
+ for subtree in tree.subtrees():
323
+ for i, child in enumerate(subtree):
324
+ if (
325
+ isinstance(child, Tree)
326
+ and len(child) == 1
327
+ and isinstance(child[0], str)
328
+ ):
329
+ subtree[i] = (child[0], child.label())
330
+
331
+ return tree
332
+
333
+ def _get_srl_spans(self, grid):
334
+ """
335
+ list of lists of ((start, end), tag) tuples
336
+ """
337
+ if self._srl_includes_roleset:
338
+ predicates = self._get_column(grid, self._colmap["srl"] + 1)
339
+ start_col = self._colmap["srl"] + 2
340
+ else:
341
+ predicates = self._get_column(grid, self._colmap["srl"])
342
+ start_col = self._colmap["srl"] + 1
343
+
344
+ # Count how many predicates there are. This tells us how many
345
+ # columns to expect for SRL data.
346
+ num_preds = len([p for p in predicates if p != "-"])
347
+
348
+ spanlists = []
349
+ for i in range(num_preds):
350
+ col = self._get_column(grid, start_col + i)
351
+ spanlist = []
352
+ stack = []
353
+ for wordnum, srl_tag in enumerate(col):
354
+ (left, right) = srl_tag.split("*")
355
+ for tag in left.split("("):
356
+ if tag:
357
+ stack.append((tag, wordnum))
358
+ for i in range(right.count(")")):
359
+ (tag, start) = stack.pop()
360
+ spanlist.append(((start, wordnum + 1), tag))
361
+ spanlists.append(spanlist)
362
+
363
+ return spanlists
364
+
365
+ def _get_srl_instances(self, grid, pos_in_tree):
366
+ tree = self._get_parsed_sent(grid, pos_in_tree)
367
+ spanlists = self._get_srl_spans(grid)
368
+ if self._srl_includes_roleset:
369
+ predicates = self._get_column(grid, self._colmap["srl"] + 1)
370
+ rolesets = self._get_column(grid, self._colmap["srl"])
371
+ else:
372
+ predicates = self._get_column(grid, self._colmap["srl"])
373
+ rolesets = [None] * len(predicates)
374
+
375
+ instances = ConllSRLInstanceList(tree)
376
+ for wordnum, predicate in enumerate(predicates):
377
+ if predicate == "-":
378
+ continue
379
+ # Decide which spanlist to use. Don't assume that they're
380
+ # sorted in the same order as the predicates (even though
381
+ # they usually are).
382
+ for spanlist in spanlists:
383
+ for (start, end), tag in spanlist:
384
+ if wordnum in range(start, end) and tag in ("V", "C-V"):
385
+ break
386
+ else:
387
+ continue
388
+ break
389
+ else:
390
+ raise ValueError("No srl column found for %r" % predicate)
391
+ instances.append(
392
+ ConllSRLInstance(tree, wordnum, predicate, rolesets[wordnum], spanlist)
393
+ )
394
+
395
+ return instances
396
+
397
+ # /////////////////////////////////////////////////////////////////
398
+ # Helper Methods
399
+ # /////////////////////////////////////////////////////////////////
400
+
401
+ def _require(self, *columntypes):
402
+ for columntype in columntypes:
403
+ if columntype not in self._colmap:
404
+ raise ValueError(
405
+ "This corpus does not contain a %s " "column." % columntype
406
+ )
407
+
408
+ @staticmethod
409
+ def _get_column(grid, column_index):
410
+ return [grid[i][column_index] for i in range(len(grid))]
411
+
412
+
413
+ class ConllSRLInstance:
414
+ """
415
+ An SRL instance from a CoNLL corpus, which identifies and
416
+ provides labels for the arguments of a single verb.
417
+ """
418
+
419
+ # [xx] add inst.core_arguments, inst.argm_arguments?
420
+
421
+ def __init__(self, tree, verb_head, verb_stem, roleset, tagged_spans):
422
+ self.verb = []
423
+ """A list of the word indices of the words that compose the
424
+ verb whose arguments are identified by this instance.
425
+ This will contain multiple word indices when multi-word
426
+ verbs are used (e.g. 'turn on')."""
427
+
428
+ self.verb_head = verb_head
429
+ """The word index of the head word of the verb whose arguments
430
+ are identified by this instance. E.g., for a sentence that
431
+ uses the verb 'turn on,' ``verb_head`` will be the word index
432
+ of the word 'turn'."""
433
+
434
+ self.verb_stem = verb_stem
435
+
436
+ self.roleset = roleset
437
+
438
+ self.arguments = []
439
+ """A list of ``(argspan, argid)`` tuples, specifying the location
440
+ and type for each of the arguments identified by this
441
+ instance. ``argspan`` is a tuple ``start, end``, indicating
442
+ that the argument consists of the ``words[start:end]``."""
443
+
444
+ self.tagged_spans = tagged_spans
445
+ """A list of ``(span, id)`` tuples, specifying the location and
446
+ type for each of the arguments, as well as the verb pieces,
447
+ that make up this instance."""
448
+
449
+ self.tree = tree
450
+ """The parse tree for the sentence containing this instance."""
451
+
452
+ self.words = tree.leaves()
453
+ """A list of the words in the sentence containing this
454
+ instance."""
455
+
456
+ # Fill in the self.verb and self.arguments values.
457
+ for (start, end), tag in tagged_spans:
458
+ if tag in ("V", "C-V"):
459
+ self.verb += list(range(start, end))
460
+ else:
461
+ self.arguments.append(((start, end), tag))
462
+
463
+ def __repr__(self):
464
+ # Originally, its:
465
+ ##plural = 's' if len(self.arguments) != 1 else ''
466
+ plural = "s" if len(self.arguments) != 1 else ""
467
+ return "<ConllSRLInstance for %r with %d argument%s>" % (
468
+ (self.verb_stem, len(self.arguments), plural)
469
+ )
470
+
471
+ def pprint(self):
472
+ verbstr = " ".join(self.words[i][0] for i in self.verb)
473
+ hdr = f"SRL for {verbstr!r} (stem={self.verb_stem!r}):\n"
474
+ s = ""
475
+ for i, word in enumerate(self.words):
476
+ if isinstance(word, tuple):
477
+ word = word[0]
478
+ for (start, end), argid in self.arguments:
479
+ if i == start:
480
+ s += "[%s " % argid
481
+ if i == end:
482
+ s += "] "
483
+ if i in self.verb:
484
+ word = "<<%s>>" % word
485
+ s += word + " "
486
+ return hdr + textwrap.fill(
487
+ s.replace(" ]", "]"), initial_indent=" ", subsequent_indent=" "
488
+ )
489
+
490
+
491
+ class ConllSRLInstanceList(list):
492
+ """
493
+ Set of instances for a single sentence
494
+ """
495
+
496
+ def __init__(self, tree, instances=()):
497
+ self.tree = tree
498
+ list.__init__(self, instances)
499
+
500
+ def __str__(self):
501
+ return self.pprint()
502
+
503
+ def pprint(self, include_tree=False):
504
+ # Sanity check: trees should be the same
505
+ for inst in self:
506
+ if inst.tree != self.tree:
507
+ raise ValueError("Tree mismatch!")
508
+
509
+ # If desired, add trees:
510
+ if include_tree:
511
+ words = self.tree.leaves()
512
+ pos = [None] * len(words)
513
+ synt = ["*"] * len(words)
514
+ self._tree2conll(self.tree, 0, words, pos, synt)
515
+
516
+ s = ""
517
+ for i in range(len(words)):
518
+ # optional tree columns
519
+ if include_tree:
520
+ s += "%-20s " % words[i]
521
+ s += "%-8s " % pos[i]
522
+ s += "%15s*%-8s " % tuple(synt[i].split("*"))
523
+
524
+ # verb head column
525
+ for inst in self:
526
+ if i == inst.verb_head:
527
+ s += "%-20s " % inst.verb_stem
528
+ break
529
+ else:
530
+ s += "%-20s " % "-"
531
+ # Remaining columns: self
532
+ for inst in self:
533
+ argstr = "*"
534
+ for (start, end), argid in inst.tagged_spans:
535
+ if i == start:
536
+ argstr = f"({argid}{argstr}"
537
+ if i == (end - 1):
538
+ argstr += ")"
539
+ s += "%-12s " % argstr
540
+ s += "\n"
541
+ return s
542
+
543
+ def _tree2conll(self, tree, wordnum, words, pos, synt):
544
+ assert isinstance(tree, Tree)
545
+ if len(tree) == 1 and isinstance(tree[0], str):
546
+ pos[wordnum] = tree.label()
547
+ assert words[wordnum] == tree[0]
548
+ return wordnum + 1
549
+ elif len(tree) == 1 and isinstance(tree[0], tuple):
550
+ assert len(tree[0]) == 2
551
+ pos[wordnum], pos[wordnum] = tree[0]
552
+ return wordnum + 1
553
+ else:
554
+ synt[wordnum] = f"({tree.label()}{synt[wordnum]}"
555
+ for child in tree:
556
+ wordnum = self._tree2conll(child, wordnum, words, pos, synt)
557
+ synt[wordnum - 1] += ")"
558
+ return wordnum
559
+
560
+
561
+ class ConllChunkCorpusReader(ConllCorpusReader):
562
+ """
563
+ A ConllCorpusReader whose data file contains three columns: words,
564
+ pos, and chunk.
565
+ """
566
+
567
+ def __init__(
568
+ self, root, fileids, chunk_types, encoding="utf8", tagset=None, separator=None
569
+ ):
570
+ ConllCorpusReader.__init__(
571
+ self,
572
+ root,
573
+ fileids,
574
+ ("words", "pos", "chunk"),
575
+ chunk_types=chunk_types,
576
+ encoding=encoding,
577
+ tagset=tagset,
578
+ separator=separator,
579
+ )
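A hedged sketch of reading CoNLL-2000-style chunk data with the readers above; the directory and file name are placeholders, and the file contents shown in the comment are invented to match the (words, pos, chunk) layout that ConllChunkCorpusReader expects:

    from nltk.corpus.reader.conll import ConllChunkCorpusReader

    # Each line is "word POS chunk-tag"; sentences are separated by blank lines, e.g.
    #   He PRP B-NP
    #   reckons VBZ B-VP
    #   the DT B-NP
    reader = ConllChunkCorpusReader(
        "/path/to/conll2000", ["train.txt"], chunk_types=("NP", "VP", "PP")
    )
    print(reader.iob_words()[:3])     # [('He', 'PRP', 'B-NP'), ('reckons', 'VBZ', 'B-VP'), ...]
    print(reader.chunked_sents()[0])  # an nltk Tree with NP/VP/PP chunks under the 'S' root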
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py ADDED
@@ -0,0 +1,106 @@
1
+ # Natural Language Toolkit: An Crubadan N-grams Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Avital Pekker <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ An NLTK interface for the n-gram statistics gathered from
11
+ the corpora for each language using An Crubadan.
12
+
13
+ There are multiple potential applications for the data but
14
+ this reader was created with the goal of using it in the
15
+ context of language identification.
16
+
17
+ For details about An Crubadan, this data, and its potential uses, see:
18
+ http://borel.slu.edu/crubadan/index.html
19
+ """
20
+
21
+ import re
22
+ from os import path
23
+
24
+ from nltk.corpus.reader import CorpusReader
25
+ from nltk.data import ZipFilePathPointer
26
+ from nltk.probability import FreqDist
27
+
28
+
29
+ class CrubadanCorpusReader(CorpusReader):
30
+ """
31
+ A corpus reader used to access language An Crubadan n-gram files.
32
+ """
33
+
34
+ _LANG_MAPPER_FILE = "table.txt"
35
+ _all_lang_freq = {}
36
+
37
+ def __init__(self, root, fileids, encoding="utf8", tagset=None):
38
+ super().__init__(root, fileids, encoding="utf8")
39
+ self._lang_mapping_data = []
40
+ self._load_lang_mapping_data()
41
+
42
+ def lang_freq(self, lang):
43
+ """Return n-gram FreqDist for a specific language
44
+ given ISO 639-3 language code"""
45
+
46
+ if lang not in self._all_lang_freq:
47
+ self._all_lang_freq[lang] = self._load_lang_ngrams(lang)
48
+
49
+ return self._all_lang_freq[lang]
50
+
51
+ def langs(self):
52
+ """Return a list of supported languages as ISO 639-3 codes"""
53
+ return [row[1] for row in self._lang_mapping_data]
54
+
55
+ def iso_to_crubadan(self, lang):
56
+ """Return internal Crubadan code based on ISO 639-3 code"""
57
+ for i in self._lang_mapping_data:
58
+ if i[1].lower() == lang.lower():
59
+ return i[0]
60
+
61
+ def crubadan_to_iso(self, lang):
62
+ """Return ISO 639-3 code given internal Crubadan code"""
63
+ for i in self._lang_mapping_data:
64
+ if i[0].lower() == lang.lower():
65
+ return i[1]
66
+
67
+ def _load_lang_mapping_data(self):
68
+ """Load language mappings between codes and description from table.txt"""
69
+ if isinstance(self.root, ZipFilePathPointer):
70
+ raise RuntimeError(
71
+ "Please install the 'crubadan' corpus first, use nltk.download()"
72
+ )
73
+
74
+ mapper_file = path.join(self.root, self._LANG_MAPPER_FILE)
75
+ if self._LANG_MAPPER_FILE not in self.fileids():
76
+ raise RuntimeError("Could not find language mapper file: " + mapper_file)
77
+
78
+ with open(mapper_file, encoding="utf-8") as raw:
79
+ strip_raw = raw.read().strip()
80
+
81
+ self._lang_mapping_data = [row.split("\t") for row in strip_raw.split("\n")]
82
+
83
+ def _load_lang_ngrams(self, lang):
84
+ """Load single n-gram language file given the ISO 639-3 language code
85
+ and return its FreqDist"""
86
+
87
+ if lang not in self.langs():
88
+ raise RuntimeError("Unsupported language.")
89
+
90
+ crubadan_code = self.iso_to_crubadan(lang)
91
+ ngram_file = path.join(self.root, crubadan_code + "-3grams.txt")
92
+
93
+ if not path.isfile(ngram_file):
94
+ raise RuntimeError("No N-gram file found for requested language.")
95
+
96
+ counts = FreqDist()
97
+ with open(ngram_file, encoding="utf-8") as f:
98
+ for line in f:
99
+ data = line.split(" ")
100
+
101
+ ngram = data[1].strip("\n")
102
+ freq = int(data[0])
103
+
104
+ counts[ngram] = freq
105
+
106
+ return counts
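A usage sketch assuming the 'crubadan' corpus has been installed via nltk.download('crubadan'); the language code is an ISO 639-3 code as the reader expects, and 'eng' is assumed to appear in table.txt:

    import nltk

    nltk.download("crubadan", quiet=True)
    from nltk.corpus import crubadan

    print(crubadan.langs()[:5])        # ISO 639-3 codes with available n-gram data
    fd = crubadan.lang_freq("eng")     # trigram FreqDist for English (assuming 'eng' is listed)
    print(fd.most_common(3))           # most frequent character trigrams with their counts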
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py ADDED
@@ -0,0 +1,115 @@
1
+ # Natural Language Toolkit: Dependency Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Kepa Sarasola <[email protected]>
5
+ # Iker Manterola <[email protected]>
6
+ #
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+ from nltk.parse import DependencyGraph
13
+ from nltk.tokenize import *
14
+
15
+
16
+ class DependencyCorpusReader(SyntaxCorpusReader):
17
+ def __init__(
18
+ self,
19
+ root,
20
+ fileids,
21
+ encoding="utf8",
22
+ word_tokenizer=TabTokenizer(),
23
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
24
+ para_block_reader=read_blankline_block,
25
+ ):
26
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
27
+
28
+ #########################################################
29
+
30
+ def words(self, fileids=None):
31
+ return concat(
32
+ [
33
+ DependencyCorpusView(fileid, False, False, False, encoding=enc)
34
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
35
+ ]
36
+ )
37
+
38
+ def tagged_words(self, fileids=None):
39
+ return concat(
40
+ [
41
+ DependencyCorpusView(fileid, True, False, False, encoding=enc)
42
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
43
+ ]
44
+ )
45
+
46
+ def sents(self, fileids=None):
47
+ return concat(
48
+ [
49
+ DependencyCorpusView(fileid, False, True, False, encoding=enc)
50
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
51
+ ]
52
+ )
53
+
54
+ def tagged_sents(self, fileids=None):
55
+ return concat(
56
+ [
57
+ DependencyCorpusView(fileid, True, True, False, encoding=enc)
58
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
59
+ ]
60
+ )
61
+
62
+ def parsed_sents(self, fileids=None):
63
+ sents = concat(
64
+ [
65
+ DependencyCorpusView(fileid, False, True, True, encoding=enc)
66
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
67
+ ]
68
+ )
69
+ return [DependencyGraph(sent) for sent in sents]
70
+
71
+
72
+ class DependencyCorpusView(StreamBackedCorpusView):
73
+ _DOCSTART = "-DOCSTART- -DOCSTART- O\n" # dokumentu hasiera definitzen da
74
+
75
+ def __init__(
76
+ self,
77
+ corpus_file,
78
+ tagged,
79
+ group_by_sent,
80
+ dependencies,
81
+ chunk_types=None,
82
+ encoding="utf8",
83
+ ):
84
+ self._tagged = tagged
85
+ self._dependencies = dependencies
86
+ self._group_by_sent = group_by_sent
87
+ self._chunk_types = chunk_types
88
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
89
+
90
+ def read_block(self, stream):
91
+ # Read the next sentence.
92
+ sent = read_blankline_block(stream)[0].strip()
93
+ # Strip off the docstart marker, if present.
94
+ if sent.startswith(self._DOCSTART):
95
+ sent = sent[len(self._DOCSTART) :].lstrip()
96
+
97
+ # extract word and tag from any of the formats
98
+ if not self._dependencies:
99
+ lines = [line.split("\t") for line in sent.split("\n")]
100
+ if len(lines[0]) == 3 or len(lines[0]) == 4:
101
+ sent = [(line[0], line[1]) for line in lines]
102
+ elif len(lines[0]) == 10:
103
+ sent = [(line[1], line[4]) for line in lines]
104
+ else:
105
+ raise ValueError("Unexpected number of fields in dependency tree file")
106
+
107
+ # discard tags if they weren't requested
108
+ if not self._tagged:
109
+ sent = [word for (word, tag) in sent]
110
+
111
+ # Return the result.
112
+ if self._group_by_sent:
113
+ return [sent]
114
+ else:
115
+ return list(sent)
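A sketch of using the reader above on dependency files in one of the supported layouts (3-, 4-, or 10-column lines, with sentences separated by blank lines); the directory and file name are placeholders:

    from nltk.corpus.reader.dependency import DependencyCorpusReader

    reader = DependencyCorpusReader("/path/to/deps", ["sample.conll"])
    print(reader.tagged_sents()[0])    # [('The', 'DT'), ('cat', 'NN'), ...]
    graph = reader.parsed_sents()[0]   # an nltk DependencyGraph built from the raw block
    print(graph.tree())                # tree view of the dependency structure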
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py ADDED
@@ -0,0 +1,116 @@
1
+ # Natural Language Toolkit: IEER Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Corpus reader for the Information Extraction and Entity Recognition Corpus.
11
+
12
+ NIST 1999 Information Extraction: Entity Recognition Evaluation
13
+ https://www.itl.nist.gov/iad/894.01/tests/ie-er/er_99/er_99.htm
14
+
15
+ This corpus contains the NEWSWIRE development test data for the
16
+ NIST 1999 IE-ER Evaluation. The files were taken from the
17
+ subdirectory: ``/ie_er_99/english/devtest/newswire/*.ref.nwt``
18
+ and filenames were shortened.
19
+
20
+ The corpus contains the following files: APW_19980314, APW_19980424,
21
+ APW_19980429, NYT_19980315, NYT_19980403, and NYT_19980407.
22
+ """
23
+
24
+ import nltk
25
+ from nltk.corpus.reader.api import *
26
+
27
+ #: A dictionary whose keys are the names of documents in this corpus;
28
+ #: and whose values are descriptions of those documents' contents.
29
+ titles = {
30
+ "APW_19980314": "Associated Press Weekly, 14 March 1998",
31
+ "APW_19980424": "Associated Press Weekly, 24 April 1998",
32
+ "APW_19980429": "Associated Press Weekly, 29 April 1998",
33
+ "NYT_19980315": "New York Times, 15 March 1998",
34
+ "NYT_19980403": "New York Times, 3 April 1998",
35
+ "NYT_19980407": "New York Times, 7 April 1998",
36
+ }
37
+
38
+ #: A list of all documents in this corpus.
39
+ documents = sorted(titles)
40
+
41
+
42
+ class IEERDocument:
43
+ def __init__(self, text, docno=None, doctype=None, date_time=None, headline=""):
44
+ self.text = text
45
+ self.docno = docno
46
+ self.doctype = doctype
47
+ self.date_time = date_time
48
+ self.headline = headline
49
+
50
+ def __repr__(self):
51
+ if self.headline:
52
+ headline = " ".join(self.headline.leaves())
53
+ else:
54
+ headline = (
55
+ " ".join([w for w in self.text.leaves() if w[:1] != "<"][:12]) + "..."
56
+ )
57
+ if self.docno is not None:
58
+ return f"<IEERDocument {self.docno}: {headline!r}>"
59
+ else:
60
+ return "<IEERDocument: %r>" % headline
61
+
62
+
63
+ class IEERCorpusReader(CorpusReader):
64
+ """ """
65
+
66
+ def docs(self, fileids=None):
67
+ return concat(
68
+ [
69
+ StreamBackedCorpusView(fileid, self._read_block, encoding=enc)
70
+ for (fileid, enc) in self.abspaths(fileids, True)
71
+ ]
72
+ )
73
+
74
+ def parsed_docs(self, fileids=None):
75
+ return concat(
76
+ [
77
+ StreamBackedCorpusView(fileid, self._read_parsed_block, encoding=enc)
78
+ for (fileid, enc) in self.abspaths(fileids, True)
79
+ ]
80
+ )
81
+
82
+ def _read_parsed_block(self, stream):
83
+ # TODO: figure out why empty documents are being returned
84
+ return [
85
+ self._parse(doc)
86
+ for doc in self._read_block(stream)
87
+ if self._parse(doc).docno is not None
88
+ ]
89
+
90
+ def _parse(self, doc):
91
+ val = nltk.chunk.ieerstr2tree(doc, root_label="DOCUMENT")
92
+ if isinstance(val, dict):
93
+ return IEERDocument(**val)
94
+ else:
95
+ return IEERDocument(val)
96
+
97
+ def _read_block(self, stream):
98
+ out = []
99
+ # Skip any preamble.
100
+ while True:
101
+ line = stream.readline()
102
+ if not line:
103
+ break
104
+ if line.strip() == "<DOC>":
105
+ break
106
+ out.append(line)
107
+ # Read the document
108
+ while True:
109
+ line = stream.readline()
110
+ if not line:
111
+ break
112
+ out.append(line)
113
+ if line.strip() == "</DOC>":
114
+ break
115
+ # Return the document
116
+ return ["\n".join(out)]
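A usage sketch for the bundled ieer corpus (after nltk.download('ieer')); the printed fields follow the IEERDocument attributes defined above:

    import nltk

    nltk.download("ieer", quiet=True)
    from nltk.corpus import ieer

    print(ieer.fileids())                      # APW_19980314 ... NYT_19980407
    doc = ieer.parsed_docs("NYT_19980315")[0]  # first document in that file
    print(doc.docno)                           # document id
    print(doc.headline)                        # headline as a chunk tree (may be empty)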
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py ADDED
@@ -0,0 +1,356 @@
1
+ # Natural Language Toolkit: IPI PAN Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Konrad Goluchowski <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import functools
9
+
10
+ from nltk.corpus.reader.api import CorpusReader
11
+ from nltk.corpus.reader.util import StreamBackedCorpusView, concat
12
+
13
+
14
+ def _parse_args(fun):
15
+ @functools.wraps(fun)
16
+ def decorator(self, fileids=None, **kwargs):
17
+ kwargs.pop("tags", None)
18
+ if not fileids:
19
+ fileids = self.fileids()
20
+ return fun(self, fileids, **kwargs)
21
+
22
+ return decorator
23
+
24
+
25
+ class IPIPANCorpusReader(CorpusReader):
26
+ """
27
+ Corpus reader designed to work with corpus created by IPI PAN.
28
+ See http://korpus.pl/en/ for more details about IPI PAN corpus.
29
+
30
+ The corpus includes information about text domain, channel and categories.
31
+ You can access possible values using ``domains()``, ``channels()`` and
32
+ ``categories()``. You can use also this metadata to filter files, e.g.:
33
+ ``fileids(channel='prasa')``, ``fileids(categories='publicystyczny')``.
34
+
35
+ The reader supports methods: words, sents, paras and their tagged versions.
36
+ You can get part of speech instead of full tag by giving "simplify_tags=True"
37
+ parameter, e.g.: ``tagged_sents(simplify_tags=True)``.
38
+
39
+ You can also get all of the disambiguated tags by specifying the parameter
40
+ "one_tag=False", e.g.: ``tagged_paras(one_tag=False)``.
41
+
42
+ You can get all tags that were assigned by a morphological analyzer by specifying the
43
+ parameter "disamb_only=False", e.g. ``tagged_words(disamb_only=False)``.
44
+
45
+ The IPIPAN Corpus contains tags indicating if there is a space between two
46
+ tokens. To add special "no space" markers, you should specify the parameter
47
+ "append_no_space=True", e.g. ``tagged_words(append_no_space=True)``.
48
+ As a result, wherever there should be no space between two tokens, the
49
+ pair ('', 'no-space') will be inserted (for tagged data), or just '' for
50
+ methods without tags.
51
+
52
+ The corpus reader can also try to append spaces between words. To enable this
53
+ option, specify the parameter "append_space=True", e.g. ``words(append_space=True)``.
54
+ As a result either ' ' or (' ', 'space') will be inserted between tokens.
55
+
56
+ By default, xml entities like &quot; and &amp; are replaced by corresponding
57
+ characters. You can turn off this feature by specifying the parameter
58
+ "replace_xmlentities=False", e.g. ``words(replace_xmlentities=False)``.
59
+ """
60
+
61
+ def __init__(self, root, fileids):
62
+ CorpusReader.__init__(self, root, fileids, None, None)
63
+
64
+ def channels(self, fileids=None):
65
+ if not fileids:
66
+ fileids = self.fileids()
67
+ return self._parse_header(fileids, "channel")
68
+
69
+ def domains(self, fileids=None):
70
+ if not fileids:
71
+ fileids = self.fileids()
72
+ return self._parse_header(fileids, "domain")
73
+
74
+ def categories(self, fileids=None):
75
+ if not fileids:
76
+ fileids = self.fileids()
77
+ return [
78
+ self._map_category(cat) for cat in self._parse_header(fileids, "keyTerm")
79
+ ]
80
+
81
+ def fileids(self, channels=None, domains=None, categories=None):
82
+ if channels is not None and domains is not None and categories is not None:
83
+ raise ValueError(
84
+ "You can specify only one of channels, domains "
85
+ "and categories parameter at once"
86
+ )
87
+ if channels is None and domains is None and categories is None:
88
+ return CorpusReader.fileids(self)
89
+ if isinstance(channels, str):
90
+ channels = [channels]
91
+ if isinstance(domains, str):
92
+ domains = [domains]
93
+ if isinstance(categories, str):
94
+ categories = [categories]
95
+ if channels:
96
+ return self._list_morph_files_by("channel", channels)
97
+ elif domains:
98
+ return self._list_morph_files_by("domain", domains)
99
+ else:
100
+ return self._list_morph_files_by(
101
+ "keyTerm", categories, map=self._map_category
102
+ )
103
+
104
+ @_parse_args
105
+ def sents(self, fileids=None, **kwargs):
106
+ return concat(
107
+ [
108
+ self._view(
109
+ fileid, mode=IPIPANCorpusView.SENTS_MODE, tags=False, **kwargs
110
+ )
111
+ for fileid in self._list_morph_files(fileids)
112
+ ]
113
+ )
114
+
115
+ @_parse_args
116
+ def paras(self, fileids=None, **kwargs):
117
+ return concat(
118
+ [
119
+ self._view(
120
+ fileid, mode=IPIPANCorpusView.PARAS_MODE, tags=False, **kwargs
121
+ )
122
+ for fileid in self._list_morph_files(fileids)
123
+ ]
124
+ )
125
+
126
+ @_parse_args
127
+ def words(self, fileids=None, **kwargs):
128
+ return concat(
129
+ [
130
+ self._view(fileid, tags=False, **kwargs)
131
+ for fileid in self._list_morph_files(fileids)
132
+ ]
133
+ )
134
+
135
+ @_parse_args
136
+ def tagged_sents(self, fileids=None, **kwargs):
137
+ return concat(
138
+ [
139
+ self._view(fileid, mode=IPIPANCorpusView.SENTS_MODE, **kwargs)
140
+ for fileid in self._list_morph_files(fileids)
141
+ ]
142
+ )
143
+
144
+ @_parse_args
145
+ def tagged_paras(self, fileids=None, **kwargs):
146
+ return concat(
147
+ [
148
+ self._view(fileid, mode=IPIPANCorpusView.PARAS_MODE, **kwargs)
149
+ for fileid in self._list_morph_files(fileids)
150
+ ]
151
+ )
152
+
153
+ @_parse_args
154
+ def tagged_words(self, fileids=None, **kwargs):
155
+ return concat(
156
+ [self._view(fileid, **kwargs) for fileid in self._list_morph_files(fileids)]
157
+ )
158
+
159
+ def _list_morph_files(self, fileids):
160
+ return [f for f in self.abspaths(fileids)]
161
+
162
+ def _list_header_files(self, fileids):
163
+ return [
164
+ f.replace("morph.xml", "header.xml")
165
+ for f in self._list_morph_files(fileids)
166
+ ]
167
+
168
+ def _parse_header(self, fileids, tag):
169
+ values = set()
170
+ for f in self._list_header_files(fileids):
171
+ values_list = self._get_tag(f, tag)
172
+ for v in values_list:
173
+ values.add(v)
174
+ return list(values)
175
+
176
+ def _list_morph_files_by(self, tag, values, map=None):
177
+ fileids = self.fileids()
178
+ ret_fileids = set()
179
+ for f in fileids:
180
+ fp = self.abspath(f).replace("morph.xml", "header.xml")
181
+ values_list = self._get_tag(fp, tag)
182
+ for value in values_list:
183
+ if map is not None:
184
+ value = map(value)
185
+ if value in values:
186
+ ret_fileids.add(f)
187
+ return list(ret_fileids)
188
+
189
+ def _get_tag(self, f, tag):
190
+ tags = []
191
+ with open(f) as infile:
192
+ header = infile.read()
193
+ tag_end = 0
194
+ while True:
195
+ tag_pos = header.find("<" + tag, tag_end)
196
+ if tag_pos < 0:
197
+ return tags
198
+ tag_end = header.find("</" + tag + ">", tag_pos)
199
+ tags.append(header[tag_pos + len(tag) + 2 : tag_end])
200
+
201
+ def _map_category(self, cat):
202
+ pos = cat.find(">")
203
+ if pos == -1:
204
+ return cat
205
+ else:
206
+ return cat[pos + 1 :]
207
+
208
+ def _view(self, filename, **kwargs):
209
+ tags = kwargs.pop("tags", True)
210
+ mode = kwargs.pop("mode", 0)
211
+ simplify_tags = kwargs.pop("simplify_tags", False)
212
+ one_tag = kwargs.pop("one_tag", True)
213
+ disamb_only = kwargs.pop("disamb_only", True)
214
+ append_no_space = kwargs.pop("append_no_space", False)
215
+ append_space = kwargs.pop("append_space", False)
216
+ replace_xmlentities = kwargs.pop("replace_xmlentities", True)
217
+
218
+ if len(kwargs) > 0:
219
+ raise ValueError("Unexpected arguments: %s" % kwargs.keys())
220
+ if not one_tag and not disamb_only:
221
+ raise ValueError(
222
+ "You cannot specify both one_tag=False and " "disamb_only=False"
223
+ )
224
+ if not tags and (simplify_tags or not one_tag or not disamb_only):
225
+ raise ValueError(
226
+ "You cannot specify simplify_tags, one_tag or "
227
+ "disamb_only with functions other than tagged_*"
228
+ )
229
+
230
+ return IPIPANCorpusView(
231
+ filename,
232
+ tags=tags,
233
+ mode=mode,
234
+ simplify_tags=simplify_tags,
235
+ one_tag=one_tag,
236
+ disamb_only=disamb_only,
237
+ append_no_space=append_no_space,
238
+ append_space=append_space,
239
+ replace_xmlentities=replace_xmlentities,
240
+ )
241
+
242
+
243
+ class IPIPANCorpusView(StreamBackedCorpusView):
244
+
245
+ WORDS_MODE = 0
246
+ SENTS_MODE = 1
247
+ PARAS_MODE = 2
248
+
249
+ def __init__(self, filename, startpos=0, **kwargs):
250
+ StreamBackedCorpusView.__init__(self, filename, None, startpos, None)
251
+ self.in_sentence = False
252
+ self.position = 0
253
+
254
+ self.show_tags = kwargs.pop("tags", True)
255
+ self.disamb_only = kwargs.pop("disamb_only", True)
256
+ self.mode = kwargs.pop("mode", IPIPANCorpusView.WORDS_MODE)
257
+ self.simplify_tags = kwargs.pop("simplify_tags", False)
258
+ self.one_tag = kwargs.pop("one_tag", True)
259
+ self.append_no_space = kwargs.pop("append_no_space", False)
260
+ self.append_space = kwargs.pop("append_space", False)
261
+ self.replace_xmlentities = kwargs.pop("replace_xmlentities", True)
262
+
263
+ def read_block(self, stream):
264
+ sentence = []
265
+ sentences = []
266
+ space = False
267
+ no_space = False
268
+
269
+ tags = set()
270
+
271
+ lines = self._read_data(stream)
272
+
273
+ while True:
274
+
275
+ # we may have only part of last line
276
+ if len(lines) <= 1:
277
+ self._seek(stream)
278
+ lines = self._read_data(stream)
279
+
280
+ if lines == [""]:
281
+ assert not sentences
282
+ return []
283
+
284
+ line = lines.pop()
285
+ self.position += len(line) + 1
286
+
287
+ if line.startswith('<chunk type="s"'):
288
+ self.in_sentence = True
289
+ elif line.startswith('<chunk type="p"'):
290
+ pass
291
+ elif line.startswith("<tok"):
292
+ if self.append_space and space and not no_space:
293
+ self._append_space(sentence)
294
+ space = True
295
+ no_space = False
296
+ orth = ""
297
+ tags = set()
298
+ elif line.startswith("</chunk"):
299
+ if self.in_sentence:
300
+ self.in_sentence = False
301
+ self._seek(stream)
302
+ if self.mode == self.SENTS_MODE:
303
+ return [sentence]
304
+ elif self.mode == self.WORDS_MODE:
305
+ if self.append_space:
306
+ self._append_space(sentence)
307
+ return sentence
308
+ else:
309
+ sentences.append(sentence)
310
+ elif self.mode == self.PARAS_MODE:
311
+ self._seek(stream)
312
+ return [sentences]
313
+ elif line.startswith("<orth"):
314
+ orth = line[6:-7]
315
+ if self.replace_xmlentities:
316
+ orth = orth.replace("&quot;", '"').replace("&amp;", "&")
317
+ elif line.startswith("<lex"):
318
+ if not self.disamb_only or line.find("disamb=") != -1:
319
+ tag = line[line.index("<ctag") + 6 : line.index("</ctag")]
320
+ tags.add(tag)
321
+ elif line.startswith("</tok"):
322
+ if self.show_tags:
323
+ if self.simplify_tags:
324
+ tags = [t.split(":")[0] for t in tags]
325
+ if not self.one_tag or not self.disamb_only:
326
+ sentence.append((orth, tuple(tags)))
327
+ else:
328
+ sentence.append((orth, tags.pop()))
329
+ else:
330
+ sentence.append(orth)
331
+ elif line.startswith("<ns/>"):
332
+ if self.append_space:
333
+ no_space = True
334
+ if self.append_no_space:
335
+ if self.show_tags:
336
+ sentence.append(("", "no-space"))
337
+ else:
338
+ sentence.append("")
339
+ elif line.startswith("</cesAna"):
340
+ pass
341
+
342
+ def _read_data(self, stream):
343
+ self.position = stream.tell()
344
+ buff = stream.read(4096)
345
+ lines = buff.split("\n")
346
+ lines.reverse()
347
+ return lines
348
+
349
+ def _seek(self, stream):
350
+ stream.seek(self.position)
351
+
352
+ def _append_space(self, sentence):
353
+ if self.show_tags:
354
+ sentence.append((" ", "space"))
355
+ else:
356
+ sentence.append(" ")
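A hedged sketch of the metadata filtering and simplified tags described in the class docstring; the root path is a placeholder and assumes a local copy of the IPI PAN corpus whose fileids end in morph.xml:

    from nltk.corpus.reader.ipipan import IPIPANCorpusReader

    reader = IPIPANCorpusReader("/path/to/ipipan", r".*morph\.xml")
    print(reader.channels())                     # e.g. ['prasa', ...]
    press = reader.fileids(channels="prasa")     # select files by channel metadata
    print(reader.tagged_words(press, simplify_tags=True)[:5])   # (word, POS) pairs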
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/knbc.py ADDED
@@ -0,0 +1,188 @@
1
+ #! /usr/bin/env python
2
+ # KNB Corpus reader
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Masato Hagiwara <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # For more information, see http://lilyx.net/pages/nltkjapanesecorpus.html
9
+
10
+ import re
11
+
12
+ from nltk.corpus.reader.api import CorpusReader, SyntaxCorpusReader
13
+ from nltk.corpus.reader.util import (
14
+ FileSystemPathPointer,
15
+ find_corpus_fileids,
16
+ read_blankline_block,
17
+ )
18
+ from nltk.parse import DependencyGraph
19
+
20
+ # default function to convert morphlist to str for tree representation
21
+ _morphs2str_default = lambda morphs: "/".join(m[0] for m in morphs if m[0] != "EOS")
22
+
23
+
24
+ class KNBCorpusReader(SyntaxCorpusReader):
25
+ """
26
+ This class implements:
27
+ - ``__init__``, which specifies the location of the corpus
28
+ and a method for detecting the sentence blocks in corpus files.
29
+ - ``_read_block``, which reads a block from the input stream.
30
+ - ``_word``, which takes a block and returns a list of list of words.
31
+ - ``_tag``, which takes a block and returns a list of list of tagged
32
+ words.
33
+ - ``_parse``, which takes a block and returns a list of parsed
34
+ sentences.
35
+
36
+ The structure of tagged words:
37
+ tagged_word = (word(str), tags(tuple))
38
+ tags = (surface, reading, lemma, pos1, posid1, pos2, posid2, pos3, posid3, others ...)
39
+
40
+ Usage example
41
+
42
+ >>> from nltk.corpus.util import LazyCorpusLoader
43
+ >>> knbc = LazyCorpusLoader(
44
+ ... 'knbc/corpus1',
45
+ ... KNBCorpusReader,
46
+ ... r'.*/KN.*',
47
+ ... encoding='euc-jp',
48
+ ... )
49
+
50
+ >>> len(knbc.sents()[0])
51
+ 9
52
+
53
+ """
54
+
55
+ def __init__(self, root, fileids, encoding="utf8", morphs2str=_morphs2str_default):
56
+ """
57
+ Initialize KNBCorpusReader
58
+ morphs2str is a function to convert morphlist to str for tree representation
59
+ for _parse()
60
+ """
61
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
62
+ self.morphs2str = morphs2str
63
+
64
+ def _read_block(self, stream):
65
+ # blocks are split by blankline (or EOF) - default
66
+ return read_blankline_block(stream)
67
+
68
+ def _word(self, t):
69
+ res = []
70
+ for line in t.splitlines():
71
+ # ignore the bunsetsu headers
72
+ if not re.match(r"EOS|\*|\#|\+", line):
73
+ cells = line.strip().split(" ")
74
+ res.append(cells[0])
75
+
76
+ return res
77
+
78
+ # ignores tagset argument
79
+ def _tag(self, t, tagset=None):
80
+ res = []
81
+ for line in t.splitlines():
82
+ # ignore the bunsetsu headers
83
+ if not re.match(r"EOS|\*|\#|\+", line):
84
+ cells = line.strip().split(" ")
85
+ # convert cells to morph tuples
86
+ res.append((cells[0], " ".join(cells[1:])))
87
+
88
+ return res
89
+
90
+ def _parse(self, t):
91
+ dg = DependencyGraph()
92
+ i = 0
93
+ for line in t.splitlines():
94
+ if line[0] in "*+":
95
+ # start of bunsetsu or tag
96
+
97
+ cells = line.strip().split(" ", 3)
98
+ m = re.match(r"([\-0-9]*)([ADIP])", cells[1])
99
+
100
+ assert m is not None
101
+
102
+ node = dg.nodes[i]
103
+ node.update({"address": i, "rel": m.group(2), "word": []})
104
+
105
+ dep_parent = int(m.group(1))
106
+
107
+ if dep_parent == -1:
108
+ dg.root = node
109
+ else:
110
+ dg.nodes[dep_parent]["deps"].append(i)
111
+
112
+ i += 1
113
+ elif line[0] != "#":
114
+ # normal morph
115
+ cells = line.strip().split(" ")
116
+ # convert cells to morph tuples
117
+ morph = cells[0], " ".join(cells[1:])
118
+ dg.nodes[i - 1]["word"].append(morph)
119
+
120
+ if self.morphs2str:
121
+ for node in dg.nodes.values():
122
+ node["word"] = self.morphs2str(node["word"])
123
+
124
+ return dg.tree()
125
+
126
+
127
+ ######################################################################
128
+ # Demo
129
+ ######################################################################
130
+
131
+
132
+ def demo():
133
+
134
+ import nltk
135
+ from nltk.corpus.util import LazyCorpusLoader
136
+
137
+ root = nltk.data.find("corpora/knbc/corpus1")
138
+ fileids = [
139
+ f
140
+ for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
141
+ if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)
142
+ ]
143
+
144
+ def _knbc_fileids_sort(x):
145
+ cells = x.split("-")
146
+ return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))
147
+
148
+ knbc = LazyCorpusLoader(
149
+ "knbc/corpus1",
150
+ KNBCorpusReader,
151
+ sorted(fileids, key=_knbc_fileids_sort),
152
+ encoding="euc-jp",
153
+ )
154
+
155
+ print(knbc.fileids()[:10])
156
+ print("".join(knbc.words()[:100]))
157
+
158
+ print("\n\n".join(str(tree) for tree in knbc.parsed_sents()[:2]))
159
+
160
+ knbc.morphs2str = lambda morphs: "/".join(
161
+ "{}({})".format(m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS"
162
+ ).encode("utf-8")
163
+
164
+ print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))
165
+
166
+ print(
167
+ "\n".join(
168
+ " ".join("{}/{}".format(w[0], w[1].split(" ")[2]) for w in sent)
169
+ for sent in knbc.tagged_sents()[0:2]
170
+ )
171
+ )
172
+
173
+
174
+ def test():
175
+
176
+ from nltk.corpus.util import LazyCorpusLoader
177
+
178
+ knbc = LazyCorpusLoader(
179
+ "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp"
180
+ )
181
+ assert isinstance(knbc.words()[0], str)
182
+ assert isinstance(knbc.sents()[0][0], str)
183
+ assert isinstance(knbc.tagged_words()[0], tuple)
184
+ assert isinstance(knbc.tagged_sents()[0][0], tuple)
185
+
186
+
187
+ if __name__ == "__main__":
188
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/lin.py ADDED
@@ -0,0 +1,183 @@
1
+ # Natural Language Toolkit: Lin's Thesaurus
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dan Blanchard <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.txt
7
+
8
+ import re
9
+ from collections import defaultdict
10
+ from functools import reduce
11
+
12
+ from nltk.corpus.reader import CorpusReader
13
+
14
+
15
+ class LinThesaurusCorpusReader(CorpusReader):
16
+ """Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin."""
17
+
18
+ # Compiled regular expression for extracting the key from the first line of each
19
+ # thesaurus entry
20
+ _key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+')
21
+
22
+ @staticmethod
23
+ def __defaultdict_factory():
24
+ """Factory for creating defaultdict of defaultdict(dict)s"""
25
+ return defaultdict(dict)
26
+
27
+ def __init__(self, root, badscore=0.0):
28
+ """
29
+ Initialize the thesaurus.
30
+
31
+ :param root: root directory containing thesaurus LISP files
32
+ :type root: C{string}
33
+ :param badscore: the score to give to words which do not appear in each other's sets of synonyms
34
+ :type badscore: C{float}
35
+ """
36
+
37
+ super().__init__(root, r"sim[A-Z]\.lsp")
38
+ self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory)
39
+ self._badscore = badscore
40
+ for path, encoding, fileid in self.abspaths(
41
+ include_encoding=True, include_fileid=True
42
+ ):
43
+ with open(path) as lin_file:
44
+ first = True
45
+ for line in lin_file:
46
+ line = line.strip()
47
+ # Start of entry
48
+ if first:
49
+ key = LinThesaurusCorpusReader._key_re.sub(r"\1", line)
50
+ first = False
51
+ # End of entry
52
+ elif line == "))":
53
+ first = True
54
+ # Lines with pairs of ngrams and scores
55
+ else:
56
+ split_line = line.split("\t")
57
+ if len(split_line) == 2:
58
+ ngram, score = split_line
59
+ self._thesaurus[fileid][key][ngram.strip('"')] = float(
60
+ score
61
+ )
62
+
63
+ def similarity(self, ngram1, ngram2, fileid=None):
64
+ """
65
+ Returns the similarity score for two ngrams.
66
+
67
+ :param ngram1: first ngram to compare
68
+ :type ngram1: C{string}
69
+ :param ngram2: second ngram to compare
70
+ :type ngram2: C{string}
71
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
72
+ :type fileid: C{string}
73
+ :return: If fileid is specified, just the score for the two ngrams; otherwise,
74
+ list of tuples of fileids and scores.
75
+ """
76
+ # Entries don't contain themselves, so make sure similarity between item and itself is 1.0
77
+ if ngram1 == ngram2:
78
+ if fileid:
79
+ return 1.0
80
+ else:
81
+ return [(fid, 1.0) for fid in self._fileids]
82
+ else:
83
+ if fileid:
84
+ return (
85
+ self._thesaurus[fileid][ngram1][ngram2]
86
+ if ngram2 in self._thesaurus[fileid][ngram1]
87
+ else self._badscore
88
+ )
89
+ else:
90
+ return [
91
+ (
92
+ fid,
93
+ (
94
+ self._thesaurus[fid][ngram1][ngram2]
95
+ if ngram2 in self._thesaurus[fid][ngram1]
96
+ else self._badscore
97
+ ),
98
+ )
99
+ for fid in self._fileids
100
+ ]
101
+
102
+ def scored_synonyms(self, ngram, fileid=None):
103
+ """
104
+ Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram
105
+
106
+ :param ngram: ngram to lookup
107
+ :type ngram: C{string}
108
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
109
+ :type fileid: C{string}
110
+ :return: If fileid is specified, list of tuples of scores and synonyms; otherwise,
111
+ list of tuples of fileids and lists, where inner lists consist of tuples of
112
+ scores and synonyms.
113
+ """
114
+ if fileid:
115
+ return self._thesaurus[fileid][ngram].items()
116
+ else:
117
+ return [
118
+ (fileid, self._thesaurus[fileid][ngram].items())
119
+ for fileid in self._fileids
120
+ ]
121
+
122
+ def synonyms(self, ngram, fileid=None):
123
+ """
124
+ Returns a list of synonyms for the current ngram.
125
+
126
+ :param ngram: ngram to lookup
127
+ :type ngram: C{string}
128
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
129
+ :type fileid: C{string}
130
+ :return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and
131
+ lists, where inner lists contain synonyms.
132
+ """
133
+ if fileid:
134
+ return self._thesaurus[fileid][ngram].keys()
135
+ else:
136
+ return [
137
+ (fileid, self._thesaurus[fileid][ngram].keys())
138
+ for fileid in self._fileids
139
+ ]
140
+
141
+ def __contains__(self, ngram):
142
+ """
143
+ Determines whether or not the given ngram is in the thesaurus.
144
+
145
+ :param ngram: ngram to lookup
146
+ :type ngram: C{string}
147
+ :return: whether the given ngram is in the thesaurus.
148
+ """
149
+ return reduce(
150
+ lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]),
151
+ self._fileids,
152
+ False,
153
+ )
154
+
155
+
156
+ ######################################################################
157
+ # Demo
158
+ ######################################################################
159
+
160
+
161
+ def demo():
162
+ from nltk.corpus import lin_thesaurus as thes
163
+
164
+ word1 = "business"
165
+ word2 = "enterprise"
166
+ print("Getting synonyms for " + word1)
167
+ print(thes.synonyms(word1))
168
+
169
+ print("Getting scored synonyms for " + word1)
170
+ print(thes.scored_synonyms(word1))
171
+
172
+ print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
173
+ print(thes.synonyms(word1, fileid="simN.lsp"))
174
+
175
+ print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
176
+ print(thes.synonyms(word1, fileid="simN.lsp"))
177
+
178
+ print(f"Similarity score for {word1} and {word2}:")
179
+ print(thes.similarity(word1, word2))
180
+
181
+
182
+ if __name__ == "__main__":
183
+ demo()
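
A minimal usage sketch for the reader defined above, assuming the `lin_thesaurus` data has been installed (for example via `nltk.download("lin_thesaurus")`); the word pair is arbitrary.

    from nltk.corpus import lin_thesaurus as thes

    # Score within a single thesaurus file; simN.lsp holds the noun entries.
    print(thes.similarity("business", "enterprise", fileid="simN.lsp"))

    # Without a fileid, a list of (fileid, score) pairs is returned. Pairs that
    # never appear in each other's entries fall back to badscore (0.0 by default).
    print(thes.similarity("business", "xylophone"))
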
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py ADDED
@@ -0,0 +1,342 @@
1
+ from collections import namedtuple
2
+ from functools import partial, wraps
3
+
4
+ from nltk.corpus.reader.api import CategorizedCorpusReader
5
+ from nltk.corpus.reader.plaintext import PlaintextCorpusReader
6
+ from nltk.corpus.reader.util import concat, read_blankline_block
7
+ from nltk.tokenize import blankline_tokenize, sent_tokenize, word_tokenize
8
+
9
+
10
+ def comma_separated_string_args(func):
11
+ """
12
+ A decorator that allows a function to be called with
13
+ a single string of comma-separated values which become
14
+ individual function arguments.
15
+ """
16
+
17
+ @wraps(func)
18
+ def wrapper(*args, **kwargs):
19
+ _args = list()
20
+ for arg in args:
21
+ if isinstance(arg, str):
22
+ _args.append({part.strip() for part in arg.split(",")})
23
+ elif isinstance(arg, list):
24
+ _args.append(set(arg))
25
+ else:
26
+ _args.append(arg)
27
+ for name, value in kwargs.items():
28
+ if isinstance(value, str):
29
+ kwargs[name] = {part.strip() for part in value.split(",")}
30
+ return func(*_args, **kwargs)
31
+
32
+ return wrapper
33
+
34
+
35
+ def read_parse_blankline_block(stream, parser):
36
+ block = read_blankline_block(stream)
37
+ if block:
38
+ return [parser.render(block[0])]
39
+ return block
40
+
41
+
42
+ class MarkdownBlock:
43
+ def __init__(self, content):
44
+ self.content = content
45
+ self.truncate_at = 16
46
+
47
+ def __repr__(self):
48
+ return f"{self.__class__.__name__}(content={repr(str(self))})"
49
+
50
+ def __str__(self):
51
+ return (
52
+ f"{self.content[:self.truncate_at]}"
53
+ f"{'...' if len(self.content) > self.truncate_at else ''}"
54
+ )
55
+
56
+ @property
57
+ def raw(self):
58
+ return self.content
59
+
60
+ @property
61
+ def words(self):
62
+ return word_tokenize(self.content)
63
+
64
+ @property
65
+ def sents(self):
66
+ return [word_tokenize(sent) for sent in sent_tokenize(self.content)]
67
+
68
+ @property
69
+ def paras(self):
70
+ return [
71
+ [word_tokenize(sent) for sent in sent_tokenize(para)]
72
+ for para in blankline_tokenize(self.content)
73
+ ]
74
+
75
+
76
+ class CodeBlock(MarkdownBlock):
77
+ def __init__(self, language, *args):
78
+ self.language = language
79
+ super().__init__(*args)
80
+
81
+ @property
82
+ def sents(self):
83
+ return [word_tokenize(line) for line in self.content.splitlines()]
84
+
85
+ @property
86
+ def lines(self):
87
+ return self.content.splitlines()
88
+
89
+ @property
90
+ def paras(self):
91
+ return [
92
+ [word_tokenize(line) for line in para.splitlines()]
93
+ for para in blankline_tokenize(self.content)
94
+ ]
95
+
96
+
97
+ class MarkdownSection(MarkdownBlock):
98
+ def __init__(self, heading, level, *args):
99
+ self.heading = heading
100
+ self.level = level
101
+ super().__init__(*args)
102
+
103
+
104
+ Image = namedtuple("Image", "label, src, title")
105
+ Link = namedtuple("Link", "label, href, title")
106
+ List = namedtuple("List", "is_ordered, items")
107
+
108
+
109
+ class MarkdownCorpusReader(PlaintextCorpusReader):
110
+ def __init__(self, *args, parser=None, **kwargs):
111
+ from markdown_it import MarkdownIt
112
+ from mdit_plain.renderer import RendererPlain
113
+ from mdit_py_plugins.front_matter import front_matter_plugin
114
+
115
+ self.parser = parser
116
+ if self.parser is None:
117
+ self.parser = MarkdownIt("commonmark", renderer_cls=RendererPlain)
118
+ self.parser.use(front_matter_plugin)
119
+
120
+ kwargs.setdefault(
121
+ "para_block_reader", partial(read_parse_blankline_block, parser=self.parser)
122
+ )
123
+ super().__init__(*args, **kwargs)
124
+
125
+ # This override takes care of removing markup.
126
+ def _read_word_block(self, stream):
127
+ words = list()
128
+ for para in self._para_block_reader(stream):
129
+ words.extend(self._word_tokenizer.tokenize(para))
130
+ return words
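
A construction sketch, assuming a hypothetical `my_markdown/` directory of `.md` files and the optional `markdown-it-py`, `mdit_plain` and `mdit_py_plugins` packages imported by the constructor:

    reader = MarkdownCorpusReader("my_markdown/", r".*\.md")

    print(reader.fileids())
    print(reader.words()[:20])  # markup already stripped by the plain-text renderer
    print(reader.paras()[0])    # paragraphs come from the blank-line block reader
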
131
+
132
+
133
+ class CategorizedMarkdownCorpusReader(CategorizedCorpusReader, MarkdownCorpusReader):
134
+ """
135
+ A reader for markdown corpora whose documents are divided into
136
+ categories based on their file identifiers.
137
+
138
+ Based on nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader:
139
+ https://www.nltk.org/_modules/nltk/corpus/reader/api.html#CategorizedCorpusReader
140
+ """
141
+
142
+ def __init__(self, *args, cat_field="tags", **kwargs):
143
+ """
144
+ Initialize the corpus reader. Categorization arguments
145
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
146
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
147
+ are passed to the ``MarkdownCorpusReader`` constructor.
148
+ """
149
+ cat_args = ["cat_pattern", "cat_map", "cat_file"]
150
+ if not any(arg in kwargs for arg in cat_args):
151
+ # Initialize with a blank map now,
152
+ # and try to build categories from document metadata later.
153
+ kwargs["cat_map"] = dict()
154
+ CategorizedCorpusReader.__init__(self, kwargs)
155
+ MarkdownCorpusReader.__init__(self, *args, **kwargs)
156
+
157
+ # Map file IDs to categories if self._map exists but is still empty:
158
+ if self._map is not None and not self._map:
159
+ for file_id in self._fileids:
160
+ metadata = self.metadata(file_id)
161
+ if metadata:
162
+ self._map[file_id] = metadata[0].get(cat_field, [])
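
With no explicit categorization arguments, the constructor above derives categories from each document's front matter: whatever list sits under the field named by `cat_field` (`tags` by default) becomes that file's categories. A sketch with an invented file and tag values:

    # posts/intro.md (hypothetical):
    #   ---
    #   tags: [nltk, corpora]
    #   ---
    #   # Introduction
    #   Body text ...
    reader = CategorizedMarkdownCorpusReader("posts/", r".*\.md", cat_field="tags")
    print(reader.categories())     # e.g. ['corpora', 'nltk']
    print(reader.fileids("nltk"))  # files whose front matter carries that tag
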
163
+
164
+ ### Begin CategorizedCorpusReader Overrides
165
+ @comma_separated_string_args
166
+ def categories(self, fileids=None):
167
+ return super().categories(fileids)
168
+
169
+ @comma_separated_string_args
170
+ def fileids(self, categories=None):
171
+ if categories is None:
172
+ return self._fileids
173
+ return super().fileids(categories)
174
+
175
+ ### End CategorizedCorpusReader Overrides
176
+
177
+ ### Begin MarkdownCorpusReader Overrides
178
+ @comma_separated_string_args
179
+ def raw(self, fileids=None, categories=None):
180
+ return super().raw(self._resolve(fileids, categories))
181
+
182
+ @comma_separated_string_args
183
+ def words(self, fileids=None, categories=None):
184
+ return super().words(self._resolve(fileids, categories))
185
+
186
+ @comma_separated_string_args
187
+ def sents(self, fileids=None, categories=None):
188
+ return super().sents(self._resolve(fileids, categories))
189
+
190
+ @comma_separated_string_args
191
+ def paras(self, fileids=None, categories=None):
192
+ return super().paras(self._resolve(fileids, categories))
193
+
194
+ ### End MarkdownCorpusReader Overrides
195
+
196
+ def concatenated_view(self, reader, fileids, categories):
197
+ return concat(
198
+ [
199
+ self.CorpusView(path, reader, encoding=enc)
200
+ for (path, enc) in self.abspaths(
201
+ self._resolve(fileids, categories), include_encoding=True
202
+ )
203
+ ]
204
+ )
205
+
206
+ def metadata_reader(self, stream):
207
+ from yaml import safe_load
208
+
209
+ return [
210
+ safe_load(t.content)
211
+ for t in self.parser.parse(stream.read())
212
+ if t.type == "front_matter"
213
+ ]
214
+
215
+ @comma_separated_string_args
216
+ def metadata(self, fileids=None, categories=None):
217
+ return self.concatenated_view(self.metadata_reader, fileids, categories)
218
+
219
+ def blockquote_reader(self, stream):
220
+ tokens = self.parser.parse(stream.read())
221
+ opening_tokens = filter(
222
+ lambda t: t.level == 0 and t.type == "blockquote_open", tokens
223
+ )
224
+ closing_tokens = filter(
225
+ lambda t: t.level == 0 and t.type == "blockquote_close", tokens
226
+ )
227
+ blockquotes = list()
228
+ for o, c in zip(opening_tokens, closing_tokens):
229
+ opening_index = tokens.index(o)
230
+ closing_index = tokens.index(c, opening_index)
231
+ blockquotes.append(tokens[opening_index : closing_index + 1])
232
+ return [
233
+ MarkdownBlock(
234
+ self.parser.renderer.render(block, self.parser.options, env=None)
235
+ )
236
+ for block in blockquotes
237
+ ]
238
+
239
+ @comma_separated_string_args
240
+ def blockquotes(self, fileids=None, categories=None):
241
+ return self.concatenated_view(self.blockquote_reader, fileids, categories)
242
+
243
+ def code_block_reader(self, stream):
244
+ return [
245
+ CodeBlock(
246
+ t.info,
247
+ t.content,
248
+ )
249
+ for t in self.parser.parse(stream.read())
250
+ if t.level == 0 and t.type in ("fence", "code_block")
251
+ ]
252
+
253
+ @comma_separated_string_args
254
+ def code_blocks(self, fileids=None, categories=None):
255
+ return self.concatenated_view(self.code_block_reader, fileids, categories)
256
+
257
+ def image_reader(self, stream):
258
+ return [
259
+ Image(
260
+ child_token.content,
261
+ child_token.attrGet("src"),
262
+ child_token.attrGet("title"),
263
+ )
264
+ for inline_token in filter(
265
+ lambda t: t.type == "inline", self.parser.parse(stream.read())
266
+ )
267
+ for child_token in inline_token.children
268
+ if child_token.type == "image"
269
+ ]
270
+
271
+ @comma_separated_string_args
272
+ def images(self, fileids=None, categories=None):
273
+ return self.concatenated_view(self.image_reader, fileids, categories)
274
+
275
+ def link_reader(self, stream):
276
+ return [
277
+ Link(
278
+ inline_token.children[i + 1].content,
279
+ child_token.attrGet("href"),
280
+ child_token.attrGet("title"),
281
+ )
282
+ for inline_token in filter(
283
+ lambda t: t.type == "inline", self.parser.parse(stream.read())
284
+ )
285
+ for i, child_token in enumerate(inline_token.children)
286
+ if child_token.type == "link_open"
287
+ ]
288
+
289
+ @comma_separated_string_args
290
+ def links(self, fileids=None, categories=None):
291
+ return self.concatenated_view(self.link_reader, fileids, categories)
292
+
293
+ def list_reader(self, stream):
294
+ tokens = self.parser.parse(stream.read())
295
+ opening_types = ("bullet_list_open", "ordered_list_open")
296
+ opening_tokens = filter(
297
+ lambda t: t.level == 0 and t.type in opening_types, tokens
298
+ )
299
+ closing_types = ("bullet_list_close", "ordered_list_close")
300
+ closing_tokens = filter(
301
+ lambda t: t.level == 0 and t.type in closing_types, tokens
302
+ )
303
+ list_blocks = list()
304
+ for o, c in zip(opening_tokens, closing_tokens):
305
+ opening_index = tokens.index(o)
306
+ closing_index = tokens.index(c, opening_index)
307
+ list_blocks.append(tokens[opening_index : closing_index + 1])
308
+ return [
309
+ List(
310
+ tokens[0].type == "ordered_list_open",
311
+ [t.content for t in tokens if t.content],
312
+ )
313
+ for tokens in list_blocks
314
+ ]
315
+
316
+ @comma_separated_string_args
317
+ def lists(self, fileids=None, categories=None):
318
+ return self.concatenated_view(self.list_reader, fileids, categories)
319
+
320
+ def section_reader(self, stream):
321
+ section_blocks, block = list(), list()
322
+ in_heading = False
323
+ for t in self.parser.parse(stream.read()):
324
+ if t.level == 0 and t.type == "heading_open":
325
+ if block:
326
+ section_blocks.append(block)
327
+ block = list()
328
+ in_heading = True
329
+ if in_heading:
330
+ block.append(t)
331
+ return [
332
+ MarkdownSection(
333
+ block[1].content,
334
+ block[0].markup.count("#"),
335
+ self.parser.renderer.render(block, self.parser.options, env=None),
336
+ )
337
+ for block in section_blocks
338
+ ]
339
+
340
+ @comma_separated_string_args
341
+ def sections(self, fileids=None, categories=None):
342
+ return self.concatenated_view(self.section_reader, fileids, categories)
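
Beyond the inherited token-level API, the reader exposes the structural views defined above. A short sketch over the same hypothetical `posts/` corpus:

    reader = CategorizedMarkdownCorpusReader("posts/", r".*\.md")

    for code in reader.code_blocks():      # CodeBlock objects from fences and indented code
        print(code.language, len(code.lines))

    for link in reader.links():            # Link(label, href, title) namedtuples
        print(link.label, "->", link.href)

    for section in reader.sections():      # MarkdownSection per heading
        print("#" * section.level, section.heading)
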
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/mte.py ADDED
@@ -0,0 +1,397 @@
1
+ """
2
+ A reader for corpora whose documents are in MTE format.
3
+ """
4
+ import os
5
+ import re
6
+ from functools import reduce
7
+
8
+ from nltk.corpus.reader import TaggedCorpusReader, concat
9
+ from nltk.corpus.reader.xmldocs import XMLCorpusView
10
+
11
+
12
+ def xpath(root, path, ns):
13
+ return root.findall(path, ns)
14
+
15
+
16
+ class MTECorpusView(XMLCorpusView):
17
+ """
18
+ Class for lazy viewing the MTE Corpus.
19
+ """
20
+
21
+ def __init__(self, fileid, tagspec, elt_handler=None):
22
+ XMLCorpusView.__init__(self, fileid, tagspec, elt_handler)
23
+
24
+ def read_block(self, stream, tagspec=None, elt_handler=None):
25
+ return list(
26
+ filter(
27
+ lambda x: x is not None,
28
+ XMLCorpusView.read_block(self, stream, tagspec, elt_handler),
29
+ )
30
+ )
31
+
32
+
33
+ class MTEFileReader:
34
+ """
35
+ Class for loading the content of the multext-east corpus. It
36
+ parses the xml files and does some tag-filtering depending on the
37
+ given method parameters.
38
+ """
39
+
40
+ ns = {
41
+ "tei": "http://www.tei-c.org/ns/1.0",
42
+ "xml": "http://www.w3.org/XML/1998/namespace",
43
+ }
44
+ tag_ns = "{http://www.tei-c.org/ns/1.0}"
45
+ xml_ns = "{http://www.w3.org/XML/1998/namespace}"
46
+ word_path = "TEI/text/body/div/div/p/s/(w|c)"
47
+ sent_path = "TEI/text/body/div/div/p/s"
48
+ para_path = "TEI/text/body/div/div/p"
49
+
50
+ def __init__(self, file_path):
51
+ self.__file_path = file_path
52
+
53
+ @classmethod
54
+ def _word_elt(cls, elt, context):
55
+ return elt.text
56
+
57
+ @classmethod
58
+ def _sent_elt(cls, elt, context):
59
+ return [cls._word_elt(w, None) for w in xpath(elt, "*", cls.ns)]
60
+
61
+ @classmethod
62
+ def _para_elt(cls, elt, context):
63
+ return [cls._sent_elt(s, None) for s in xpath(elt, "*", cls.ns)]
64
+
65
+ @classmethod
66
+ def _tagged_word_elt(cls, elt, context):
67
+ if "ana" not in elt.attrib:
68
+ return (elt.text, "")
69
+
70
+ if cls.__tags == "" and cls.__tagset == "msd":
71
+ return (elt.text, elt.attrib["ana"])
72
+ elif cls.__tags == "" and cls.__tagset == "universal":
73
+ return (elt.text, MTETagConverter.msd_to_universal(elt.attrib["ana"]))
74
+ else:
75
+ tags = re.compile("^" + re.sub("-", ".", cls.__tags) + ".*$")
76
+ if tags.match(elt.attrib["ana"]):
77
+ if cls.__tagset == "msd":
78
+ return (elt.text, elt.attrib["ana"])
79
+ else:
80
+ return (
81
+ elt.text,
82
+ MTETagConverter.msd_to_universal(elt.attrib["ana"]),
83
+ )
84
+ else:
85
+ return None
86
+
87
+ @classmethod
88
+ def _tagged_sent_elt(cls, elt, context):
89
+ return list(
90
+ filter(
91
+ lambda x: x is not None,
92
+ [cls._tagged_word_elt(w, None) for w in xpath(elt, "*", cls.ns)],
93
+ )
94
+ )
95
+
96
+ @classmethod
97
+ def _tagged_para_elt(cls, elt, context):
98
+ return list(
99
+ filter(
100
+ lambda x: x is not None,
101
+ [cls._tagged_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)],
102
+ )
103
+ )
104
+
105
+ @classmethod
106
+ def _lemma_word_elt(cls, elt, context):
107
+ if "lemma" not in elt.attrib:
108
+ return (elt.text, "")
109
+ else:
110
+ return (elt.text, elt.attrib["lemma"])
111
+
112
+ @classmethod
113
+ def _lemma_sent_elt(cls, elt, context):
114
+ return [cls._lemma_word_elt(w, None) for w in xpath(elt, "*", cls.ns)]
115
+
116
+ @classmethod
117
+ def _lemma_para_elt(cls, elt, context):
118
+ return [cls._lemma_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)]
119
+
120
+ def words(self):
121
+ return MTECorpusView(
122
+ self.__file_path, MTEFileReader.word_path, MTEFileReader._word_elt
123
+ )
124
+
125
+ def sents(self):
126
+ return MTECorpusView(
127
+ self.__file_path, MTEFileReader.sent_path, MTEFileReader._sent_elt
128
+ )
129
+
130
+ def paras(self):
131
+ return MTECorpusView(
132
+ self.__file_path, MTEFileReader.para_path, MTEFileReader._para_elt
133
+ )
134
+
135
+ def lemma_words(self):
136
+ return MTECorpusView(
137
+ self.__file_path, MTEFileReader.word_path, MTEFileReader._lemma_word_elt
138
+ )
139
+
140
+ def tagged_words(self, tagset, tags):
141
+ MTEFileReader.__tagset = tagset
142
+ MTEFileReader.__tags = tags
143
+ return MTECorpusView(
144
+ self.__file_path, MTEFileReader.word_path, MTEFileReader._tagged_word_elt
145
+ )
146
+
147
+ def lemma_sents(self):
148
+ return MTECorpusView(
149
+ self.__file_path, MTEFileReader.sent_path, MTEFileReader._lemma_sent_elt
150
+ )
151
+
152
+ def tagged_sents(self, tagset, tags):
153
+ MTEFileReader.__tagset = tagset
154
+ MTEFileReader.__tags = tags
155
+ return MTECorpusView(
156
+ self.__file_path, MTEFileReader.sent_path, MTEFileReader._tagged_sent_elt
157
+ )
158
+
159
+ def lemma_paras(self):
160
+ return MTECorpusView(
161
+ self.__file_path, MTEFileReader.para_path, MTEFileReader._lemma_para_elt
162
+ )
163
+
164
+ def tagged_paras(self, tagset, tags):
165
+ MTEFileReader.__tagset = tagset
166
+ MTEFileReader.__tags = tags
167
+ return MTECorpusView(
168
+ self.__file_path, MTEFileReader.para_path, MTEFileReader._tagged_para_elt
169
+ )
170
+
171
+
172
+ class MTETagConverter:
173
+ """
174
+ Class for converting msd tags to universal tags, more conversion
175
+ options are currently not implemented.
176
+ """
177
+
178
+ mapping_msd_universal = {
179
+ "A": "ADJ",
180
+ "S": "ADP",
181
+ "R": "ADV",
182
+ "C": "CONJ",
183
+ "D": "DET",
184
+ "N": "NOUN",
185
+ "M": "NUM",
186
+ "Q": "PRT",
187
+ "P": "PRON",
188
+ "V": "VERB",
189
+ ".": ".",
190
+ "-": "X",
191
+ }
192
+
193
+ @staticmethod
194
+ def msd_to_universal(tag):
195
+ """
196
+ This function converts an annotation from the MULTEXT-East MSD tagset to the universal tagset
197
+ as described in Chapter 5 of the NLTK-Book
198
+
199
+ Unknown tags will be mapped to X. Punctuation marks are not covered by the regular MSD categories, so they fall under the '.' entry of the mapping table.
200
+ """
201
+ indicator = tag[0] if not tag[0] == "#" else tag[1]
202
+
203
+ if not indicator in MTETagConverter.mapping_msd_universal:
204
+ indicator = "-"
205
+
206
+ return MTETagConverter.mapping_msd_universal[indicator]
207
+
208
+
209
+ class MTECorpusReader(TaggedCorpusReader):
210
+ """
211
+ Reader for corpora following the TEI-p5 xml scheme, such as MULTEXT-East.
212
+ MULTEXT-East contains part-of-speech-tagged words with a quite precise tagging
213
+ scheme. These tags can be converted to the Universal tagset
214
+ """
215
+
216
+ def __init__(self, root=None, fileids=None, encoding="utf8"):
217
+ """
218
+ Construct a new MTECorpusreader for a set of documents
219
+ located at the given root directory. Example usage:
220
+
221
+ >>> root = '/...path to corpus.../'
222
+ >>> reader = MTECorpusReader(root, 'oana-*.xml', 'utf8') # doctest: +SKIP
223
+
224
+ :param root: The root directory for this corpus. (default points to location in multext config file)
225
+ :param fileids: A list or regexp specifying the fileids in this corpus. (default is oana-en.xml)
226
+ :param encoding: The encoding of the given files (default is utf8)
227
+ """
228
+ TaggedCorpusReader.__init__(self, root, fileids, encoding)
229
+ self._readme = "00README.txt"
230
+
231
+ def __fileids(self, fileids):
232
+ if fileids is None:
233
+ fileids = self._fileids
234
+ elif isinstance(fileids, str):
235
+ fileids = [fileids]
236
+ # filter out invalid user input
237
+ fileids = [f for f in fileids if f in self._fileids]
238
+ # filter multext-east sourcefiles that are not compatible to the teip5 specification
239
+ fileids = [f for f in fileids if f not in ("oana-bg.xml", "oana-mk.xml")]
240
+ if not fileids:
241
+ print("No valid multext-east file specified")
242
+ return fileids
243
+
244
+ def words(self, fileids=None):
245
+ """
246
+ :param fileids: A list specifying the fileids that should be used.
247
+ :return: the given file(s) as a list of words and punctuation symbols.
248
+ :rtype: list(str)
249
+ """
250
+ return concat(
251
+ [
252
+ MTEFileReader(os.path.join(self._root, f)).words()
253
+ for f in self.__fileids(fileids)
254
+ ]
255
+ )
256
+
257
+ def sents(self, fileids=None):
258
+ """
259
+ :param fileids: A list specifying the fileids that should be used.
260
+ :return: the given file(s) as a list of sentences or utterances,
261
+ each encoded as a list of word strings
262
+ :rtype: list(list(str))
263
+ """
264
+ return concat(
265
+ [
266
+ MTEFileReader(os.path.join(self._root, f)).sents()
267
+ for f in self.__fileids(fileids)
268
+ ]
269
+ )
270
+
271
+ def paras(self, fileids=None):
272
+ """
273
+ :param fileids: A list specifying the fileids that should be used.
274
+ :return: the given file(s) as a list of paragraphs, each encoded as a list
275
+ of sentences, which are in turn encoded as lists of word string
276
+ :rtype: list(list(list(str)))
277
+ """
278
+ return concat(
279
+ [
280
+ MTEFileReader(os.path.join(self._root, f)).paras()
281
+ for f in self.__fileids(fileids)
282
+ ]
283
+ )
284
+
285
+ def lemma_words(self, fileids=None):
286
+ """
287
+ :param fileids: A list specifying the fileids that should be used.
288
+ :return: the given file(s) as a list of words, the corresponding lemmas
289
+ and punctuation symbols, encoded as tuples (word, lemma)
290
+ :rtype: list(tuple(str,str))
291
+ """
292
+ return concat(
293
+ [
294
+ MTEFileReader(os.path.join(self._root, f)).lemma_words()
295
+ for f in self.__fileids(fileids)
296
+ ]
297
+ )
298
+
299
+ def tagged_words(self, fileids=None, tagset="msd", tags=""):
300
+ """
301
+ :param fileids: A list specifying the fileids that should be used.
302
+ :param tagset: The tagset that should be used in the returned object,
303
+ either "universal" or "msd", "msd" is the default
304
+ :param tags: An MSD Tag that is used to filter all parts of the used corpus
305
+ that are not more precise or at least equal to the given tag
306
+ :return: the given file(s) as a list of tagged words and punctuation symbols
307
+ encoded as tuples (word, tag)
308
+ :rtype: list(tuple(str, str))
309
+ """
310
+ if tagset == "universal" or tagset == "msd":
311
+ return concat(
312
+ [
313
+ MTEFileReader(os.path.join(self._root, f)).tagged_words(
314
+ tagset, tags
315
+ )
316
+ for f in self.__fileids(fileids)
317
+ ]
318
+ )
319
+ else:
320
+ print("Unknown tagset specified.")
321
+
322
+ def lemma_sents(self, fileids=None):
323
+ """
324
+ :param fileids: A list specifying the fileids that should be used.
325
+ :return: the given file(s) as a list of sentences or utterances, each
326
+ encoded as a list of tuples of the word and the corresponding
327
+ lemma (word, lemma)
328
+ :rtype: list(list(tuple(str, str)))
329
+ """
330
+ return concat(
331
+ [
332
+ MTEFileReader(os.path.join(self._root, f)).lemma_sents()
333
+ for f in self.__fileids(fileids)
334
+ ]
335
+ )
336
+
337
+ def tagged_sents(self, fileids=None, tagset="msd", tags=""):
338
+ """
339
+ :param fileids: A list specifying the fileids that should be used.
340
+ :param tagset: The tagset that should be used in the returned object,
341
+ either "universal" or "msd", "msd" is the default
342
+ :param tags: An MSD Tag that is used to filter all parts of the used corpus
343
+ that are not more precise or at least equal to the given tag
344
+ :return: the given file(s) as a list of sentences or utterances, each
345
+ each encoded as a list of (word,tag) tuples
346
+ :rtype: list(list(tuple(str, str)))
347
+ """
348
+ if tagset == "universal" or tagset == "msd":
349
+ return concat(
350
+ [
351
+ MTEFileReader(os.path.join(self._root, f)).tagged_sents(
352
+ tagset, tags
353
+ )
354
+ for f in self.__fileids(fileids)
355
+ ]
356
+ )
357
+ else:
358
+ print("Unknown tagset specified.")
359
+
360
+ def lemma_paras(self, fileids=None):
361
+ """
362
+ :param fileids: A list specifying the fileids that should be used.
363
+ :return: the given file(s) as a list of paragraphs, each encoded as a
364
+ list of sentences, which are in turn encoded as a list of
365
+ tuples of the word and the corresponding lemma (word, lemma)
366
+ :rtype: list(List(List(tuple(str, str))))
367
+ """
368
+ return concat(
369
+ [
370
+ MTEFileReader(os.path.join(self._root, f)).lemma_paras()
371
+ for f in self.__fileids(fileids)
372
+ ]
373
+ )
374
+
375
+ def tagged_paras(self, fileids=None, tagset="msd", tags=""):
376
+ """
377
+ :param fileids: A list specifying the fileids that should be used.
378
+ :param tagset: The tagset that should be used in the returned object,
379
+ either "universal" or "msd", "msd" is the default
380
+ :param tags: An MSD Tag that is used to filter all parts of the used corpus
381
+ that are not more precise or at least equal to the given tag
382
+ :return: the given file(s) as a list of paragraphs, each encoded as a
383
+ list of sentences, which are in turn encoded as a list
384
+ of (word,tag) tuples
385
+ :rtype: list(list(list(tuple(str, str))))
386
+ """
387
+ if tagset == "universal" or tagset == "msd":
388
+ return concat(
389
+ [
390
+ MTEFileReader(os.path.join(self._root, f)).tagged_paras(
391
+ tagset, tags
392
+ )
393
+ for f in self.__fileids(fileids)
394
+ ]
395
+ )
396
+ else:
397
+ print("Unknown tagset specified.")
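
A minimal usage sketch for the reader; the root path is a placeholder for a local copy of the MULTEXT-East sample data, and `oana-en.xml` is the English file named in the constructor docstring.

    reader = MTECorpusReader(root="/path/to/mte_teip5/", fileids="oana-en.xml")

    print(reader.words()[:10])
    print(reader.tagged_words(tagset="universal")[:10])     # MSD tags mapped by MTETagConverter
    print(reader.tagged_sents(tagset="msd", tags="Nc")[0])  # keep only tags matching the 'Nc' prefix
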
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py ADDED
@@ -0,0 +1,487 @@
1
+ # Natural Language Toolkit: NKJP Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Gabriela Kaczka
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import functools
9
+ import os
10
+ import re
11
+ import tempfile
12
+
13
+ from nltk.corpus.reader.util import concat
14
+ from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView
15
+
16
+
17
+ def _parse_args(fun):
18
+ """
19
+ Wraps function arguments:
20
+ if fileids are not specified, the function falls back to the NKJPCorpusReader paths.
21
+ """
22
+
23
+ @functools.wraps(fun)
24
+ def decorator(self, fileids=None, **kwargs):
25
+ if not fileids:
26
+ fileids = self._paths
27
+ return fun(self, fileids, **kwargs)
28
+
29
+ return decorator
30
+
31
+
32
+ class NKJPCorpusReader(XMLCorpusReader):
33
+ WORDS_MODE = 0
34
+ SENTS_MODE = 1
35
+ HEADER_MODE = 2
36
+ RAW_MODE = 3
37
+
38
+ def __init__(self, root, fileids=".*"):
39
+ """
40
+ Corpus reader designed to work with National Corpus of Polish.
41
+ See http://nkjp.pl/ for more details about NKJP.
42
+ use example:
43
+ import nltk
44
+ import nkjp
45
+ from nkjp import NKJPCorpusReader
46
+ x = NKJPCorpusReader(root='/home/USER/nltk_data/corpora/nkjp/', fileids='') # obtain the whole corpus
47
+ x.header()
48
+ x.raw()
49
+ x.words()
50
+ x.tagged_words(tags=['subst', 'comp']) #Link to find more tags: nkjp.pl/poliqarp/help/ense2.html
51
+ x.sents()
52
+ x = NKJPCorpusReader(root='/home/USER/nltk_data/corpora/nkjp/', fileids='Wilk*') # obtain particular file(s)
53
+ x.header(fileids=['WilkDom', '/home/USER/nltk_data/corpora/nkjp/WilkWilczy'])
54
+ x.tagged_words(fileids=['WilkDom', '/home/USER/nltk_data/corpora/nkjp/WilkWilczy'], tags=['subst', 'comp'])
55
+ """
56
+ if isinstance(fileids, str):
57
+ XMLCorpusReader.__init__(self, root, fileids + ".*/header.xml")
58
+ else:
59
+ XMLCorpusReader.__init__(
60
+ self, root, [fileid + "/header.xml" for fileid in fileids]
61
+ )
62
+ self._paths = self.get_paths()
63
+
64
+ def get_paths(self):
65
+ return [
66
+ os.path.join(str(self._root), f.split("header.xml")[0])
67
+ for f in self._fileids
68
+ ]
69
+
70
+ def fileids(self):
71
+ """
72
+ Returns a list of file identifiers for the fileids that make up
73
+ this corpus.
74
+ """
75
+ return [f.split("header.xml")[0] for f in self._fileids]
76
+
77
+ def _view(self, filename, tags=None, **kwargs):
78
+ """
79
+ Returns a view specialised for use with particular corpus file.
80
+ """
81
+ mode = kwargs.pop("mode", NKJPCorpusReader.WORDS_MODE)
82
+ if mode is NKJPCorpusReader.WORDS_MODE:
83
+ return NKJPCorpus_Morph_View(filename, tags=tags)
84
+ elif mode is NKJPCorpusReader.SENTS_MODE:
85
+ return NKJPCorpus_Segmentation_View(filename, tags=tags)
86
+ elif mode is NKJPCorpusReader.HEADER_MODE:
87
+ return NKJPCorpus_Header_View(filename, tags=tags)
88
+ elif mode is NKJPCorpusReader.RAW_MODE:
89
+ return NKJPCorpus_Text_View(
90
+ filename, tags=tags, mode=NKJPCorpus_Text_View.RAW_MODE
91
+ )
92
+
93
+ else:
94
+ raise NameError("No such mode!")
95
+
96
+ def add_root(self, fileid):
97
+ """
98
+ Add root if necessary to specified fileid.
99
+ """
100
+ if self.root in fileid:
101
+ return fileid
102
+ return self.root + fileid
103
+
104
+ @_parse_args
105
+ def header(self, fileids=None, **kwargs):
106
+ """
107
+ Returns header(s) of specified fileids.
108
+ """
109
+ return concat(
110
+ [
111
+ self._view(
112
+ self.add_root(fileid), mode=NKJPCorpusReader.HEADER_MODE, **kwargs
113
+ ).handle_query()
114
+ for fileid in fileids
115
+ ]
116
+ )
117
+
118
+ @_parse_args
119
+ def sents(self, fileids=None, **kwargs):
120
+ """
121
+ Returns sentences in specified fileids.
122
+ """
123
+ return concat(
124
+ [
125
+ self._view(
126
+ self.add_root(fileid), mode=NKJPCorpusReader.SENTS_MODE, **kwargs
127
+ ).handle_query()
128
+ for fileid in fileids
129
+ ]
130
+ )
131
+
132
+ @_parse_args
133
+ def words(self, fileids=None, **kwargs):
134
+ """
135
+ Returns words in specified fileids.
136
+ """
137
+
138
+ return concat(
139
+ [
140
+ self._view(
141
+ self.add_root(fileid), mode=NKJPCorpusReader.WORDS_MODE, **kwargs
142
+ ).handle_query()
143
+ for fileid in fileids
144
+ ]
145
+ )
146
+
147
+ @_parse_args
148
+ def tagged_words(self, fileids=None, **kwargs):
149
+ """
150
+ Call with specified tags as a list, e.g. tags=['subst', 'comp'].
151
+ Returns tagged words in specified fileids.
152
+ """
153
+ tags = kwargs.pop("tags", [])
154
+ return concat(
155
+ [
156
+ self._view(
157
+ self.add_root(fileid),
158
+ mode=NKJPCorpusReader.WORDS_MODE,
159
+ tags=tags,
160
+ **kwargs
161
+ ).handle_query()
162
+ for fileid in fileids
163
+ ]
164
+ )
165
+
166
+ @_parse_args
167
+ def raw(self, fileids=None, **kwargs):
168
+ """
169
+ Returns the raw text of the specified fileids.
170
+ """
171
+ return concat(
172
+ [
173
+ self._view(
174
+ self.add_root(fileid), mode=NKJPCorpusReader.RAW_MODE, **kwargs
175
+ ).handle_query()
176
+ for fileid in fileids
177
+ ]
178
+ )
179
+
180
+
181
+ class NKJPCorpus_Header_View(XMLCorpusView):
182
+ def __init__(self, filename, **kwargs):
183
+ """
184
+ HEADER_MODE
185
+ A stream backed corpus view specialized for use with
186
+ header.xml files in NKJP corpus.
187
+ """
188
+ self.tagspec = ".*/sourceDesc$"
189
+ XMLCorpusView.__init__(self, filename + "header.xml", self.tagspec)
190
+
191
+ def handle_query(self):
192
+ self._open()
193
+ header = []
194
+ while True:
195
+ segm = XMLCorpusView.read_block(self, self._stream)
196
+ if len(segm) == 0:
197
+ break
198
+ header.extend(segm)
199
+ self.close()
200
+ return header
201
+
202
+ def handle_elt(self, elt, context):
203
+ titles = elt.findall("bibl/title")
204
+ title = []
205
+ if titles:
206
+ title = "\n".join(title.text.strip() for title in titles)
207
+
208
+ authors = elt.findall("bibl/author")
209
+ author = []
210
+ if authors:
211
+ author = "\n".join(author.text.strip() for author in authors)
212
+
213
+ dates = elt.findall("bibl/date")
214
+ date = []
215
+ if dates:
216
+ date = "\n".join(date.text.strip() for date in dates)
217
+
218
+ publishers = elt.findall("bibl/publisher")
219
+ publisher = []
220
+ if publishers:
221
+ publisher = "\n".join(publisher.text.strip() for publisher in publishers)
222
+
223
+ idnos = elt.findall("bibl/idno")
224
+ idno = []
225
+ if idnos:
226
+ idno = "\n".join(idno.text.strip() for idno in idnos)
227
+
228
+ notes = elt.findall("bibl/note")
229
+ note = []
230
+ if notes:
231
+ note = "\n".join(note.text.strip() for note in notes)
232
+
233
+ return {
234
+ "title": title,
235
+ "author": author,
236
+ "date": date,
237
+ "publisher": publisher,
238
+ "idno": idno,
239
+ "note": note,
240
+ }
241
+
242
+
243
+ class XML_Tool:
244
+ """
245
+ Helper class that copies an XML file to a temporary one with references to the nkjp: namespace stripped out.
246
+ That's needed because the XMLCorpusView assumes that one can find short substrings
247
+ of XML that are valid XML, which is not true if a namespace is declared at top level
248
+ """
249
+
250
+ def __init__(self, root, filename):
251
+ self.read_file = os.path.join(root, filename)
252
+ self.write_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
253
+
254
+ def build_preprocessed_file(self):
255
+ try:
256
+ fr = open(self.read_file)
257
+ fw = self.write_file
258
+ line = " "
259
+ while len(line):
260
+ line = fr.readline()
261
+ x = re.split(r"nkjp:[^ ]* ", line) # in all files
262
+ ret = " ".join(x)
263
+ x = re.split("<nkjp:paren>", ret) # in ann_segmentation.xml
264
+ ret = " ".join(x)
265
+ x = re.split("</nkjp:paren>", ret) # in ann_segmentation.xml
266
+ ret = " ".join(x)
267
+ x = re.split("<choice>", ret) # in ann_segmentation.xml
268
+ ret = " ".join(x)
269
+ x = re.split("</choice>", ret) # in ann_segmentation.xml
270
+ ret = " ".join(x)
271
+ fw.write(ret)
272
+ fr.close()
273
+ fw.close()
274
+ return self.write_file.name
275
+ except Exception as e:
276
+ self.remove_preprocessed_file()
277
+ raise Exception from e
278
+
279
+ def remove_preprocessed_file(self):
280
+ os.remove(self.write_file.name)
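
The preprocessing is plain text surgery: each troublesome construct is split out of the line and the pieces are rejoined with spaces. A self-contained sketch of the same substitutions on one invented line, so the effect is visible without a corpus file:

    import re

    line = '<seg xml:id="s1" nkjp:nps="true" corresp="t1"><nkjp:paren>ab</nkjp:paren></seg>'
    for pattern in (r"nkjp:[^ ]* ", "<nkjp:paren>", "</nkjp:paren>", "<choice>", "</choice>"):
        line = " ".join(re.split(pattern, line))
    print(line)  # the nkjp:-prefixed attribute and the paren wrappers are replaced by spaces
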
281
+
282
+
283
+ class NKJPCorpus_Segmentation_View(XMLCorpusView):
284
+ """
285
+ A stream backed corpus view specialized for use with
286
+ ann_segmentation.xml files in NKJP corpus.
287
+ """
288
+
289
+ def __init__(self, filename, **kwargs):
290
+ self.tagspec = ".*p/.*s"
291
+ # intersperse NKJPCorpus_Text_View
292
+ self.text_view = NKJPCorpus_Text_View(
293
+ filename, mode=NKJPCorpus_Text_View.SENTS_MODE
294
+ )
295
+ self.text_view.handle_query()
296
+ # xml preprocessing
297
+ self.xml_tool = XML_Tool(filename, "ann_segmentation.xml")
298
+ # base class init
299
+ XMLCorpusView.__init__(
300
+ self, self.xml_tool.build_preprocessed_file(), self.tagspec
301
+ )
302
+
303
+ def get_segm_id(self, example_word):
304
+ return example_word.split("(")[1].split(",")[0]
305
+
306
+ def get_sent_beg(self, beg_word):
307
+ # returns index of beginning letter in sentence
308
+ return int(beg_word.split(",")[1])
309
+
310
+ def get_sent_end(self, end_word):
311
+ # returns index of end letter in sentence
312
+ splitted = end_word.split(")")[0].split(",")
313
+ return int(splitted[1]) + int(splitted[2])
314
+
315
+ def get_sentences(self, sent_segm):
316
+ # returns one sentence
317
+ id = self.get_segm_id(sent_segm[0])
318
+ segm = self.text_view.segm_dict[id] # text segment
319
+ beg = self.get_sent_beg(sent_segm[0])
320
+ end = self.get_sent_end(sent_segm[len(sent_segm) - 1])
321
+ return segm[beg:end]
322
+
323
+ def remove_choice(self, segm):
324
+ ret = []
325
+ prev_txt_end = -1
326
+ prev_txt_nr = -1
327
+ for word in segm:
328
+ txt_nr = self.get_segm_id(word)
329
+ # get increasing sequence of ids: in case of choice get first possibility
330
+ if self.get_sent_beg(word) > prev_txt_end - 1 or prev_txt_nr != txt_nr:
331
+ ret.append(word)
332
+ prev_txt_end = self.get_sent_end(word)
333
+ prev_txt_nr = txt_nr
334
+
335
+ return ret
336
+
337
+ def handle_query(self):
338
+ try:
339
+ self._open()
340
+ sentences = []
341
+ while True:
342
+ sent_segm = XMLCorpusView.read_block(self, self._stream)
343
+ if len(sent_segm) == 0:
344
+ break
345
+ for segm in sent_segm:
346
+ segm = self.remove_choice(segm)
347
+ sentences.append(self.get_sentences(segm))
348
+ self.close()
349
+ self.xml_tool.remove_preprocessed_file()
350
+ return sentences
351
+ except Exception as e:
352
+ self.xml_tool.remove_preprocessed_file()
353
+ raise Exception from e
354
+
355
+ def handle_elt(self, elt, context):
356
+ ret = []
357
+ for seg in elt:
358
+ ret.append(seg.get("corresp"))
359
+ return ret
360
+
361
+
362
+ class NKJPCorpus_Text_View(XMLCorpusView):
363
+ """
364
+ A stream backed corpus view specialized for use with
365
+ text.xml files in NKJP corpus.
366
+ """
367
+
368
+ SENTS_MODE = 0
369
+ RAW_MODE = 1
370
+
371
+ def __init__(self, filename, **kwargs):
372
+ self.mode = kwargs.pop("mode", 0)
373
+ self.tagspec = ".*/div/ab"
374
+ self.segm_dict = dict()
375
+ # xml preprocessing
376
+ self.xml_tool = XML_Tool(filename, "text.xml")
377
+ # base class init
378
+ XMLCorpusView.__init__(
379
+ self, self.xml_tool.build_preprocessed_file(), self.tagspec
380
+ )
381
+
382
+ def handle_query(self):
383
+ try:
384
+ self._open()
385
+ x = self.read_block(self._stream)
386
+ self.close()
387
+ self.xml_tool.remove_preprocessed_file()
388
+ return x
389
+ except Exception as e:
390
+ self.xml_tool.remove_preprocessed_file()
391
+ raise Exception from e
392
+
393
+ def read_block(self, stream, tagspec=None, elt_handler=None):
394
+ """
395
+ Returns text as a list of sentences.
396
+ """
397
+ txt = []
398
+ while True:
399
+ segm = XMLCorpusView.read_block(self, stream)
400
+ if len(segm) == 0:
401
+ break
402
+ for part in segm:
403
+ txt.append(part)
404
+
405
+ return [" ".join([segm for segm in txt])]
406
+
407
+ def get_segm_id(self, elt):
408
+ for attr in elt.attrib:
409
+ if attr.endswith("id"):
410
+ return elt.get(attr)
411
+
412
+ def handle_elt(self, elt, context):
413
+ # fill dictionary to use later in sents mode
414
+ if self.mode is NKJPCorpus_Text_View.SENTS_MODE:
415
+ self.segm_dict[self.get_segm_id(elt)] = elt.text
416
+ return elt.text
417
+
418
+
419
+ class NKJPCorpus_Morph_View(XMLCorpusView):
420
+ """
421
+ A stream backed corpus view specialized for use with
422
+ ann_morphosyntax.xml files in NKJP corpus.
423
+ """
424
+
425
+ def __init__(self, filename, **kwargs):
426
+ self.tags = kwargs.pop("tags", None)
427
+ self.tagspec = ".*/seg/fs"
428
+ self.xml_tool = XML_Tool(filename, "ann_morphosyntax.xml")
429
+ XMLCorpusView.__init__(
430
+ self, self.xml_tool.build_preprocessed_file(), self.tagspec
431
+ )
432
+
433
+ def handle_query(self):
434
+ try:
435
+ self._open()
436
+ words = []
437
+ while True:
438
+ segm = XMLCorpusView.read_block(self, self._stream)
439
+ if len(segm) == 0:
440
+ break
441
+ for part in segm:
442
+ if part is not None:
443
+ words.append(part)
444
+ self.close()
445
+ self.xml_tool.remove_preprocessed_file()
446
+ return words
447
+ except Exception as e:
448
+ self.xml_tool.remove_preprocessed_file()
449
+ raise Exception from e
450
+
451
+ def handle_elt(self, elt, context):
452
+ word = ""
453
+ flag = False
454
+ is_not_interp = True
455
+ # if tags not specified, then always return word
456
+ if self.tags is None:
457
+ flag = True
458
+
459
+ for child in elt:
460
+
461
+ # get word
462
+ if "name" in child.keys() and child.attrib["name"] == "orth":
463
+ for symbol in child:
464
+ if symbol.tag == "string":
465
+ word = symbol.text
466
+ elif "name" in child.keys() and child.attrib["name"] == "interps":
467
+ for symbol in child:
468
+ if "type" in symbol.keys() and symbol.attrib["type"] == "lex":
469
+ for symbol2 in symbol:
470
+ if (
471
+ "name" in symbol2.keys()
472
+ and symbol2.attrib["name"] == "ctag"
473
+ ):
474
+ for symbol3 in symbol2:
475
+ if (
476
+ "value" in symbol3.keys()
477
+ and self.tags is not None
478
+ and symbol3.attrib["value"] in self.tags
479
+ ):
480
+ flag = True
481
+ elif (
482
+ "value" in symbol3.keys()
483
+ and symbol3.attrib["value"] == "interp"
484
+ ):
485
+ is_not_interp = False
486
+ if flag and is_not_interp:
487
+ return word
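
A usage sketch mirroring the constructor docstring; the root path and fileid are placeholders, and the tag list is an arbitrary example (see nkjp.pl/poliqarp/help/ense2.html for the full tagset).

    reader = NKJPCorpusReader(root="/home/USER/nltk_data/corpora/nkjp/", fileids="WilkDom")

    print(reader.header())                      # bibliographic fields from header.xml
    print(reader.sents()[:2])                   # sentences rebuilt from ann_segmentation.xml
    print(reader.tagged_words(tags=["subst"]))  # only words whose ctag is in the given list
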
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py ADDED
@@ -0,0 +1,174 @@
1
+ # Natural Language Toolkit: PanLex Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: David Kamholz <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for PanLex Lite, a stripped down version of PanLex distributed
10
+ as an SQLite database. See the README.txt in the panlex_lite corpus directory
11
+ for more information on PanLex Lite.
12
+ """
13
+
14
+ import os
15
+ import sqlite3
16
+
17
+ from nltk.corpus.reader.api import CorpusReader
18
+
19
+
20
+ class PanLexLiteCorpusReader(CorpusReader):
21
+ MEANING_Q = """
22
+ SELECT dnx2.mn, dnx2.uq, dnx2.ap, dnx2.ui, ex2.tt, ex2.lv
23
+ FROM dnx
24
+ JOIN ex ON (ex.ex = dnx.ex)
25
+ JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
26
+ JOIN ex ex2 ON (ex2.ex = dnx2.ex)
27
+ WHERE dnx.ex != dnx2.ex AND ex.tt = ? AND ex.lv = ?
28
+ ORDER BY dnx2.uq DESC
29
+ """
30
+
31
+ TRANSLATION_Q = """
32
+ SELECT s.tt, sum(s.uq) AS trq FROM (
33
+ SELECT ex2.tt, max(dnx.uq) AS uq
34
+ FROM dnx
35
+ JOIN ex ON (ex.ex = dnx.ex)
36
+ JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
37
+ JOIN ex ex2 ON (ex2.ex = dnx2.ex)
38
+ WHERE dnx.ex != dnx2.ex AND ex.lv = ? AND ex.tt = ? AND ex2.lv = ?
39
+ GROUP BY ex2.tt, dnx.ui
40
+ ) s
41
+ GROUP BY s.tt
42
+ ORDER BY trq DESC, s.tt
43
+ """
44
+
45
+ def __init__(self, root):
46
+ self._c = sqlite3.connect(os.path.join(root, "db.sqlite")).cursor()
47
+
48
+ self._uid_lv = {}
49
+ self._lv_uid = {}
50
+
51
+ for row in self._c.execute("SELECT uid, lv FROM lv"):
52
+ self._uid_lv[row[0]] = row[1]
53
+ self._lv_uid[row[1]] = row[0]
54
+
55
+ def language_varieties(self, lc=None):
56
+ """
57
+ Return a list of PanLex language varieties.
58
+
59
+ :param lc: ISO 639 alpha-3 code. If specified, filters returned varieties
60
+ by this code. If unspecified, all varieties are returned.
61
+ :return: the specified language varieties as a list of tuples. The first
62
+ element is the language variety's seven-character uniform identifier,
63
+ and the second element is its default name.
64
+ :rtype: list(tuple)
65
+ """
66
+
67
+ if lc is None:
68
+ return self._c.execute("SELECT uid, tt FROM lv ORDER BY uid").fetchall()
69
+ else:
70
+ return self._c.execute(
71
+ "SELECT uid, tt FROM lv WHERE lc = ? ORDER BY uid", (lc,)
72
+ ).fetchall()
73
+
74
+ def meanings(self, expr_uid, expr_tt):
75
+ """
76
+ Return a list of meanings for an expression.
77
+
78
+ :param expr_uid: the expression's language variety, as a seven-character
79
+ uniform identifier.
80
+ :param expr_tt: the expression's text.
81
+ :return: a list of Meaning objects.
82
+ :rtype: list(Meaning)
83
+ """
84
+
85
+ expr_lv = self._uid_lv[expr_uid]
86
+
87
+ mn_info = {}
88
+
89
+ for i in self._c.execute(self.MEANING_Q, (expr_tt, expr_lv)):
90
+ mn = i[0]
91
+ uid = self._lv_uid[i[5]]
92
+
93
+ if not mn in mn_info:
94
+ mn_info[mn] = {
95
+ "uq": i[1],
96
+ "ap": i[2],
97
+ "ui": i[3],
98
+ "ex": {expr_uid: [expr_tt]},
99
+ }
100
+
101
+ if not uid in mn_info[mn]["ex"]:
102
+ mn_info[mn]["ex"][uid] = []
103
+
104
+ mn_info[mn]["ex"][uid].append(i[4])
105
+
106
+ return [Meaning(mn, mn_info[mn]) for mn in mn_info]
107
+
108
+ def translations(self, from_uid, from_tt, to_uid):
109
+ """
110
+ Return a list of translations for an expression into a single language
111
+ variety.
112
+
113
+ :param from_uid: the source expression's language variety, as a
114
+ seven-character uniform identifier.
115
+ :param from_tt: the source expression's text.
116
+ :param to_uid: the target language variety, as a seven-character
117
+ uniform identifier.
118
+ :return: a list of translation tuples. The first element is the expression
119
+ text and the second element is the translation quality.
120
+ :rtype: list(tuple)
121
+ """
122
+
123
+ from_lv = self._uid_lv[from_uid]
124
+ to_lv = self._uid_lv[to_uid]
125
+
126
+ return self._c.execute(self.TRANSLATION_Q, (from_lv, from_tt, to_lv)).fetchall()
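
A sketch of the two query methods, assuming the `panlex_lite` database has been downloaded; `eng-000` and `spa-000` are standard PanLex uniform identifiers used here as examples.

    from nltk.corpus import panlex_lite as plx

    # Translations of an English expression into one target variety,
    # returned as (text, quality) tuples ordered by quality.
    for text, quality in plx.translations("eng-000", "book", "spa-000"):
        print(text, quality)

    # Meanings group translations by their source; each is a Meaning object.
    for meaning in plx.meanings("eng-000", "book"):
        print(meaning.quality(), meaning.expressions().get("spa-000", []))
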
127
+
128
+
129
+ class Meaning(dict):
130
+ """
131
+ Represents a single PanLex meaning. A meaning is a translation set derived
132
+ from a single source.
133
+ """
134
+
135
+ def __init__(self, mn, attr):
136
+ super().__init__(**attr)
137
+ self["mn"] = mn
138
+
139
+ def id(self):
140
+ """
141
+ :return: the meaning's id.
142
+ :rtype: int
143
+ """
144
+ return self["mn"]
145
+
146
+ def quality(self):
147
+ """
148
+ :return: the meaning's source's quality (0=worst, 9=best).
149
+ :rtype: int
150
+ """
151
+ return self["uq"]
152
+
153
+ def source(self):
154
+ """
155
+ :return: the meaning's source id.
156
+ :rtype: int
157
+ """
158
+ return self["ap"]
159
+
160
+ def source_group(self):
161
+ """
162
+ :return: the meaning's source group id.
163
+ :rtype: int
164
+ """
165
+ return self["ui"]
166
+
167
+ def expressions(self):
168
+ """
169
+ :return: the meaning's expressions as a dictionary whose keys are language
170
+ variety uniform identifiers and whose values are lists of expression
171
+ texts.
172
+ :rtype: dict
173
+ """
174
+ return self["ex"]
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py ADDED
@@ -0,0 +1,95 @@
1
+ # Natural Language Toolkit: Word List Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+
10
+ import re
11
+ from collections import defaultdict, namedtuple
12
+
13
+ from nltk.corpus.reader.api import *
14
+ from nltk.corpus.reader.util import *
15
+ from nltk.corpus.reader.wordlist import WordListCorpusReader
16
+ from nltk.tokenize import line_tokenize
17
+
18
+ PanlexLanguage = namedtuple(
19
+ "PanlexLanguage",
20
+ [
21
+ "panlex_uid", # (1) PanLex UID
22
+ "iso639", # (2) ISO 639 language code
23
+ "iso639_type", # (3) ISO 639 language type, see README
24
+ "script", # (4) normal scripts of expressions
25
+ "name", # (5) PanLex default name
26
+ "langvar_uid", # (6) UID of the language variety in which the default name is an expression
27
+ ],
28
+ )
29
+
30
+
31
+ class PanlexSwadeshCorpusReader(WordListCorpusReader):
32
+ """
33
+ This is a class to read the PanLex Swadesh list from
34
+
35
+ David Kamholz, Jonathan Pool, and Susan M. Colowick (2014).
36
+ PanLex: Building a Resource for Panlingual Lexical Translation.
37
+ In LREC. http://www.lrec-conf.org/proceedings/lrec2014/pdf/1029_Paper.pdf
38
+
39
+ License: CC0 1.0 Universal
40
+ https://creativecommons.org/publicdomain/zero/1.0/legalcode
41
+ """
42
+
43
+ def __init__(self, *args, **kwargs):
44
+ super().__init__(*args, **kwargs)
45
+ # Find the swadesh size using the fileids' path.
46
+ self.swadesh_size = re.match(r"swadesh([0-9].*)\/", self.fileids()[0]).group(1)
47
+ self._languages = {lang.panlex_uid: lang for lang in self.get_languages()}
48
+ self._macro_languages = self.get_macrolanguages()
49
+
50
+ def license(self):
51
+ return "CC0 1.0 Universal"
52
+
53
+ def language_codes(self):
54
+ return self._languages.keys()
55
+
56
+ def get_languages(self):
57
+ for line in self.raw(f"langs{self.swadesh_size}.txt").split("\n"):
58
+ if not line.strip(): # Skip empty lines.
59
+ continue
60
+ yield PanlexLanguage(*line.strip().split("\t"))
61
+
62
+ def get_macrolanguages(self):
63
+ macro_languages = defaultdict(list)
64
+ for lang in self._languages.values():
65
+ macro_languages[lang.iso639].append(lang.panlex_uid)
66
+ return macro_languages
67
+
68
+ def words_by_lang(self, lang_code):
69
+ """
70
+ :return: a list of list(str)
71
+ """
72
+ fileid = f"swadesh{self.swadesh_size}/{lang_code}.txt"
73
+ return [concept.split("\t") for concept in self.words(fileid)]
74
+
75
+ def words_by_iso639(self, iso639_code):
76
+ """
77
+ :return: a list of list(str)
78
+ """
79
+ fileids = [
80
+ f"swadesh{self.swadesh_size}/{lang_code}.txt"
81
+ for lang_code in self._macro_languages[iso639_code]
82
+ ]
83
+ return [
84
+ concept.split("\t") for fileid in fileids for concept in self.words(fileid)
85
+ ]
86
+
87
+ def entries(self, fileids=None):
88
+ """
89
+ :return: a tuple of words for the specified fileids.
90
+ """
91
+ if not fileids:
92
+ fileids = self.fileids()
93
+
94
+ wordlists = [self.words(f) for f in fileids]
95
+ return list(zip(*wordlists))
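
A short sketch of the accessors above, assuming the `panlex_swadesh` data is installed; `eng-000` is a PanLex UID and `eng` the corresponding ISO 639 code.

    from nltk.corpus import panlex_swadesh as swadesh

    print(swadesh.license())                     # 'CC0 1.0 Universal'
    print(list(swadesh.language_codes())[:5])    # PanLex UIDs
    print(swadesh.words_by_lang("eng-000")[:5])  # one tab-split entry per concept
    print(swadesh.words_by_iso639("eng")[:5])    # union over the macrolanguage's varieties
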
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py ADDED
@@ -0,0 +1,227 @@
1
+ # Natural Language Toolkit: Plaintext Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Nitin Madnani <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ A reader for corpora that consist of plaintext documents.
12
+ """
13
+
14
+ import nltk.data
15
+ from nltk.corpus.reader.api import *
16
+ from nltk.corpus.reader.util import *
17
+ from nltk.tokenize import *
18
+
19
+
20
+ class PlaintextCorpusReader(CorpusReader):
21
+ """
22
+ Reader for corpora that consist of plaintext documents. Paragraphs
23
+ are assumed to be split using blank lines. Sentences and words can
24
+ be tokenized using the default tokenizers, or by custom tokenizers
25
+ specified as parameters to the constructor.
26
+
27
+ This corpus reader can be customized (e.g., to skip preface
28
+ sections of specific document formats) by creating a subclass and
29
+ overriding the ``CorpusView`` class variable.
30
+ """
31
+
32
+ CorpusView = StreamBackedCorpusView
33
+ """The corpus view class used by this reader. Subclasses of
34
+ ``PlaintextCorpusReader`` may specify alternative corpus view
35
+ classes (e.g., to skip the preface sections of documents.)"""
36
+
37
+ def __init__(
38
+ self,
39
+ root,
40
+ fileids,
41
+ word_tokenizer=WordPunctTokenizer(),
42
+ sent_tokenizer=nltk.data.LazyLoader("tokenizers/punkt/english.pickle"),
43
+ para_block_reader=read_blankline_block,
44
+ encoding="utf8",
45
+ ):
46
+ r"""
47
+ Construct a new plaintext corpus reader for a set of documents
48
+ located at the given root directory. Example usage:
49
+
50
+ >>> root = '/usr/local/share/nltk_data/corpora/webtext/'
51
+ >>> reader = PlaintextCorpusReader(root, '.*\.txt') # doctest: +SKIP
52
+
53
+ :param root: The root directory for this corpus.
54
+ :param fileids: A list or regexp specifying the fileids in this corpus.
55
+ :param word_tokenizer: Tokenizer for breaking sentences or
56
+ paragraphs into words.
57
+ :param sent_tokenizer: Tokenizer for breaking paragraphs
58
+ into words.
59
+ :param para_block_reader: The block reader used to divide the
60
+ corpus into paragraph blocks.
61
+ """
62
+ CorpusReader.__init__(self, root, fileids, encoding)
63
+ self._word_tokenizer = word_tokenizer
64
+ self._sent_tokenizer = sent_tokenizer
65
+ self._para_block_reader = para_block_reader
66
+
67
+ def words(self, fileids=None):
68
+ """
69
+ :return: the given file(s) as a list of words
70
+ and punctuation symbols.
71
+ :rtype: list(str)
72
+ """
73
+ return concat(
74
+ [
75
+ self.CorpusView(path, self._read_word_block, encoding=enc)
76
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
77
+ ]
78
+ )
79
+
80
+ def sents(self, fileids=None):
81
+ """
82
+ :return: the given file(s) as a list of
83
+ sentences or utterances, each encoded as a list of word
84
+ strings.
85
+ :rtype: list(list(str))
86
+ """
87
+ if self._sent_tokenizer is None:
88
+ raise ValueError("No sentence tokenizer for this corpus")
89
+
90
+ return concat(
91
+ [
92
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
93
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
94
+ ]
95
+ )
96
+
97
+ def paras(self, fileids=None):
98
+ """
99
+ :return: the given file(s) as a list of
100
+ paragraphs, each encoded as a list of sentences, which are
101
+ in turn encoded as lists of word strings.
102
+ :rtype: list(list(list(str)))
103
+ """
104
+ if self._sent_tokenizer is None:
105
+ raise ValueError("No sentence tokenizer for this corpus")
106
+
107
+ return concat(
108
+ [
109
+ self.CorpusView(path, self._read_para_block, encoding=enc)
110
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
111
+ ]
112
+ )
113
+
114
+ def _read_word_block(self, stream):
115
+ words = []
116
+ for i in range(20): # Read 20 lines at a time.
117
+ words.extend(self._word_tokenizer.tokenize(stream.readline()))
118
+ return words
119
+
120
+ def _read_sent_block(self, stream):
121
+ sents = []
122
+ for para in self._para_block_reader(stream):
123
+ sents.extend(
124
+ [
125
+ self._word_tokenizer.tokenize(sent)
126
+ for sent in self._sent_tokenizer.tokenize(para)
127
+ ]
128
+ )
129
+ return sents
130
+
131
+ def _read_para_block(self, stream):
132
+ paras = []
133
+ for para in self._para_block_reader(stream):
134
+ paras.append(
135
+ [
136
+ self._word_tokenizer.tokenize(sent)
137
+ for sent in self._sent_tokenizer.tokenize(para)
138
+ ]
139
+ )
140
+ return paras
141
+
142
+
143
+ class CategorizedPlaintextCorpusReader(CategorizedCorpusReader, PlaintextCorpusReader):
144
+ """
145
+ A reader for plaintext corpora whose documents are divided into
146
+ categories based on their file identifiers.
147
+ """
148
+
149
+ def __init__(self, *args, **kwargs):
150
+ """
151
+ Initialize the corpus reader. Categorization arguments
152
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
153
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
154
+ are passed to the ``PlaintextCorpusReader`` constructor.
155
+ """
156
+ CategorizedCorpusReader.__init__(self, kwargs)
157
+ PlaintextCorpusReader.__init__(self, *args, **kwargs)
158
+
159
+
160
+ # FIXME: Is there a better way? How to not hardcode this?
161
+ # Possibly, add a language kwarg to CategorizedPlaintextCorpusReader to
162
+ # override the `sent_tokenizer`.
163
+ class PortugueseCategorizedPlaintextCorpusReader(CategorizedPlaintextCorpusReader):
164
+ def __init__(self, *args, **kwargs):
165
+ CategorizedCorpusReader.__init__(self, kwargs)
166
+ kwargs["sent_tokenizer"] = nltk.data.LazyLoader(
167
+ "tokenizers/punkt/portuguese.pickle"
168
+ )
169
+ PlaintextCorpusReader.__init__(self, *args, **kwargs)
170
+
171
+
172
+ class EuroparlCorpusReader(PlaintextCorpusReader):
173
+
174
+ """
175
+ Reader for Europarl corpora that consist of plaintext documents.
176
+ Documents are divided into chapters instead of paragraphs as
177
+ for regular plaintext documents. Chapters are separated using blank
178
+ lines. Everything is inherited from ``PlaintextCorpusReader`` except
179
+ that:
180
+
181
+ - Since the corpus is pre-processed and pre-tokenized, the
182
+ word tokenizer should just split the line at whitespaces.
183
+ - For the same reason, the sentence tokenizer should just
184
+ split the paragraph at line breaks.
185
+ - There is a new 'chapters()' method that returns chapters instead
186
+ of paragraphs.
187
+ - The 'paras()' method inherited from PlaintextCorpusReader is
188
+ made non-functional to remove any confusion between chapters
189
+ and paragraphs for Europarl.
190
+ """
191
+
192
+ def _read_word_block(self, stream):
193
+ words = []
194
+ for i in range(20): # Read 20 lines at a time.
195
+ words.extend(stream.readline().split())
196
+ return words
197
+
198
+ def _read_sent_block(self, stream):
199
+ sents = []
200
+ for para in self._para_block_reader(stream):
201
+ sents.extend([sent.split() for sent in para.splitlines()])
202
+ return sents
203
+
204
+ def _read_para_block(self, stream):
205
+ paras = []
206
+ for para in self._para_block_reader(stream):
207
+ paras.append([sent.split() for sent in para.splitlines()])
208
+ return paras
209
+
210
+ def chapters(self, fileids=None):
211
+ """
212
+ :return: the given file(s) as a list of
213
+ chapters, each encoded as a list of sentences, which are
214
+ in turn encoded as lists of word strings.
215
+ :rtype: list(list(list(str)))
216
+ """
217
+ return concat(
218
+ [
219
+ self.CorpusView(fileid, self._read_para_block, encoding=enc)
220
+ for (fileid, enc) in self.abspaths(fileids, True)
221
+ ]
222
+ )
223
+
224
+ def paras(self, fileids=None):
225
+ raise NotImplementedError(
226
+ "The Europarl corpus reader does not support paragraphs. Please use chapters() instead."
227
+ )
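
A minimal usage sketch for the plaintext readers above, assuming a local directory of UTF-8 .txt files (the path below is purely illustrative) and that the punkt sentence-tokenizer data has been downloaded; this is not part of the committed file:

from nltk.corpus.reader.plaintext import PlaintextCorpusReader

corpus_root = "/tmp/my_plaintext_corpus"        # hypothetical directory of .txt files
reader = PlaintextCorpusReader(corpus_root, r".*\.txt")

print(reader.fileids())       # files matched by the fileid regexp
print(reader.words()[:20])    # flat token list (WordPunctTokenizer by default)
print(reader.sents()[:2])     # sentences, via the default punkt sentence tokenizer
print(len(reader.paras()))    # paragraphs, split on blank lines by read_blankline_block
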
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py ADDED
@@ -0,0 +1,95 @@
1
+ # Natural Language Toolkit: PP Attachment Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Read lines from the Prepositional Phrase Attachment Corpus.
11
+
12
+ The PP Attachment Corpus contains several files having the format:
13
+
14
+ sentence_id verb noun1 preposition noun2 attachment
15
+
16
+ For example:
17
+
18
+ 42960 gives authority to administration V
19
+ 46742 gives inventors of microchip N
20
+
21
+ The PP attachment is to the verb phrase (V) or noun phrase (N), i.e.:
22
+
23
+ (VP gives (NP authority) (PP to administration))
24
+ (VP gives (NP inventors (PP of microchip)))
25
+
26
+ The corpus contains the following files:
27
+
28
+ training: training set
29
+ devset: development test set, used for algorithm development.
30
+ test: test set, used to report results
31
+ bitstrings: word classes derived from Mutual Information Clustering for the Wall Street Journal.
32
+
33
+ Ratnaparkhi, Adwait (1994). A Maximum Entropy Model for Prepositional
34
+ Phrase Attachment. Proceedings of the ARPA Human Language Technology
35
+ Conference. [http://www.cis.upenn.edu/~adwait/papers/hlt94.ps]
36
+
37
+ The PP Attachment Corpus is distributed with NLTK with the permission
38
+ of the author.
39
+ """
40
+
41
+ from nltk.corpus.reader.api import *
42
+ from nltk.corpus.reader.util import *
43
+
44
+
45
+ class PPAttachment:
46
+ def __init__(self, sent, verb, noun1, prep, noun2, attachment):
47
+ self.sent = sent
48
+ self.verb = verb
49
+ self.noun1 = noun1
50
+ self.prep = prep
51
+ self.noun2 = noun2
52
+ self.attachment = attachment
53
+
54
+ def __repr__(self):
55
+ return (
56
+ "PPAttachment(sent=%r, verb=%r, noun1=%r, prep=%r, "
57
+ "noun2=%r, attachment=%r)"
58
+ % (self.sent, self.verb, self.noun1, self.prep, self.noun2, self.attachment)
59
+ )
60
+
61
+
62
+ class PPAttachmentCorpusReader(CorpusReader):
63
+ """
64
+ sentence_id verb noun1 preposition noun2 attachment
65
+ """
66
+
67
+ def attachments(self, fileids):
68
+ return concat(
69
+ [
70
+ StreamBackedCorpusView(fileid, self._read_obj_block, encoding=enc)
71
+ for (fileid, enc) in self.abspaths(fileids, True)
72
+ ]
73
+ )
74
+
75
+ def tuples(self, fileids):
76
+ return concat(
77
+ [
78
+ StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc)
79
+ for (fileid, enc) in self.abspaths(fileids, True)
80
+ ]
81
+ )
82
+
83
+ def _read_tuple_block(self, stream):
84
+ line = stream.readline()
85
+ if line:
86
+ return [tuple(line.split())]
87
+ else:
88
+ return []
89
+
90
+ def _read_obj_block(self, stream):
91
+ line = stream.readline()
92
+ if line:
93
+ return [PPAttachment(*line.split())]
94
+ else:
95
+ return []
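
A short sketch of how the reader above is typically used through the installed `ppattach` corpus, assuming the 'ppattach' data package has been fetched with nltk.download(); this example is not part of the committed file:

from nltk.corpus import ppattach

# Each record is a PPAttachment object with the six fields described above.
for inst in ppattach.attachments("training")[:3]:
    print(inst.verb, inst.noun1, inst.prep, inst.noun2, "->", inst.attachment)

# The same records are also available as plain tuples.
print(ppattach.tuples("training")[0])
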
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py ADDED
@@ -0,0 +1,133 @@
1
+ # Natural Language Toolkit: Pros and Cons Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for the Pros and Cons dataset.
10
+
11
+ - Pros and Cons dataset information -
12
+
13
+ Contact: Bing Liu, [email protected]
14
+ https://www.cs.uic.edu/~liub
15
+
16
+ Distributed with permission.
17
+
18
+ Related papers:
19
+
20
+ - Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences".
21
+ Proceedings of the 22nd International Conference on Computational Linguistics
22
+ (Coling-2008), Manchester, 18-22 August, 2008.
23
+
24
+ - Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and Comparing
25
+ Opinions on the Web". Proceedings of the 14th international World Wide Web
26
+ conference (WWW-2005), May 10-14, 2005, in Chiba, Japan.
27
+ """
28
+ import re
29
+
30
+ from nltk.corpus.reader.api import *
31
+ from nltk.tokenize import *
32
+
33
+
34
+ class ProsConsCorpusReader(CategorizedCorpusReader, CorpusReader):
35
+ """
36
+ Reader for the Pros and Cons sentence dataset.
37
+
38
+ >>> from nltk.corpus import pros_cons
39
+ >>> pros_cons.sents(categories='Cons') # doctest: +NORMALIZE_WHITESPACE
40
+ [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy',
41
+ 'to', 'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'],
42
+ ...]
43
+ >>> pros_cons.words('IntegratedPros.txt')
44
+ ['Easy', 'to', 'use', ',', 'economical', '!', ...]
45
+ """
46
+
47
+ CorpusView = StreamBackedCorpusView
48
+
49
+ def __init__(
50
+ self,
51
+ root,
52
+ fileids,
53
+ word_tokenizer=WordPunctTokenizer(),
54
+ encoding="utf8",
55
+ **kwargs
56
+ ):
57
+ """
58
+ :param root: The root directory for the corpus.
59
+ :param fileids: a list or regexp specifying the fileids in the corpus.
60
+ :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
61
+ into words. Default: `WordPunctTokenizer`
62
+ :param encoding: the encoding that should be used to read the corpus.
63
+ :param kwargs: additional parameters passed to CategorizedCorpusReader.
64
+ """
65
+
66
+ CorpusReader.__init__(self, root, fileids, encoding)
67
+ CategorizedCorpusReader.__init__(self, kwargs)
68
+ self._word_tokenizer = word_tokenizer
69
+
70
+ def sents(self, fileids=None, categories=None):
71
+ """
72
+ Return all sentences in the corpus or in the specified files/categories.
73
+
74
+ :param fileids: a list or regexp specifying the ids of the files whose
75
+ sentences have to be returned.
76
+ :param categories: a list specifying the categories whose sentences
77
+ have to be returned.
78
+ :return: the given file(s) as a list of sentences. Each sentence is
79
+ tokenized using the specified word_tokenizer.
80
+ :rtype: list(list(str))
81
+ """
82
+ fileids = self._resolve(fileids, categories)
83
+ if fileids is None:
84
+ fileids = self._fileids
85
+ elif isinstance(fileids, str):
86
+ fileids = [fileids]
87
+ return concat(
88
+ [
89
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
90
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
91
+ ]
92
+ )
93
+
94
+ def words(self, fileids=None, categories=None):
95
+ """
96
+ Return all words and punctuation symbols in the corpus or in the specified
97
+ files/categories.
98
+
99
+ :param fileids: a list or regexp specifying the ids of the files whose
100
+ words have to be returned.
101
+ :param categories: a list specifying the categories whose words have
102
+ to be returned.
103
+ :return: the given file(s) as a list of words and punctuation symbols.
104
+ :rtype: list(str)
105
+ """
106
+ fileids = self._resolve(fileids, categories)
107
+ if fileids is None:
108
+ fileids = self._fileids
109
+ elif isinstance(fileids, str):
110
+ fileids = [fileids]
111
+ return concat(
112
+ [
113
+ self.CorpusView(path, self._read_word_block, encoding=enc)
114
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
115
+ ]
116
+ )
117
+
118
+ def _read_sent_block(self, stream):
119
+ sents = []
120
+ for i in range(20): # Read 20 lines at a time.
121
+ line = stream.readline()
122
+ if not line:
123
+ continue
124
+ sent = re.match(r"^(?!\n)\s*<(Pros|Cons)>(.*)</(?:Pros|Cons)>", line)
125
+ if sent:
126
+ sents.append(self._word_tokenizer.tokenize(sent.group(2).strip()))
127
+ return sents
128
+
129
+ def _read_word_block(self, stream):
130
+ words = []
131
+ for sent in self._read_sent_block(stream):
132
+ words.extend(sent)
133
+ return words
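
The class docstring's doctest can be reproduced with the installed `pros_cons` corpus; a minimal sketch, assuming the 'pros_cons' data package has been downloaded (not part of the committed file):

from nltk.corpus import pros_cons

# Category-aware access: sentences drawn only from the 'Cons' files.
print(pros_cons.sents(categories="Cons")[0])

# Plain word access for a single file.
print(pros_cons.words("IntegratedPros.txt")[:10])

# Categories are resolved by CategorizedCorpusReader before the view is built.
print(pros_cons.categories())
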
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py ADDED
@@ -0,0 +1,331 @@
1
+ # Natural Language Toolkit: Product Reviews Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for reviews corpora (syntax based on Customer Review Corpus).
10
+
11
+ Customer Review Corpus information
12
+ ==================================
13
+
14
+ Annotated by: Minqing Hu and Bing Liu, 2004.
15
+ Department of Computer Science
16
+ University of Illinois at Chicago
17
+
18
+ Contact: Bing Liu, [email protected]
19
+ https://www.cs.uic.edu/~liub
20
+
21
+ Distributed with permission.
22
+
23
+ The "product_reviews_1" and "product_reviews_2" datasets respectively contain
24
+ annotated customer reviews of 5 and 9 products from amazon.com.
25
+
26
+ Related papers:
27
+
28
+ - Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
29
+ Proceedings of the ACM SIGKDD International Conference on Knowledge
30
+ Discovery & Data Mining (KDD-04), 2004.
31
+
32
+ - Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews".
33
+ Proceedings of the Nineteenth National Conference on Artificial Intelligence
34
+ (AAAI-2004), 2004.
35
+
36
+ - Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Approach to
37
+ Opinion Mining." Proceedings of First ACM International Conference on Web
38
+ Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University,
39
+ Stanford, California, USA.
40
+
41
+ Symbols used in the annotated reviews:
42
+
43
+ :[t]: the title of the review: Each [t] tag starts a review.
44
+ :xxxx[+|-n]: xxxx is a product feature.
45
+ :[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest.
46
+ Note that the strength is quite subjective.
47
+ You may want to ignore it and consider only + and -.
48
+ :[-n]: Negative opinion
49
+ :##: start of each sentence. Each line is a sentence.
50
+ :[u]: feature that does not appear in the sentence.
51
+ :[p]: feature that does not appear in the sentence; pronoun resolution is needed.
52
+ :[s]: suggestion or recommendation.
53
+ :[cc]: comparison with a competing product from a different brand.
54
+ :[cs]: comparison with a competing product from the same brand.
55
+
56
+ Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not
57
+ provide separation between different reviews. This is due to the fact that
58
+ the dataset was specifically designed for aspect/feature-based sentiment
59
+ analysis, for which sentence-level annotation is sufficient. For document-
60
+ level classification and analysis, this peculiarity should be taken into
61
+ consideration.
62
+ """
63
+
64
+ import re
65
+
66
+ from nltk.corpus.reader.api import *
67
+ from nltk.tokenize import *
68
+
69
+ TITLE = re.compile(r"^\[t\](.*)$") # [t] Title
70
+ FEATURES = re.compile(
71
+ r"((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]"
72
+ ) # find 'feature' in feature[+3]
73
+ NOTES = re.compile(r"\[(?!t)(p|u|s|cc|cs)\]") # find 'p' in camera[+2][p]
74
+ SENT = re.compile(r"##(.*)$") # find tokenized sentence
75
+
76
+
77
+ class Review:
78
+ """
79
+ A Review is the main block of a ReviewsCorpusReader.
80
+ """
81
+
82
+ def __init__(self, title=None, review_lines=None):
83
+ """
84
+ :param title: the title of the review.
85
+ :param review_lines: the list of the ReviewLines that belong to the Review.
86
+ """
87
+ self.title = title
88
+ if review_lines is None:
89
+ self.review_lines = []
90
+ else:
91
+ self.review_lines = review_lines
92
+
93
+ def add_line(self, review_line):
94
+ """
95
+ Add a line (ReviewLine) to the review.
96
+
97
+ :param review_line: a ReviewLine instance that belongs to the Review.
98
+ """
99
+ assert isinstance(review_line, ReviewLine)
100
+ self.review_lines.append(review_line)
101
+
102
+ def features(self):
103
+ """
104
+ Return a list of features in the review. Each feature is a tuple made of
105
+ the specific item feature and the opinion strength about that feature.
106
+
107
+ :return: all features of the review as a list of tuples (feat, score).
108
+ :rtype: list(tuple)
109
+ """
110
+ features = []
111
+ for review_line in self.review_lines:
112
+ features.extend(review_line.features)
113
+ return features
114
+
115
+ def sents(self):
116
+ """
117
+ Return all tokenized sentences in the review.
118
+
119
+ :return: all sentences of the review as lists of tokens.
120
+ :rtype: list(list(str))
121
+ """
122
+ return [review_line.sent for review_line in self.review_lines]
123
+
124
+ def __repr__(self):
125
+ return 'Review(title="{}", review_lines={})'.format(
126
+ self.title, self.review_lines
127
+ )
128
+
129
+
130
+ class ReviewLine:
131
+ """
132
+ A ReviewLine represents a sentence of the review, together with (optional)
133
+ annotations of its features and notes about the reviewed item.
134
+ """
135
+
136
+ def __init__(self, sent, features=None, notes=None):
137
+ self.sent = sent
138
+ if features is None:
139
+ self.features = []
140
+ else:
141
+ self.features = features
142
+
143
+ if notes is None:
144
+ self.notes = []
145
+ else:
146
+ self.notes = notes
147
+
148
+ def __repr__(self):
149
+ return "ReviewLine(features={}, notes={}, sent={})".format(
150
+ self.features, self.notes, self.sent
151
+ )
152
+
153
+
154
+ class ReviewsCorpusReader(CorpusReader):
155
+ """
156
+ Reader for the Customer Review Data dataset by Hu, Liu (2004).
157
+ Note: we are not applying any sentence tokenization at the moment, just word
158
+ tokenization.
159
+
160
+ >>> from nltk.corpus import product_reviews_1
161
+ >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt')
162
+ >>> review = camera_reviews[0]
163
+ >>> review.sents()[0] # doctest: +NORMALIZE_WHITESPACE
164
+ ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am',
165
+ 'extremely', 'satisfied', 'with', 'the', 'purchase', '.']
166
+ >>> review.features() # doctest: +NORMALIZE_WHITESPACE
167
+ [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'),
168
+ ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'),
169
+ ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'),
170
+ ('option', '+1')]
171
+
172
+ We can also reach the same information directly from the stream:
173
+
174
+ >>> product_reviews_1.features('Canon_G3.txt')
175
+ [('canon powershot g3', '+3'), ('use', '+2'), ...]
176
+
177
+ We can compute stats for specific product features:
178
+
179
+ >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
180
+ >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
181
+ >>> mean = tot / n_reviews
182
+ >>> print(n_reviews, tot, mean)
183
+ 15 24 1.6
184
+ """
185
+
186
+ CorpusView = StreamBackedCorpusView
187
+
188
+ def __init__(
189
+ self, root, fileids, word_tokenizer=WordPunctTokenizer(), encoding="utf8"
190
+ ):
191
+ """
192
+ :param root: The root directory for the corpus.
193
+ :param fileids: a list or regexp specifying the fileids in the corpus.
194
+ :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
195
+ into words. Default: `WordPunctTokenizer`
196
+ :param encoding: the encoding that should be used to read the corpus.
197
+ """
198
+
199
+ CorpusReader.__init__(self, root, fileids, encoding)
200
+ self._word_tokenizer = word_tokenizer
201
+ self._readme = "README.txt"
202
+
203
+ def features(self, fileids=None):
204
+ """
205
+ Return a list of features. Each feature is a tuple made of the specific
206
+ item feature and the opinion strength about that feature.
207
+
208
+ :param fileids: a list or regexp specifying the ids of the files whose
209
+ features have to be returned.
210
+ :return: all features for the item(s) in the given file(s).
211
+ :rtype: list(tuple)
212
+ """
213
+ if fileids is None:
214
+ fileids = self._fileids
215
+ elif isinstance(fileids, str):
216
+ fileids = [fileids]
217
+ return concat(
218
+ [
219
+ self.CorpusView(fileid, self._read_features, encoding=enc)
220
+ for (fileid, enc) in self.abspaths(fileids, True)
221
+ ]
222
+ )
223
+
224
+ def reviews(self, fileids=None):
225
+ """
226
+ Return all the reviews as a list of Review objects. If `fileids` is
227
+ specified, return all the reviews from each of the specified files.
228
+
229
+ :param fileids: a list or regexp specifying the ids of the files whose
230
+ reviews have to be returned.
231
+ :return: the given file(s) as a list of reviews.
232
+ """
233
+ if fileids is None:
234
+ fileids = self._fileids
235
+ return concat(
236
+ [
237
+ self.CorpusView(fileid, self._read_review_block, encoding=enc)
238
+ for (fileid, enc) in self.abspaths(fileids, True)
239
+ ]
240
+ )
241
+
242
+ def sents(self, fileids=None):
243
+ """
244
+ Return all sentences in the corpus or in the specified files.
245
+
246
+ :param fileids: a list or regexp specifying the ids of the files whose
247
+ sentences have to be returned.
248
+ :return: the given file(s) as a list of sentences, each encoded as a
249
+ list of word strings.
250
+ :rtype: list(list(str))
251
+ """
252
+ return concat(
253
+ [
254
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
255
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
256
+ ]
257
+ )
258
+
259
+ def words(self, fileids=None):
260
+ """
261
+ Return all words and punctuation symbols in the corpus or in the specified
262
+ files.
263
+
264
+ :param fileids: a list or regexp specifying the ids of the files whose
265
+ words have to be returned.
266
+ :return: the given file(s) as a list of words and punctuation symbols.
267
+ :rtype: list(str)
268
+ """
269
+ return concat(
270
+ [
271
+ self.CorpusView(path, self._read_word_block, encoding=enc)
272
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
273
+ ]
274
+ )
275
+
276
+ def _read_features(self, stream):
277
+ features = []
278
+ for i in range(20):
279
+ line = stream.readline()
280
+ if not line:
281
+ return features
282
+ features.extend(re.findall(FEATURES, line))
283
+ return features
284
+
285
+ def _read_review_block(self, stream):
286
+ while True:
287
+ line = stream.readline()
288
+ if not line:
289
+ return [] # end of file.
290
+ title_match = re.match(TITLE, line)
291
+ if title_match:
292
+ review = Review(
293
+ title=title_match.group(1).strip()
294
+ ) # We create a new review
295
+ break
296
+
297
+ # Scan until we find another line matching the regexp, or EOF.
298
+ while True:
299
+ oldpos = stream.tell()
300
+ line = stream.readline()
301
+ # End of file:
302
+ if not line:
303
+ return [review]
304
+ # Start of a new review: backup to just before it starts, and
305
+ # return the review we've already collected.
306
+ if re.match(TITLE, line):
307
+ stream.seek(oldpos)
308
+ return [review]
309
+ # Anything else is part of the review line.
310
+ feats = re.findall(FEATURES, line)
311
+ notes = re.findall(NOTES, line)
312
+ sent = re.findall(SENT, line)
313
+ if sent:
314
+ sent = self._word_tokenizer.tokenize(sent[0])
315
+ review_line = ReviewLine(sent=sent, features=feats, notes=notes)
316
+ review.add_line(review_line)
317
+
318
+ def _read_sent_block(self, stream):
319
+ sents = []
320
+ for review in self._read_review_block(stream):
321
+ sents.extend([sent for sent in review.sents()])
322
+ return sents
323
+
324
+ def _read_word_block(self, stream):
325
+ words = []
326
+ for i in range(20): # Read 20 lines at a time.
327
+ line = stream.readline()
328
+ sent = re.findall(SENT, line)
329
+ if sent:
330
+ words.extend(self._word_tokenizer.tokenize(sent[0]))
331
+ return words
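
A short sketch mirroring the docstring example above, assuming the 'product_reviews_1' data package has been downloaded (not part of the committed file):

from nltk.corpus import product_reviews_1

review = product_reviews_1.reviews("Canon_G3.txt")[0]
print(review.title)                 # text captured by the TITLE regexp
print(review.sents()[0])            # first tokenized sentence of that review

# Average opinion strength for one annotated feature, as in the docstring.
scores = [int(score) for feat, score in product_reviews_1.features("Canon_G3.txt")
          if feat == "picture"]
print(len(scores), sum(scores) / len(scores))
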
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/rte.py ADDED
@@ -0,0 +1,146 @@
1
+ # Natural Language Toolkit: RTE Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.
10
+
11
+ The files were taken from the RTE1, RTE2 and RTE3 datasets and the files
12
+ were regularized.
13
+
14
+ Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the
15
+ gold standard annotated files.
16
+
17
+ Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following
18
+ example is taken from RTE3::
19
+
20
+ <pair id="1" entailment="YES" task="IE" length="short" >
21
+
22
+ <t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
23
+ Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known
24
+ company Baikalfinansgroup which was later bought by the Russian
25
+ state-owned oil company Rosneft .</t>
26
+
27
+ <h>Baikalfinansgroup was sold to Rosneft.</h>
28
+ </pair>
29
+
30
+ In order to provide globally unique IDs for each pair, a new attribute
31
+ ``challenge`` has been added to the root element ``entailment-corpus`` of each
32
+ file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the
33
+ challenge number and 'n' is the pair ID.
34
+ """
35
+ from nltk.corpus.reader.api import *
36
+ from nltk.corpus.reader.util import *
37
+ from nltk.corpus.reader.xmldocs import *
38
+
39
+
40
+ def norm(value_string):
41
+ """
42
+ Normalize the string value in an RTE pair's ``value`` or ``entailment``
43
+ attribute as an integer (1, 0).
44
+
45
+ :param value_string: the label used to classify a text/hypothesis pair
46
+ :type value_string: str
47
+ :rtype: int
48
+ """
49
+
50
+ valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0}
51
+ return valdict[value_string.upper()]
52
+
53
+
54
+ class RTEPair:
55
+ """
56
+ Container for RTE text-hypothesis pairs.
57
+
58
+ The entailment relation is signalled by the ``value`` attribute in RTE1, and by
59
+ ``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment``
60
+ attribute of this class.
61
+ """
62
+
63
+ def __init__(
64
+ self,
65
+ pair,
66
+ challenge=None,
67
+ id=None,
68
+ text=None,
69
+ hyp=None,
70
+ value=None,
71
+ task=None,
72
+ length=None,
73
+ ):
74
+ """
75
+ :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
76
+ :param id: identifier for the pair
77
+ :param text: the text component of the pair
78
+ :param hyp: the hypothesis component of the pair
79
+ :param value: classification label for the pair
80
+ :param task: attribute for the particular NLP task that the data was drawn from
81
+ :param length: attribute for the length of the text of the pair
82
+ """
83
+ self.challenge = challenge
84
+ self.id = pair.attrib["id"]
85
+ self.gid = f"{self.challenge}-{self.id}"
86
+ self.text = pair[0].text
87
+ self.hyp = pair[1].text
88
+
89
+ if "value" in pair.attrib:
90
+ self.value = norm(pair.attrib["value"])
91
+ elif "entailment" in pair.attrib:
92
+ self.value = norm(pair.attrib["entailment"])
93
+ else:
94
+ self.value = value
95
+ if "task" in pair.attrib:
96
+ self.task = pair.attrib["task"]
97
+ else:
98
+ self.task = task
99
+ if "length" in pair.attrib:
100
+ self.length = pair.attrib["length"]
101
+ else:
102
+ self.length = length
103
+
104
+ def __repr__(self):
105
+ if self.challenge:
106
+ return f"<RTEPair: gid={self.challenge}-{self.id}>"
107
+ else:
108
+ return "<RTEPair: id=%s>" % self.id
109
+
110
+
111
+ class RTECorpusReader(XMLCorpusReader):
112
+ """
113
+ Corpus reader for corpora in RTE challenges.
114
+
115
+ This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected
116
+ structure of input documents.
117
+ """
118
+
119
+ def _read_etree(self, doc):
120
+ """
121
+ Map the XML input into an RTEPair.
122
+
123
+ This uses the ``getiterator()`` method from the ElementTree package to
124
+ find all the ``<pair>`` elements.
125
+
126
+ :param doc: a parsed XML document
127
+ :rtype: list(RTEPair)
128
+ """
129
+ try:
130
+ challenge = doc.attrib["challenge"]
131
+ except KeyError:
132
+ challenge = None
133
+ pairiter = doc.iter("pair")
134
+ return [RTEPair(pair, challenge=challenge) for pair in pairiter]
135
+
136
+ def pairs(self, fileids):
137
+ """
138
+ Build a list of RTEPairs from a RTE corpus.
139
+
140
+ :param fileids: a list of RTE corpus fileids
141
+ :type: list
142
+ :rtype: list(RTEPair)
143
+ """
144
+ if isinstance(fileids, str):
145
+ fileids = [fileids]
146
+ return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
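
A minimal sketch of reading entailment pairs with the installed `rte` corpus, assuming the 'rte' data package has been downloaded; the fileid names follow the rte*_dev.xml / rte*_test.xml convention described in the module docstring and are otherwise an assumption (not part of the committed file):

from nltk.corpus import rte

pairs = rte.pairs(["rte1_dev.xml", "rte2_dev.xml", "rte3_dev.xml"])
print(len(pairs))

first = pairs[0]
print(first.gid, first.value)    # globally unique id and normalized 0/1 label
print(first.text)
print(first.hyp)
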
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py ADDED
@@ -0,0 +1,296 @@
1
+ # Natural Language Toolkit: SemCor Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Nathan Schneider <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the SemCor Corpus.
10
+ """
11
+
12
+ __docformat__ = "epytext en"
13
+
14
+ from nltk.corpus.reader.api import *
15
+ from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView
16
+ from nltk.tree import Tree
17
+
18
+
19
+ class SemcorCorpusReader(XMLCorpusReader):
20
+ """
21
+ Corpus reader for the SemCor Corpus.
22
+ For access to the complete XML data structure, use the ``xml()``
23
+ method. For access to simple word lists and tagged word lists, use
24
+ ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
25
+ """
26
+
27
+ def __init__(self, root, fileids, wordnet, lazy=True):
28
+ XMLCorpusReader.__init__(self, root, fileids)
29
+ self._lazy = lazy
30
+ self._wordnet = wordnet
31
+
32
+ def words(self, fileids=None):
33
+ """
34
+ :return: the given file(s) as a list of words and punctuation symbols.
35
+ :rtype: list(str)
36
+ """
37
+ return self._items(fileids, "word", False, False, False)
38
+
39
+ def chunks(self, fileids=None):
40
+ """
41
+ :return: the given file(s) as a list of chunks,
42
+ each of which is a list of words and punctuation symbols
43
+ that form a unit.
44
+ :rtype: list(list(str))
45
+ """
46
+ return self._items(fileids, "chunk", False, False, False)
47
+
48
+ def tagged_chunks(self, fileids=None, tag=("pos" or "sem" or "both")):
49
+ """
50
+ :return: the given file(s) as a list of tagged chunks, represented
51
+ in tree form.
52
+ :rtype: list(Tree)
53
+
54
+ :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
55
+ to indicate the kind of tags to include. Semantic tags consist of
56
+ WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
57
+ without a specific entry in WordNet. (Named entities of type 'other'
58
+ have no lemma. Other chunks not in WordNet have no semantic tag.
59
+ Punctuation tokens have `None` for their part of speech tag.)
60
+ """
61
+ return self._items(fileids, "chunk", False, tag != "sem", tag != "pos")
62
+
63
+ def sents(self, fileids=None):
64
+ """
65
+ :return: the given file(s) as a list of sentences, each encoded
66
+ as a list of word strings.
67
+ :rtype: list(list(str))
68
+ """
69
+ return self._items(fileids, "word", True, False, False)
70
+
71
+ def chunk_sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of sentences, each encoded
74
+ as a list of chunks.
75
+ :rtype: list(list(list(str)))
76
+ """
77
+ return self._items(fileids, "chunk", True, False, False)
78
+
79
+ def tagged_sents(self, fileids=None, tag=("pos" or "sem" or "both")):
80
+ """
81
+ :return: the given file(s) as a list of sentences. Each sentence
82
+ is represented as a list of tagged chunks (in tree form).
83
+ :rtype: list(list(Tree))
84
+
85
+ :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
86
+ to indicate the kind of tags to include. Semantic tags consist of
87
+ WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
88
+ without a specific entry in WordNet. (Named entities of type 'other'
89
+ have no lemma. Other chunks not in WordNet have no semantic tag.
90
+ Punctuation tokens have `None` for their part of speech tag.)
91
+ """
92
+ return self._items(fileids, "chunk", True, tag != "sem", tag != "pos")
93
+
94
+ def _items(self, fileids, unit, bracket_sent, pos_tag, sem_tag):
95
+ if unit == "word" and not bracket_sent:
96
+ # the result of the SemcorWordView may be a multiword unit, so the
97
+ # LazyConcatenation will make sure the sentence is flattened
98
+ _ = lambda *args: LazyConcatenation(
99
+ (SemcorWordView if self._lazy else self._words)(*args)
100
+ )
101
+ else:
102
+ _ = SemcorWordView if self._lazy else self._words
103
+ return concat(
104
+ [
105
+ _(fileid, unit, bracket_sent, pos_tag, sem_tag, self._wordnet)
106
+ for fileid in self.abspaths(fileids)
107
+ ]
108
+ )
109
+
110
+ def _words(self, fileid, unit, bracket_sent, pos_tag, sem_tag):
111
+ """
112
+ Helper used to implement the view methods -- returns a list of
113
+ tokens, (segmented) words, chunks, or sentences. The tokens
114
+ and chunks may optionally be tagged (with POS and sense
115
+ information).
116
+
117
+ :param fileid: The name of the underlying file.
118
+ :param unit: One of `'token'`, `'word'`, or `'chunk'`.
119
+ :param bracket_sent: If true, include sentence bracketing.
120
+ :param pos_tag: Whether to include part-of-speech tags.
121
+ :param sem_tag: Whether to include semantic tags, namely WordNet lemma
122
+ and OOV named entity status.
123
+ """
124
+ assert unit in ("token", "word", "chunk")
125
+ result = []
126
+
127
+ xmldoc = ElementTree.parse(fileid).getroot()
128
+ for xmlsent in xmldoc.findall(".//s"):
129
+ sent = []
130
+ for xmlword in _all_xmlwords_in(xmlsent):
131
+ itm = SemcorCorpusReader._word(
132
+ xmlword, unit, pos_tag, sem_tag, self._wordnet
133
+ )
134
+ if unit == "word":
135
+ sent.extend(itm)
136
+ else:
137
+ sent.append(itm)
138
+
139
+ if bracket_sent:
140
+ result.append(SemcorSentence(xmlsent.attrib["snum"], sent))
141
+ else:
142
+ result.extend(sent)
143
+
144
+ assert None not in result
145
+ return result
146
+
147
+ @staticmethod
148
+ def _word(xmlword, unit, pos_tag, sem_tag, wordnet):
149
+ tkn = xmlword.text
150
+ if not tkn:
151
+ tkn = "" # fixes issue 337?
152
+
153
+ lemma = xmlword.get("lemma", tkn) # lemma or NE class
154
+ lexsn = xmlword.get("lexsn") # lex_sense (locator for the lemma's sense)
155
+ if lexsn is not None:
156
+ sense_key = lemma + "%" + lexsn
157
+ wnpos = ("n", "v", "a", "r", "s")[
158
+ int(lexsn.split(":")[0]) - 1
159
+ ] # see http://wordnet.princeton.edu/man/senseidx.5WN.html
160
+ else:
161
+ sense_key = wnpos = None
162
+ redef = xmlword.get(
163
+ "rdf", tkn
164
+ ) # redefinition--this indicates the lookup string
165
+ # does not exactly match the enclosed string, e.g. due to typographical adjustments
166
+ # or discontinuity of a multiword expression. If a redefinition has occurred,
167
+ # the "rdf" attribute holds its inflected form and "lemma" holds its lemma.
168
+ # For NEs, "rdf", "lemma", and "pn" all hold the same value (the NE class).
169
+ sensenum = xmlword.get("wnsn") # WordNet sense number
170
+ isOOVEntity = "pn" in xmlword.keys() # a "personal name" (NE) not in WordNet
171
+ pos = xmlword.get(
172
+ "pos"
173
+ ) # part of speech for the whole chunk (None for punctuation)
174
+
175
+ if unit == "token":
176
+ if not pos_tag and not sem_tag:
177
+ itm = tkn
178
+ else:
179
+ itm = (
180
+ (tkn,)
181
+ + ((pos,) if pos_tag else ())
182
+ + ((lemma, wnpos, sensenum, isOOVEntity) if sem_tag else ())
183
+ )
184
+ return itm
185
+ else:
186
+ ww = tkn.split("_") # TODO: case where punctuation intervenes in MWE
187
+ if unit == "word":
188
+ return ww
189
+ else:
190
+ if sensenum is not None:
191
+ try:
192
+ sense = wordnet.lemma_from_key(sense_key) # Lemma object
193
+ except Exception:
194
+ # cannot retrieve the wordnet.Lemma object. possible reasons:
195
+ # (a) the wordnet corpus is not downloaded;
196
+ # (b) a nonexistent sense is annotated: e.g., such.s.00 triggers:
197
+ # nltk.corpus.reader.wordnet.WordNetError: No synset found for key u'such%5:00:01:specified:00'
198
+ # solution: just use the lemma name as a string
199
+ try:
200
+ sense = "%s.%s.%02d" % (
201
+ lemma,
202
+ wnpos,
203
+ int(sensenum),
204
+ ) # e.g.: reach.v.02
205
+ except ValueError:
206
+ sense = (
207
+ lemma + "." + wnpos + "." + sensenum
208
+ ) # e.g. the sense number may be "2;1"
209
+
210
+ bottom = [Tree(pos, ww)] if pos_tag else ww
211
+
212
+ if sem_tag and isOOVEntity:
213
+ if sensenum is not None:
214
+ return Tree(sense, [Tree("NE", bottom)])
215
+ else: # 'other' NE
216
+ return Tree("NE", bottom)
217
+ elif sem_tag and sensenum is not None:
218
+ return Tree(sense, bottom)
219
+ elif pos_tag:
220
+ return bottom[0]
221
+ else:
222
+ return bottom # chunk as a list
223
+
224
+
225
+ def _all_xmlwords_in(elt, result=None):
226
+ if result is None:
227
+ result = []
228
+ for child in elt:
229
+ if child.tag in ("wf", "punc"):
230
+ result.append(child)
231
+ else:
232
+ _all_xmlwords_in(child, result)
233
+ return result
234
+
235
+
236
+ class SemcorSentence(list):
237
+ """
238
+ A list of words, augmented by an attribute ``num`` used to record
239
+ the sentence identifier (the ``snum`` attribute from the XML).
240
+ """
241
+
242
+ def __init__(self, num, items):
243
+ self.num = num
244
+ list.__init__(self, items)
245
+
246
+
247
+ class SemcorWordView(XMLCorpusView):
248
+ """
249
+ A stream backed corpus view specialized for use with the SemCor corpus.
250
+ """
251
+
252
+ def __init__(self, fileid, unit, bracket_sent, pos_tag, sem_tag, wordnet):
253
+ """
254
+ :param fileid: The name of the underlying file.
255
+ :param unit: One of `'token'`, `'word'`, or `'chunk'`.
256
+ :param bracket_sent: If true, include sentence bracketing.
257
+ :param pos_tag: Whether to include part-of-speech tags.
258
+ :param sem_tag: Whether to include semantic tags, namely WordNet lemma
259
+ and OOV named entity status.
260
+ """
261
+ if bracket_sent:
262
+ tagspec = ".*/s"
263
+ else:
264
+ tagspec = ".*/s/(punc|wf)"
265
+
266
+ self._unit = unit
267
+ self._sent = bracket_sent
268
+ self._pos_tag = pos_tag
269
+ self._sem_tag = sem_tag
270
+ self._wordnet = wordnet
271
+
272
+ XMLCorpusView.__init__(self, fileid, tagspec)
273
+
274
+ def handle_elt(self, elt, context):
275
+ if self._sent:
276
+ return self.handle_sent(elt)
277
+ else:
278
+ return self.handle_word(elt)
279
+
280
+ def handle_word(self, elt):
281
+ return SemcorCorpusReader._word(
282
+ elt, self._unit, self._pos_tag, self._sem_tag, self._wordnet
283
+ )
284
+
285
+ def handle_sent(self, elt):
286
+ sent = []
287
+ for child in elt:
288
+ if child.tag in ("wf", "punc"):
289
+ itm = self.handle_word(child)
290
+ if self._unit == "word":
291
+ sent.extend(itm)
292
+ else:
293
+ sent.append(itm)
294
+ else:
295
+ raise ValueError("Unexpected element %s" % child.tag)
296
+ return SemcorSentence(elt.attrib["snum"], sent)
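
A usage sketch for the reader above via the installed `semcor` corpus, assuming the 'semcor' and 'wordnet' data packages have been downloaded (not part of the committed file):

from nltk.corpus import semcor

print(semcor.words()[:10])           # flat word list
print(semcor.sents()[0])             # first sentence as a list of words

# Chunks carrying both POS and WordNet sense tags come back as small Trees.
for chunk in semcor.tagged_chunks(tag="both")[:5]:
    print(chunk)
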
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py ADDED
@@ -0,0 +1,136 @@
1
+ # Natural Language Toolkit: SentiWordNet
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Christopher Potts <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ An NLTK interface for SentiWordNet
10
+
11
+ SentiWordNet is a lexical resource for opinion mining.
12
+ SentiWordNet assigns to each synset of WordNet three
13
+ sentiment scores: positivity, negativity, and objectivity.
14
+
15
+ For details about SentiWordNet see:
16
+ http://sentiwordnet.isti.cnr.it/
17
+
18
+ >>> from nltk.corpus import sentiwordnet as swn
19
+ >>> print(swn.senti_synset('breakdown.n.03'))
20
+ <breakdown.n.03: PosScore=0.0 NegScore=0.25>
21
+ >>> list(swn.senti_synsets('slow'))
22
+ [SentiSynset('decelerate.v.01'), SentiSynset('slow.v.02'),\
23
+ SentiSynset('slow.v.03'), SentiSynset('slow.a.01'),\
24
+ SentiSynset('slow.a.02'), SentiSynset('dense.s.04'),\
25
+ SentiSynset('slow.a.04'), SentiSynset('boring.s.01'),\
26
+ SentiSynset('dull.s.08'), SentiSynset('slowly.r.01'),\
27
+ SentiSynset('behind.r.03')]
28
+ >>> happy = swn.senti_synsets('happy', 'a')
29
+ >>> happy0 = list(happy)[0]
30
+ >>> happy0.pos_score()
31
+ 0.875
32
+ >>> happy0.neg_score()
33
+ 0.0
34
+ >>> happy0.obj_score()
35
+ 0.125
36
+ """
37
+
38
+ import re
39
+
40
+ from nltk.corpus.reader import CorpusReader
41
+
42
+
43
+ class SentiWordNetCorpusReader(CorpusReader):
44
+ def __init__(self, root, fileids, encoding="utf-8"):
45
+ """
46
+ Construct a new SentiWordNet Corpus Reader, using data from
47
+ the specified file.
48
+ """
49
+ super().__init__(root, fileids, encoding=encoding)
50
+ if len(self._fileids) != 1:
51
+ raise ValueError("Exactly one file must be specified")
52
+ self._db = {}
53
+ self._parse_src_file()
54
+
55
+ def _parse_src_file(self):
56
+ lines = self.open(self._fileids[0]).read().splitlines()
57
+ lines = filter((lambda x: not re.search(r"^\s*#", x)), lines)
58
+ for i, line in enumerate(lines):
59
+ fields = [field.strip() for field in re.split(r"\t+", line)]
60
+ try:
61
+ pos, offset, pos_score, neg_score, synset_terms, gloss = fields
62
+ except BaseException as e:
63
+ raise ValueError(f"Line {i} formatted incorrectly: {line}\n") from e
64
+ if pos and offset:
65
+ offset = int(offset)
66
+ self._db[(pos, offset)] = (float(pos_score), float(neg_score))
67
+
68
+ def senti_synset(self, *vals):
69
+ from nltk.corpus import wordnet as wn
70
+
71
+ if tuple(vals) in self._db:
72
+ pos_score, neg_score = self._db[tuple(vals)]
73
+ pos, offset = vals
74
+ if pos == "s":
75
+ pos = "a"
76
+ synset = wn.synset_from_pos_and_offset(pos, offset)
77
+ return SentiSynset(pos_score, neg_score, synset)
78
+ else:
79
+ synset = wn.synset(vals[0])
80
+ pos = synset.pos()
81
+ if pos == "s":
82
+ pos = "a"
83
+ offset = synset.offset()
84
+ if (pos, offset) in self._db:
85
+ pos_score, neg_score = self._db[(pos, offset)]
86
+ return SentiSynset(pos_score, neg_score, synset)
87
+ else:
88
+ return None
89
+
90
+ def senti_synsets(self, string, pos=None):
91
+ from nltk.corpus import wordnet as wn
92
+
93
+ sentis = []
94
+ synset_list = wn.synsets(string, pos)
95
+ for synset in synset_list:
96
+ sentis.append(self.senti_synset(synset.name()))
97
+ sentis = filter(lambda x: x, sentis)
98
+ return sentis
99
+
100
+ def all_senti_synsets(self):
101
+ from nltk.corpus import wordnet as wn
102
+
103
+ for key, fields in self._db.items():
104
+ pos, offset = key
105
+ pos_score, neg_score = fields
106
+ synset = wn.synset_from_pos_and_offset(pos, offset)
107
+ yield SentiSynset(pos_score, neg_score, synset)
108
+
109
+
110
+ class SentiSynset:
111
+ def __init__(self, pos_score, neg_score, synset):
112
+ self._pos_score = pos_score
113
+ self._neg_score = neg_score
114
+ self._obj_score = 1.0 - (self._pos_score + self._neg_score)
115
+ self.synset = synset
116
+
117
+ def pos_score(self):
118
+ return self._pos_score
119
+
120
+ def neg_score(self):
121
+ return self._neg_score
122
+
123
+ def obj_score(self):
124
+ return self._obj_score
125
+
126
+ def __str__(self):
127
+ """Prints just the Pos/Neg scores for now."""
128
+ s = "<"
129
+ s += self.synset.name() + ": "
130
+ s += "PosScore=%s " % self._pos_score
131
+ s += "NegScore=%s" % self._neg_score
132
+ s += ">"
133
+ return s
134
+
135
+ def __repr__(self):
136
+ return "Senti" + repr(self.synset)
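
Beyond the doctest above, the reader can be used to build a crude lexicon score for a word by averaging over its senses; a sketch, assuming the 'sentiwordnet' and 'wordnet' data packages have been downloaded (not part of the committed file):

from nltk.corpus import sentiwordnet as swn

senses = list(swn.senti_synsets("slow", "a"))    # adjectival senses only
if senses:
    avg_pos = sum(s.pos_score() for s in senses) / len(senses)
    avg_neg = sum(s.neg_score() for s in senses) / len(senses)
    print(round(avg_pos, 3), round(avg_neg, 3))

# Direct lookup of a single synset by name.
print(swn.senti_synset("breakdown.n.03"))
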
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py ADDED
@@ -0,0 +1,75 @@
1
+ # Natural Language Toolkit: Sinica Treebank Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Sinica Treebank Corpus Sample
10
+
11
+ http://rocling.iis.sinica.edu.tw/CKIP/engversion/treebank.htm
12
+
13
+ 10,000 parsed sentences, drawn from the Academia Sinica Balanced
14
+ Corpus of Modern Chinese. Parse tree notation is based on
15
+ Information-based Case Grammar. Tagset documentation is available
16
+ at https://www.sinica.edu.tw/SinicaCorpus/modern_e_wordtype.html
17
+
18
+ Language and Knowledge Processing Group, Institute of Information
19
+ Science, Academia Sinica
20
+
21
+ The data is distributed with the Natural Language Toolkit under the terms of
22
+ the Creative Commons Attribution-NonCommercial-ShareAlike License
23
+ [https://creativecommons.org/licenses/by-nc-sa/2.5/].
24
+
25
+ References:
26
+
27
+ Feng-Yi Chen, Pi-Fang Tsai, Keh-Jiann Chen, and Chu-Ren Huang (1999)
28
+ The Construction of Sinica Treebank. Computational Linguistics and
29
+ Chinese Language Processing, 4, pp 87-104.
30
+
31
+ Huang Chu-Ren, Keh-Jiann Chen, Feng-Yi Chen, Keh-Jiann Chen, Zhao-Ming
32
+ Gao, and Kuang-Yu Chen. 2000. Sinica Treebank: Design Criteria,
33
+ Annotation Guidelines, and On-line Interface. Proceedings of 2nd
34
+ Chinese Language Processing Workshop, Association for Computational
35
+ Linguistics.
36
+
37
+ Chen Keh-Jiann and Yu-Ming Hsieh (2004) Chinese Treebanks and Grammar
38
+ Extraction, Proceedings of IJCNLP-04, pp560-565.
39
+ """
40
+
41
+ from nltk.corpus.reader.api import *
42
+ from nltk.corpus.reader.util import *
43
+ from nltk.tag import map_tag
44
+ from nltk.tree import sinica_parse
45
+
46
+ IDENTIFIER = re.compile(r"^#\S+\s")
47
+ APPENDIX = re.compile(r"(?<=\))#.*$")
48
+ TAGWORD = re.compile(r":([^:()|]+):([^:()|]+)")
49
+ WORD = re.compile(r":[^:()|]+:([^:()|]+)")
50
+
51
+
52
+ class SinicaTreebankCorpusReader(SyntaxCorpusReader):
53
+ """
54
+ Reader for the sinica treebank.
55
+ """
56
+
57
+ def _read_block(self, stream):
58
+ sent = stream.readline()
59
+ sent = IDENTIFIER.sub("", sent)
60
+ sent = APPENDIX.sub("", sent)
61
+ return [sent]
62
+
63
+ def _parse(self, sent):
64
+ return sinica_parse(sent)
65
+
66
+ def _tag(self, sent, tagset=None):
67
+ tagged_sent = [(w, t) for (t, w) in TAGWORD.findall(sent)]
68
+ if tagset and tagset != self._tagset:
69
+ tagged_sent = [
70
+ (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_sent
71
+ ]
72
+ return tagged_sent
73
+
74
+ def _word(self, sent):
75
+ return WORD.findall(sent)
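
A short sketch of the access levels this reader inherits from SyntaxCorpusReader, assuming the 'sinica_treebank' data package has been downloaded (not part of the committed file):

from nltk.corpus import sinica_treebank

print(sinica_treebank.words()[:10])           # segmented words
print(sinica_treebank.tagged_words()[:5])     # (word, tag) pairs extracted by TAGWORD
print(sinica_treebank.parsed_sents()[0])      # an nltk.Tree built by sinica_parse
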
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py ADDED
@@ -0,0 +1,56 @@
1
+ # Natural Language Toolkit: String Category Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Read tuples from a corpus consisting of categorized strings.
11
+ For example, from the question classification corpus:
12
+
13
+ NUM:dist How far is it from Denver to Aspen ?
14
+ LOC:city What county is Modesto , California in ?
15
+ HUM:desc Who was Galileo ?
16
+ DESC:def What is an atom ?
17
+ NUM:date When did Hawaii become a state ?
18
+ """
19
+
20
+ from nltk.corpus.reader.api import *
21
+
22
+ # based on PPAttachmentCorpusReader
23
+ from nltk.corpus.reader.util import *
24
+
25
+
26
+ # [xx] Should the order of the tuple be reversed -- in most other places
27
+ # in nltk, we use the form (data, tag) -- e.g., tagged words and
28
+ # labeled texts for classifiers.
29
+ class StringCategoryCorpusReader(CorpusReader):
30
+ def __init__(self, root, fileids, delimiter=" ", encoding="utf8"):
31
+ """
32
+ :param root: The root directory for this corpus.
33
+ :param fileids: A list or regexp specifying the fileids in this corpus.
34
+ :param delimiter: Field delimiter
35
+ """
36
+ CorpusReader.__init__(self, root, fileids, encoding)
37
+ self._delimiter = delimiter
38
+
39
+ def tuples(self, fileids=None):
40
+ if fileids is None:
41
+ fileids = self._fileids
42
+ elif isinstance(fileids, str):
43
+ fileids = [fileids]
44
+ return concat(
45
+ [
46
+ StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def _read_tuple_block(self, stream):
52
+ line = stream.readline().strip()
53
+ if line:
54
+ return [tuple(line.split(self._delimiter, 1))]
55
+ else:
56
+ return []
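
The question-classification corpus shipped with NLTK ('qc') uses this reader; a sketch, assuming that data package has been downloaded ('train.txt' as a fileid is an assumption about the installed package, not stated above):

from nltk.corpus import qc

# Each tuple is (category, question), split on the first delimiter only.
for category, question in qc.tuples("train.txt")[:3]:
    print(category, "::", question)
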
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py ADDED
@@ -0,0 +1,125 @@
1
+ # Natural Language Toolkit: Switchboard Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ import re
8
+
9
+ from nltk.corpus.reader.api import *
10
+ from nltk.corpus.reader.util import *
11
+ from nltk.tag import map_tag, str2tuple
12
+
13
+
14
+ class SwitchboardTurn(list):
15
+ """
16
+ A specialized list object used to encode switchboard utterances.
17
+ The elements of the list are the words in the utterance; and two
18
+ attributes, ``speaker`` and ``id``, are provided to retrieve the
19
+ speaker identifier and utterance id. Note that utterance ids
20
+ are only unique within a given discourse.
21
+ """
22
+
23
+ def __init__(self, words, speaker, id):
24
+ list.__init__(self, words)
25
+ self.speaker = speaker
26
+ self.id = int(id)
27
+
28
+ def __repr__(self):
29
+ if len(self) == 0:
30
+ text = ""
31
+ elif isinstance(self[0], tuple):
32
+ text = " ".join("%s/%s" % w for w in self)
33
+ else:
34
+ text = " ".join(self)
35
+ return f"<{self.speaker}.{self.id}: {text!r}>"
36
+
37
+
38
+ class SwitchboardCorpusReader(CorpusReader):
39
+ _FILES = ["tagged"]
40
+ # Use the "tagged" file even for non-tagged data methods, since
41
+ # it's tokenized.
42
+
43
+ def __init__(self, root, tagset=None):
44
+ CorpusReader.__init__(self, root, self._FILES)
45
+ self._tagset = tagset
46
+
47
+ def words(self):
48
+ return StreamBackedCorpusView(self.abspath("tagged"), self._words_block_reader)
49
+
50
+ def tagged_words(self, tagset=None):
51
+ def tagged_words_block_reader(stream):
52
+ return self._tagged_words_block_reader(stream, tagset)
53
+
54
+ return StreamBackedCorpusView(self.abspath("tagged"), tagged_words_block_reader)
55
+
56
+ def turns(self):
57
+ return StreamBackedCorpusView(self.abspath("tagged"), self._turns_block_reader)
58
+
59
+ def tagged_turns(self, tagset=None):
60
+ def tagged_turns_block_reader(stream):
61
+ return self._tagged_turns_block_reader(stream, tagset)
62
+
63
+ return StreamBackedCorpusView(self.abspath("tagged"), tagged_turns_block_reader)
64
+
65
+ def discourses(self):
66
+ return StreamBackedCorpusView(
67
+ self.abspath("tagged"), self._discourses_block_reader
68
+ )
69
+
70
+ def tagged_discourses(self, tagset=False):
71
+ def tagged_discourses_block_reader(stream):
72
+ return self._tagged_discourses_block_reader(stream, tagset)
73
+
74
+ return StreamBackedCorpusView(
75
+ self.abspath("tagged"), tagged_discourses_block_reader
76
+ )
77
+
78
+ def _discourses_block_reader(self, stream):
79
+ # returns at most 1 discourse. (The other methods depend on this.)
80
+ return [
81
+ [
82
+ self._parse_utterance(u, include_tag=False)
83
+ for b in read_blankline_block(stream)
84
+ for u in b.split("\n")
85
+ if u.strip()
86
+ ]
87
+ ]
88
+
89
+ def _tagged_discourses_block_reader(self, stream, tagset=None):
90
+ # returns at most 1 discourse. (The other methods depend on this.)
91
+ return [
92
+ [
93
+ self._parse_utterance(u, include_tag=True, tagset=tagset)
94
+ for b in read_blankline_block(stream)
95
+ for u in b.split("\n")
96
+ if u.strip()
97
+ ]
98
+ ]
99
+
100
+ def _turns_block_reader(self, stream):
101
+ return self._discourses_block_reader(stream)[0]
102
+
103
+ def _tagged_turns_block_reader(self, stream, tagset=None):
104
+ return self._tagged_discourses_block_reader(stream, tagset)[0]
105
+
106
+ def _words_block_reader(self, stream):
107
+ return sum(self._discourses_block_reader(stream)[0], [])
108
+
109
+ def _tagged_words_block_reader(self, stream, tagset=None):
110
+ return sum(self._tagged_discourses_block_reader(stream, tagset)[0], [])
111
+
112
+ _UTTERANCE_RE = re.compile(r"(\w+)\.(\d+)\:\s*(.*)")
113
+ _SEP = "/"
114
+
115
+ def _parse_utterance(self, utterance, include_tag, tagset=None):
116
+ m = self._UTTERANCE_RE.match(utterance)
117
+ if m is None:
118
+ raise ValueError("Bad utterance %r" % utterance)
119
+ speaker, id, text = m.groups()
120
+ words = [str2tuple(s, self._SEP) for s in text.split()]
121
+ if not include_tag:
122
+ words = [w for (w, t) in words]
123
+ elif tagset and tagset != self._tagset:
124
+ words = [(w, map_tag(self._tagset, tagset, t)) for (w, t) in words]
125
+ return SwitchboardTurn(words, speaker, id)
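
A usage sketch for the reader above, assuming the 'switchboard' data package has been downloaded (not part of the committed file):

from nltk.corpus import switchboard

print(switchboard.words()[:10])

# Turns keep the speaker and utterance id alongside the tokens.
turn = switchboard.turns()[0]
print(turn.speaker, turn.id, list(turn)[:8])

# Tagged variants yield (word, tag) tuples parsed with str2tuple.
print(switchboard.tagged_words()[:5])
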
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py ADDED
@@ -0,0 +1,354 @@
1
+ # Natural Language Toolkit: Tagged Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jacob Perkins <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ A reader for corpora whose documents contain part-of-speech-tagged words.
12
+ """
13
+
14
+ import os
15
+
16
+ from nltk.corpus.reader.api import *
17
+ from nltk.corpus.reader.timit import read_timit_block
18
+ from nltk.corpus.reader.util import *
19
+ from nltk.tag import map_tag, str2tuple
20
+ from nltk.tokenize import *
21
+
22
+
23
+ class TaggedCorpusReader(CorpusReader):
24
+ """
25
+ Reader for simple part-of-speech tagged corpora. Paragraphs are
26
+ assumed to be split using blank lines. Sentences and words can be
27
+ tokenized using the default tokenizers, or by custom tokenizers
28
+ specified as parameters to the constructor. Words are parsed
29
+ using ``nltk.tag.str2tuple``. By default, ``'/'`` is used as the
30
+ separator. I.e., words should have the form::
31
+
32
+ word1/tag1 word2/tag2 word3/tag3 ...
33
+
34
+ But custom separators may be specified as parameters to the
35
+ constructor. Part of speech tags are case-normalized to upper
36
+ case.
37
+ """
38
+
39
+ def __init__(
40
+ self,
41
+ root,
42
+ fileids,
43
+ sep="/",
44
+ word_tokenizer=WhitespaceTokenizer(),
45
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
46
+ para_block_reader=read_blankline_block,
47
+ encoding="utf8",
48
+ tagset=None,
49
+ ):
50
+ """
51
+ Construct a new Tagged Corpus reader for a set of documents
52
+ located at the given root directory. Example usage:
53
+
54
+ >>> root = '/...path to corpus.../'
55
+ >>> reader = TaggedCorpusReader(root, r'.*\.txt') # doctest: +SKIP
56
+
57
+ :param root: The root directory for this corpus.
58
+ :param fileids: A list or regexp specifying the fileids in this corpus.
59
+ """
60
+ CorpusReader.__init__(self, root, fileids, encoding)
61
+ self._sep = sep
62
+ self._word_tokenizer = word_tokenizer
63
+ self._sent_tokenizer = sent_tokenizer
64
+ self._para_block_reader = para_block_reader
65
+ self._tagset = tagset
66
+
67
+ def words(self, fileids=None):
68
+ """
69
+ :return: the given file(s) as a list of words
70
+ and punctuation symbols.
71
+ :rtype: list(str)
72
+ """
73
+ return concat(
74
+ [
75
+ TaggedCorpusView(
76
+ fileid,
77
+ enc,
78
+ False,
79
+ False,
80
+ False,
81
+ self._sep,
82
+ self._word_tokenizer,
83
+ self._sent_tokenizer,
84
+ self._para_block_reader,
85
+ None,
86
+ )
87
+ for (fileid, enc) in self.abspaths(fileids, True)
88
+ ]
89
+ )
90
+
91
+ def sents(self, fileids=None):
92
+ """
93
+ :return: the given file(s) as a list of
94
+ sentences or utterances, each encoded as a list of word
95
+ strings.
96
+ :rtype: list(list(str))
97
+ """
98
+ return concat(
99
+ [
100
+ TaggedCorpusView(
101
+ fileid,
102
+ enc,
103
+ False,
104
+ True,
105
+ False,
106
+ self._sep,
107
+ self._word_tokenizer,
108
+ self._sent_tokenizer,
109
+ self._para_block_reader,
110
+ None,
111
+ )
112
+ for (fileid, enc) in self.abspaths(fileids, True)
113
+ ]
114
+ )
115
+
116
+ def paras(self, fileids=None):
117
+ """
118
+ :return: the given file(s) as a list of
119
+ paragraphs, each encoded as a list of sentences, which are
120
+ in turn encoded as lists of word strings.
121
+ :rtype: list(list(list(str)))
122
+ """
123
+ return concat(
124
+ [
125
+ TaggedCorpusView(
126
+ fileid,
127
+ enc,
128
+ False,
129
+ True,
130
+ True,
131
+ self._sep,
132
+ self._word_tokenizer,
133
+ self._sent_tokenizer,
134
+ self._para_block_reader,
135
+ None,
136
+ )
137
+ for (fileid, enc) in self.abspaths(fileids, True)
138
+ ]
139
+ )
140
+
141
+ def tagged_words(self, fileids=None, tagset=None):
142
+ """
143
+ :return: the given file(s) as a list of tagged
144
+ words and punctuation symbols, encoded as tuples
145
+ ``(word,tag)``.
146
+ :rtype: list(tuple(str,str))
147
+ """
148
+ if tagset and tagset != self._tagset:
149
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
150
+ else:
151
+ tag_mapping_function = None
152
+ return concat(
153
+ [
154
+ TaggedCorpusView(
155
+ fileid,
156
+ enc,
157
+ True,
158
+ False,
159
+ False,
160
+ self._sep,
161
+ self._word_tokenizer,
162
+ self._sent_tokenizer,
163
+ self._para_block_reader,
164
+ tag_mapping_function,
165
+ )
166
+ for (fileid, enc) in self.abspaths(fileids, True)
167
+ ]
168
+ )
169
+
170
+ def tagged_sents(self, fileids=None, tagset=None):
171
+ """
172
+ :return: the given file(s) as a list of
173
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
174
+
175
+ :rtype: list(list(tuple(str,str)))
176
+ """
177
+ if tagset and tagset != self._tagset:
178
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
179
+ else:
180
+ tag_mapping_function = None
181
+ return concat(
182
+ [
183
+ TaggedCorpusView(
184
+ fileid,
185
+ enc,
186
+ True,
187
+ True,
188
+ False,
189
+ self._sep,
190
+ self._word_tokenizer,
191
+ self._sent_tokenizer,
192
+ self._para_block_reader,
193
+ tag_mapping_function,
194
+ )
195
+ for (fileid, enc) in self.abspaths(fileids, True)
196
+ ]
197
+ )
198
+
199
+ def tagged_paras(self, fileids=None, tagset=None):
200
+ """
201
+ :return: the given file(s) as a list of
202
+ paragraphs, each encoded as a list of sentences, which are
203
+ in turn encoded as lists of ``(word,tag)`` tuples.
204
+ :rtype: list(list(list(tuple(str,str))))
205
+ """
206
+ if tagset and tagset != self._tagset:
207
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
208
+ else:
209
+ tag_mapping_function = None
210
+ return concat(
211
+ [
212
+ TaggedCorpusView(
213
+ fileid,
214
+ enc,
215
+ True,
216
+ True,
217
+ True,
218
+ self._sep,
219
+ self._word_tokenizer,
220
+ self._sent_tokenizer,
221
+ self._para_block_reader,
222
+ tag_mapping_function,
223
+ )
224
+ for (fileid, enc) in self.abspaths(fileids, True)
225
+ ]
226
+ )
227
+
228
+
229
+ class CategorizedTaggedCorpusReader(CategorizedCorpusReader, TaggedCorpusReader):
230
+ """
231
+ A reader for part-of-speech tagged corpora whose documents are
232
+ divided into categories based on their file identifiers.
233
+ """
234
+
235
+ def __init__(self, *args, **kwargs):
236
+ """
237
+ Initialize the corpus reader. Categorization arguments
238
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
239
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
240
+ are passed to the ``TaggedCorpusReader``.
241
+ """
242
+ CategorizedCorpusReader.__init__(self, kwargs)
243
+ TaggedCorpusReader.__init__(self, *args, **kwargs)
244
+
245
+ def tagged_words(self, fileids=None, categories=None, tagset=None):
246
+ return super().tagged_words(self._resolve(fileids, categories), tagset)
247
+
248
+ def tagged_sents(self, fileids=None, categories=None, tagset=None):
249
+ return super().tagged_sents(self._resolve(fileids, categories), tagset)
250
+
251
+ def tagged_paras(self, fileids=None, categories=None, tagset=None):
252
+ return super().tagged_paras(self._resolve(fileids, categories), tagset)
253
+
254
+
255
+ class TaggedCorpusView(StreamBackedCorpusView):
256
+ """
257
+ A specialized corpus view for tagged documents. It can be
258
+ customized via flags to divide the tagged corpus documents up by
259
+ sentence or paragraph, and to include or omit part of speech tags.
260
+ ``TaggedCorpusView`` objects are typically created by
261
+ ``TaggedCorpusReader`` (not directly by nltk users).
262
+ """
263
+
264
+ def __init__(
265
+ self,
266
+ corpus_file,
267
+ encoding,
268
+ tagged,
269
+ group_by_sent,
270
+ group_by_para,
271
+ sep,
272
+ word_tokenizer,
273
+ sent_tokenizer,
274
+ para_block_reader,
275
+ tag_mapping_function=None,
276
+ ):
277
+ self._tagged = tagged
278
+ self._group_by_sent = group_by_sent
279
+ self._group_by_para = group_by_para
280
+ self._sep = sep
281
+ self._word_tokenizer = word_tokenizer
282
+ self._sent_tokenizer = sent_tokenizer
283
+ self._para_block_reader = para_block_reader
284
+ self._tag_mapping_function = tag_mapping_function
285
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
286
+
287
+ def read_block(self, stream):
288
+ """Reads one paragraph at a time."""
289
+ block = []
290
+ for para_str in self._para_block_reader(stream):
291
+ para = []
292
+ for sent_str in self._sent_tokenizer.tokenize(para_str):
293
+ sent = [
294
+ str2tuple(s, self._sep)
295
+ for s in self._word_tokenizer.tokenize(sent_str)
296
+ ]
297
+ if self._tag_mapping_function:
298
+ sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent]
299
+ if not self._tagged:
300
+ sent = [w for (w, t) in sent]
301
+ if self._group_by_sent:
302
+ para.append(sent)
303
+ else:
304
+ para.extend(sent)
305
+ if self._group_by_para:
306
+ block.append(para)
307
+ else:
308
+ block.extend(para)
309
+ return block
310
+
311
+
312
+ # needs to implement simplified tags
313
+ class MacMorphoCorpusReader(TaggedCorpusReader):
314
+ """
315
+ A corpus reader for the MAC_MORPHO corpus. Each line contains a
316
+ single tagged word, using '_' as a separator. Sentence boundaries
317
+ are based on the end-sentence tag ('_.'). Paragraph information
318
+ is not included in the corpus, so each paragraph returned by
319
+ ``self.paras()`` and ``self.tagged_paras()`` contains a single
320
+ sentence.
321
+ """
322
+
323
+ def __init__(self, root, fileids, encoding="utf8", tagset=None):
324
+ TaggedCorpusReader.__init__(
325
+ self,
326
+ root,
327
+ fileids,
328
+ sep="_",
329
+ word_tokenizer=LineTokenizer(),
330
+ sent_tokenizer=RegexpTokenizer(".*\n"),
331
+ para_block_reader=self._read_block,
332
+ encoding=encoding,
333
+ tagset=tagset,
334
+ )
335
+
336
+ def _read_block(self, stream):
337
+ return read_regexp_block(stream, r".*", r".*_\.")
338
+
339
+
340
+ class TimitTaggedCorpusReader(TaggedCorpusReader):
341
+ """
342
+ A corpus reader for tagged sentences that are included in the TIMIT corpus.
343
+ """
344
+
345
+ def __init__(self, *args, **kwargs):
346
+ TaggedCorpusReader.__init__(
347
+ self, para_block_reader=read_timit_block, *args, **kwargs
348
+ )
349
+
350
+ def paras(self):
351
+ raise NotImplementedError("use sents() instead")
352
+
353
+ def tagged_paras(self):
354
+ raise NotImplementedError("use tagged_sents() instead")
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/timit.py ADDED
@@ -0,0 +1,510 @@
1
+ # Natural Language Toolkit: TIMIT Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2007 NLTK Project
4
+ # Author: Haejoong Lee <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jacob Perkins <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ # [xx] this docstring is out-of-date:
11
+ """
12
+ Read tokens, phonemes and audio data from the NLTK TIMIT Corpus.
13
+
14
+ This corpus contains a selected portion of the TIMIT corpus.
15
+
16
+ - 16 speakers from 8 dialect regions
17
+ - 1 male and 1 female from each dialect region
18
+ - total 130 sentences (10 sentences per speaker. Note that some
19
+ sentences are shared among other speakers, especially sa1 and sa2
20
+ are spoken by all speakers.)
21
+ - total 160 recording of sentences (10 recordings per speaker)
22
+ - audio format: NIST Sphere, single channel, 16kHz sampling,
23
+ 16 bit sample, PCM encoding
24
+
25
+
26
+ Module contents
27
+ ===============
28
+
29
+ The timit corpus reader provides 4 functions and 4 data items.
30
+
31
+ - utterances
32
+
33
+ List of utterances in the corpus. There are 160 utterances in total,
34
+ each of which corresponds to a unique utterance of a speaker.
35
+ Here's an example of an utterance identifier in the list::
36
+
37
+ dr1-fvmh0/sx206
38
+ - _---- _---
39
+ | | | | |
40
+ | | | | |
41
+ | | | | `--- sentence number
42
+ | | | `----- sentence type (a:all, i:shared, x:exclusive)
43
+ | | `--------- speaker ID
44
+ | `------------ sex (m:male, f:female)
45
+ `-------------- dialect region (1..8)
46
+
47
+ - speakers
48
+
49
+ List of speaker IDs. An example of speaker ID::
50
+
51
+ dr1-fvmh0
52
+
53
+ Note that if you split an item ID on '/' and take the first element of
54
+ the result, you will get a speaker ID.
55
+
56
+ >>> itemid = 'dr1-fvmh0/sx206'
57
+ >>> spkrid , sentid = itemid.split('/')
58
+ >>> spkrid
59
+ 'dr1-fvmh0'
60
+
61
+ The second element of the result is a sentence ID.
62
+
63
+ - dictionary()
64
+
65
+ Phonetic dictionary of words contained in this corpus. This is a Python
66
+ dictionary from words to phoneme lists.
67
+
68
+ - spkrinfo()
69
+
70
+ Speaker information table. It's a Python dictionary from speaker IDs to
71
+ records of 10 fields. Speaker IDs are the same as the ones in timit.speakers.
72
+ Each record is a dictionary from field names to values, and the fields are
73
+ as follows::
74
+
75
+ id speaker ID as defined in the original TIMIT speaker info table
76
+ sex speaker gender (M:male, F:female)
77
+ dr speaker dialect region (1:new england, 2:northern,
78
+ 3:north midland, 4:south midland, 5:southern, 6:new york city,
79
+ 7:western, 8:army brat (moved around))
80
+ use corpus type (TRN:training, TST:test)
81
+ in this sample corpus only TRN is available
82
+ recdate recording date
83
+ birthdate speaker birth date
84
+ ht speaker height
85
+ race speaker race (WHT:white, BLK:black, AMR:american indian,
86
+ SPN:spanish-american, ORN:oriental, ???:unknown)
87
+ edu speaker education level (HS:high school, AS:associate degree,
88
+ BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA),
89
+ PHD:doctorate degree (PhD,JD,MD), ??:unknown)
90
+ comments comments by the recorder
91
+
92
+ The 4 functions are as follows.
93
+
94
+ - tokenized(sentences=items, offset=False)
95
+
96
+ Given a list of items, returns an iterator of a list of word lists,
97
+ each of which corresponds to an item (sentence). If offset is set to True,
98
+ each element of the word list is a tuple of word(string), start offset and
99
+ end offset, where offset is represented as a number of 16kHz samples.
100
+
101
+ - phonetic(sentences=items, offset=False)
102
+
103
+ Given a list of items, returns an iterator of a list of phoneme lists,
104
+ each of which corresponds to an item (sentence). If offset is set to True,
105
+ each element of the phoneme list is a tuple of word(string), start offset
106
+ and end offset, where offset is represented as a number of 16kHz samples.
107
+
108
+ - audiodata(item, start=0, end=None)
109
+
110
+ Given an item, returns a chunk of audio samples formatted into a string.
111
+ When the function is called, if start and end are omitted, the entire
112
+ samples of the recording will be returned. If only end is omitted,
113
+ samples from the start offset to the end of the recording will be returned.
114
+
115
+ - play(data)
116
+
117
+ Play the given audio samples. The audio samples can be obtained from the
118
+ timit.audiodata function.
119
+
120
+ """
121
+ import sys
122
+ import time
123
+
124
+ from nltk.corpus.reader.api import *
125
+ from nltk.internals import import_from_stdlib
126
+ from nltk.tree import Tree
127
+
128
+
129
+ class TimitCorpusReader(CorpusReader):
130
+ """
131
+ Reader for the TIMIT corpus (or any other corpus with the same
132
+ file layout and use of file formats). The corpus root directory
133
+ should contain the following files:
134
+
135
+ - timitdic.txt: dictionary of standard transcriptions
136
+ - spkrinfo.txt: table of speaker information
137
+
138
+ In addition, the root directory should contain one subdirectory
139
+ for each speaker, containing three files for each utterance:
140
+
141
+ - <utterance-id>.txt: text content of utterances
142
+ - <utterance-id>.wrd: tokenized text content of utterances
143
+ - <utterance-id>.phn: phonetic transcription of utterances
144
+ - <utterance-id>.wav: utterance sound file
145
+ """
146
+
147
+ _FILE_RE = r"(\w+-\w+/\w+\.(phn|txt|wav|wrd))|" + r"timitdic\.txt|spkrinfo\.txt"
148
+ """A regexp matching fileids that are used by this corpus reader."""
149
+ _UTTERANCE_RE = r"\w+-\w+/\w+\.txt"
150
+
151
+ def __init__(self, root, encoding="utf8"):
152
+ """
153
+ Construct a new TIMIT corpus reader in the given directory.
154
+ :param root: The root directory for this corpus.
155
+ """
156
+ # Ensure that wave files don't get treated as unicode data:
157
+ if isinstance(encoding, str):
158
+ encoding = [(r".*\.wav", None), (".*", encoding)]
159
+
160
+ CorpusReader.__init__(
161
+ self, root, find_corpus_fileids(root, self._FILE_RE), encoding=encoding
162
+ )
163
+
164
+ self._utterances = [
165
+ name[:-4] for name in find_corpus_fileids(root, self._UTTERANCE_RE)
166
+ ]
167
+ """A list of the utterance identifiers for all utterances in
168
+ this corpus."""
169
+
170
+ self._speakerinfo = None
171
+ self._root = root
172
+ self.speakers = sorted({u.split("/")[0] for u in self._utterances})
173
+
174
+ def fileids(self, filetype=None):
175
+ """
176
+ Return a list of file identifiers for the files that make up
177
+ this corpus.
178
+
179
+ :param filetype: If specified, then ``filetype`` indicates that
180
+ only the files that have the given type should be
181
+ returned. Accepted values are: ``txt``, ``wrd``, ``phn``,
182
+ ``wav``, or ``metadata``.
183
+ """
184
+ if filetype is None:
185
+ return CorpusReader.fileids(self)
186
+ elif filetype in ("txt", "wrd", "phn", "wav"):
187
+ return [f"{u}.{filetype}" for u in self._utterances]
188
+ elif filetype == "metadata":
189
+ return ["timitdic.txt", "spkrinfo.txt"]
190
+ else:
191
+ raise ValueError("Bad value for filetype: %r" % filetype)
192
+
193
+ def utteranceids(
194
+ self, dialect=None, sex=None, spkrid=None, sent_type=None, sentid=None
195
+ ):
196
+ """
197
+ :return: A list of the utterance identifiers for all
198
+ utterances in this corpus, or for the given speaker, dialect
199
+ region, gender, sentence type, or sentence number, if
200
+ specified.
201
+ """
202
+ if isinstance(dialect, str):
203
+ dialect = [dialect]
204
+ if isinstance(sex, str):
205
+ sex = [sex]
206
+ if isinstance(spkrid, str):
207
+ spkrid = [spkrid]
208
+ if isinstance(sent_type, str):
209
+ sent_type = [sent_type]
210
+ if isinstance(sentid, str):
211
+ sentid = [sentid]
212
+
213
+ utterances = self._utterances[:]
214
+ if dialect is not None:
215
+ utterances = [u for u in utterances if u[2] in dialect]
216
+ if sex is not None:
217
+ utterances = [u for u in utterances if u[4] in sex]
218
+ if spkrid is not None:
219
+ utterances = [u for u in utterances if u[:9] in spkrid]
220
+ if sent_type is not None:
221
+ utterances = [u for u in utterances if u[11] in sent_type]
222
+ if sentid is not None:
223
+ utterances = [u for u in utterances if u[10:] in sentid]
224
+ return utterances
225
+
226
+ def transcription_dict(self):
227
+ """
228
+ :return: A dictionary giving the 'standard' transcription for
229
+ each word.
230
+ """
231
+ _transcriptions = {}
232
+ with self.open("timitdic.txt") as fp:
233
+ for line in fp:
234
+ if not line.strip() or line[0] == ";":
235
+ continue
236
+ m = re.match(r"\s*(\S+)\s+/(.*)/\s*$", line)
237
+ if not m:
238
+ raise ValueError("Bad line: %r" % line)
239
+ _transcriptions[m.group(1)] = m.group(2).split()
240
+ return _transcriptions
241
+
242
+ def spkrid(self, utterance):
243
+ return utterance.split("/")[0]
244
+
245
+ def sentid(self, utterance):
246
+ return utterance.split("/")[1]
247
+
248
+ def utterance(self, spkrid, sentid):
249
+ return f"{spkrid}/{sentid}"
250
+
251
+ def spkrutteranceids(self, speaker):
252
+ """
253
+ :return: A list of all utterances associated with a given
254
+ speaker.
255
+ """
256
+ return [
257
+ utterance
258
+ for utterance in self._utterances
259
+ if utterance.startswith(speaker + "/")
260
+ ]
261
+
262
+ def spkrinfo(self, speaker):
263
+ """
264
+ :return: A ``SpeakerInfo`` record for the given speaker.
265
+ """
266
+ if speaker in self._utterances:
267
+ speaker = self.spkrid(speaker)
268
+
269
+ if self._speakerinfo is None:
270
+ self._speakerinfo = {}
271
+ with self.open("spkrinfo.txt") as fp:
272
+ for line in fp:
273
+ if not line.strip() or line[0] == ";":
274
+ continue
275
+ rec = line.strip().split(None, 9)
276
+ key = f"dr{rec[2]}-{rec[1].lower()}{rec[0].lower()}"
277
+ self._speakerinfo[key] = SpeakerInfo(*rec)
278
+
279
+ return self._speakerinfo[speaker]
280
+
281
+ def phones(self, utterances=None):
282
+ results = []
283
+ for fileid in self._utterance_fileids(utterances, ".phn"):
284
+ with self.open(fileid) as fp:
285
+ for line in fp:
286
+ if line.strip():
287
+ results.append(line.split()[-1])
288
+ return results
289
+
290
+ def phone_times(self, utterances=None):
291
+ """
292
+ offset is represented as a number of 16kHz samples!
293
+ """
294
+ results = []
295
+ for fileid in self._utterance_fileids(utterances, ".phn"):
296
+ with self.open(fileid) as fp:
297
+ for line in fp:
298
+ if line.strip():
299
+ results.append(
300
+ (
301
+ line.split()[2],
302
+ int(line.split()[0]),
303
+ int(line.split()[1]),
304
+ )
305
+ )
306
+ return results
307
+
308
+ def words(self, utterances=None):
309
+ results = []
310
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
311
+ with self.open(fileid) as fp:
312
+ for line in fp:
313
+ if line.strip():
314
+ results.append(line.split()[-1])
315
+ return results
316
+
317
+ def word_times(self, utterances=None):
318
+ results = []
319
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
320
+ with self.open(fileid) as fp:
321
+ for line in fp:
322
+ if line.strip():
323
+ results.append(
324
+ (
325
+ line.split()[2],
326
+ int(line.split()[0]),
327
+ int(line.split()[1]),
328
+ )
329
+ )
330
+ return results
331
+
332
+ def sents(self, utterances=None):
333
+ results = []
334
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
335
+ with self.open(fileid) as fp:
336
+ results.append([line.split()[-1] for line in fp if line.strip()])
337
+ return results
338
+
339
+ def sent_times(self, utterances=None):
340
+ # TODO: Check this
341
+ return [
342
+ (
343
+ line.split(None, 2)[-1].strip(),
344
+ int(line.split()[0]),
345
+ int(line.split()[1]),
346
+ )
347
+ for fileid in self._utterance_fileids(utterances, ".txt")
348
+ for line in self.open(fileid)
349
+ if line.strip()
350
+ ]
351
+
352
+ def phone_trees(self, utterances=None):
353
+ if utterances is None:
354
+ utterances = self._utterances
355
+ if isinstance(utterances, str):
356
+ utterances = [utterances]
357
+
358
+ trees = []
359
+ for utterance in utterances:
360
+ word_times = self.word_times(utterance)
361
+ phone_times = self.phone_times(utterance)
362
+ sent_times = self.sent_times(utterance)
363
+
364
+ while sent_times:
365
+ (sent, sent_start, sent_end) = sent_times.pop(0)
366
+ trees.append(Tree("S", []))
367
+ while (
368
+ word_times and phone_times and phone_times[0][2] <= word_times[0][1]
369
+ ):
370
+ trees[-1].append(phone_times.pop(0)[0])
371
+ while word_times and word_times[0][2] <= sent_end:
372
+ (word, word_start, word_end) = word_times.pop(0)
373
+ trees[-1].append(Tree(word, []))
374
+ while phone_times and phone_times[0][2] <= word_end:
375
+ trees[-1][-1].append(phone_times.pop(0)[0])
376
+ while phone_times and phone_times[0][2] <= sent_end:
377
+ trees[-1].append(phone_times.pop(0)[0])
378
+ return trees
379
+
380
+ # [xx] NOTE: This is currently broken -- we're assuming that the
381
+ # fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE
382
+ # fileids.
383
+ def wav(self, utterance, start=0, end=None):
384
+ # nltk.chunk conflicts with the stdlib module 'chunk'
385
+ wave = import_from_stdlib("wave")
386
+
387
+ w = wave.open(self.open(utterance + ".wav"), "rb")
388
+
389
+ if end is None:
390
+ end = w.getnframes()
391
+
392
+ # Skip past frames before start, then read the frames we want
393
+ w.readframes(start)
394
+ frames = w.readframes(end - start)
395
+
396
+ # Open a new temporary file -- the wave module requires
397
+ # an actual file, and won't work w/ stringio. :(
398
+ tf = tempfile.TemporaryFile()
399
+ out = wave.open(tf, "w")
400
+
401
+ # Write the parameters & data to the new file.
402
+ out.setparams(w.getparams())
403
+ out.writeframes(frames)
404
+ out.close()
405
+
406
+ # Read the data back from the file, and return it. The
407
+ # file will automatically be deleted when we return.
408
+ tf.seek(0)
409
+ return tf.read()
410
+
411
+ def audiodata(self, utterance, start=0, end=None):
412
+ assert end is None or end > start
413
+ headersize = 44
414
+ with self.open(utterance + ".wav") as fp:
415
+ if end is None:
416
+ data = fp.read()
417
+ else:
418
+ data = fp.read(headersize + end * 2)
419
+ return data[headersize + start * 2 :]
420
+
421
+ def _utterance_fileids(self, utterances, extension):
422
+ if utterances is None:
423
+ utterances = self._utterances
424
+ if isinstance(utterances, str):
425
+ utterances = [utterances]
426
+ return [f"{u}{extension}" for u in utterances]
427
+
428
+ def play(self, utterance, start=0, end=None):
429
+ """
430
+ Play the given audio sample.
431
+
432
+ :param utterance: The utterance id of the sample to play
433
+ """
434
+ # Method 1: os audio dev.
435
+ try:
436
+ import ossaudiodev
437
+
438
+ try:
439
+ dsp = ossaudiodev.open("w")
440
+ dsp.setfmt(ossaudiodev.AFMT_S16_LE)
441
+ dsp.channels(1)
442
+ dsp.speed(16000)
443
+ dsp.write(self.audiodata(utterance, start, end))
444
+ dsp.close()
445
+ except OSError as e:
446
+ print(
447
+ (
448
+ "can't acquire the audio device; please "
449
+ "activate your audio device."
450
+ ),
451
+ file=sys.stderr,
452
+ )
453
+ print("system error message:", str(e), file=sys.stderr)
454
+ return
455
+ except ImportError:
456
+ pass
457
+
458
+ # Method 2: pygame
459
+ try:
460
+ # FIXME: this won't work under python 3
461
+ import pygame.mixer
462
+ import StringIO
463
+
464
+ pygame.mixer.init(16000)
465
+ f = StringIO.StringIO(self.wav(utterance, start, end))
466
+ pygame.mixer.Sound(f).play()
467
+ while pygame.mixer.get_busy():
468
+ time.sleep(0.01)
469
+ return
470
+ except ImportError:
471
+ pass
472
+
473
+ # Method 3: complain. :)
474
+ print(
475
+ ("you must install pygame or ossaudiodev " "for audio playback."),
476
+ file=sys.stderr,
477
+ )
478
+
479
+
480
+ class SpeakerInfo:
481
+ def __init__(
482
+ self, id, sex, dr, use, recdate, birthdate, ht, race, edu, comments=None
483
+ ):
484
+ self.id = id
485
+ self.sex = sex
486
+ self.dr = dr
487
+ self.use = use
488
+ self.recdate = recdate
489
+ self.birthdate = birthdate
490
+ self.ht = ht
491
+ self.race = race
492
+ self.edu = edu
493
+ self.comments = comments
494
+
495
+ def __repr__(self):
496
+ attribs = "id sex dr use recdate birthdate ht race edu comments"
497
+ args = [f"{attr}={getattr(self, attr)!r}" for attr in attribs.split()]
498
+ return "SpeakerInfo(%s)" % (", ".join(args))
499
+
500
+
501
+ def read_timit_block(stream):
502
+ """
503
+ Block reader for timit tagged sentences, which are preceded by a sentence
504
+ number that will be ignored.
505
+ """
506
+ line = stream.readline()
507
+ if not line:
508
+ return []
509
+ n, sent = line.split(" ", 1)
510
+ return [sent]
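A small sketch (not part of the commit) showing how the utterance-id layout documented in the module docstring maps onto the slicing used by `utteranceids()`; `'dr1-fvmh0/sx206'` is the example id from that docstring:

    itemid = "dr1-fvmh0/sx206"         # example id from the docstring
    spkrid, sentid = itemid.split("/")
    dialect_region = itemid[2]          # '1'  -> dialect region 1..8
    sex = itemid[4]                     # 'f'  -> m/f
    sent_type = itemid[11]              # 'x'  -> a: all, i: shared, x: exclusive
    print(spkrid, sentid, dialect_region, sex, sent_type)
    # dr1-fvmh0 sx206 1 f x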
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/util.py ADDED
@@ -0,0 +1,867 @@
1
+ # Natural Language Toolkit: Corpus Reader Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import bisect
10
+ import os
11
+ import pickle
12
+ import re
13
+ import tempfile
14
+ from functools import reduce
15
+ from xml.etree import ElementTree
16
+
17
+ from nltk.data import (
18
+ FileSystemPathPointer,
19
+ PathPointer,
20
+ SeekableUnicodeStreamReader,
21
+ ZipFilePathPointer,
22
+ )
23
+ from nltk.internals import slice_bounds
24
+ from nltk.tokenize import wordpunct_tokenize
25
+ from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence
26
+
27
+ ######################################################################
28
+ # { Corpus View
29
+ ######################################################################
30
+
31
+
32
+ class StreamBackedCorpusView(AbstractLazySequence):
33
+ """
34
+ A 'view' of a corpus file, which acts like a sequence of tokens:
35
+ it can be accessed by index, iterated over, etc. However, the
36
+ tokens are only constructed as-needed -- the entire corpus is
37
+ never stored in memory at once.
38
+
39
+ The constructor to ``StreamBackedCorpusView`` takes two arguments:
40
+ a corpus fileid (specified as a string or as a ``PathPointer``);
41
+ and a block reader. A "block reader" is a function that reads
42
+ zero or more tokens from a stream, and returns them as a list. A
43
+ very simple example of a block reader is:
44
+
45
+ >>> def simple_block_reader(stream):
46
+ ... return stream.readline().split()
47
+
48
+ This simple block reader reads a single line at a time, and
49
+ returns a single token (consisting of a string) for each
50
+ whitespace-separated substring on the line.
51
+
52
+ When deciding how to define the block reader for a given
53
+ corpus, careful consideration should be given to the size of
54
+ blocks handled by the block reader. Smaller block sizes will
55
+ increase the memory requirements of the corpus view's internal
56
+ data structures (by 2 integers per block). On the other hand,
57
+ larger block sizes may decrease performance for random access to
58
+ the corpus. (But note that larger block sizes will *not*
59
+ decrease performance for iteration.)
60
+
61
+ Internally, ``CorpusView`` maintains a partial mapping from token
62
+ index to file position, with one entry per block. When a token
63
+ with a given index *i* is requested, the ``CorpusView`` constructs
64
+ it as follows:
65
+
66
+ 1. First, it searches the toknum/filepos mapping for the token
67
+ index closest to (but less than or equal to) *i*.
68
+
69
+ 2. Then, starting at the file position corresponding to that
70
+ index, it reads one block at a time using the block reader
71
+ until it reaches the requested token.
72
+
73
+ The toknum/filepos mapping is created lazily: it is initially
74
+ empty, but every time a new block is read, the block's
75
+ initial token is added to the mapping. (Thus, the toknum/filepos
76
+ map has one entry per block.)
77
+
78
+ In order to increase efficiency for random access patterns that
79
+ have high degrees of locality, the corpus view may cache one or
80
+ more blocks.
81
+
82
+ :note: Each ``CorpusView`` object internally maintains an open file
83
+ object for its underlying corpus file. This file should be
84
+ automatically closed when the ``CorpusView`` is garbage collected,
85
+ but if you wish to close it manually, use the ``close()``
86
+ method. If you access a ``CorpusView``'s items after it has been
87
+ closed, the file object will be automatically re-opened.
88
+
89
+ :warning: If the contents of the file are modified during the
90
+ lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
91
+ is undefined.
92
+
93
+ :warning: If a unicode encoding is specified when constructing a
94
+ ``CorpusView``, then the block reader may only call
95
+ ``stream.seek()`` with offsets that have been returned by
96
+ ``stream.tell()``; in particular, calling ``stream.seek()`` with
97
+ relative offsets, or with offsets based on string lengths, may
98
+ lead to incorrect behavior.
99
+
100
+ :ivar _block_reader: The function used to read
101
+ a single block from the underlying file stream.
102
+ :ivar _toknum: A list containing the token index of each block
103
+ that has been processed. In particular, ``_toknum[i]`` is the
104
+ token index of the first token in block ``i``. Together
105
+ with ``_filepos``, this forms a partial mapping between token
106
+ indices and file positions.
107
+ :ivar _filepos: A list containing the file position of each block
108
+ that has been processed. In particular, ``_filepos[i]`` is the
109
+ file position of the first character in block ``i``. Together
110
+ with ``_toknum``, this forms a partial mapping between token
111
+ indices and file positions.
112
+ :ivar _stream: The stream used to access the underlying corpus file.
113
+ :ivar _len: The total number of tokens in the corpus, if known;
114
+ or None, if the number of tokens is not yet known.
115
+ :ivar _eofpos: The character position of the last character in the
116
+ file. This is calculated when the corpus view is initialized,
117
+ and is used to decide when the end of file has been reached.
118
+ :ivar _cache: A cache of the most recently read block. It
119
+ is encoded as a tuple (start_toknum, end_toknum, tokens), where
120
+ start_toknum is the token index of the first token in the block;
121
+ end_toknum is the token index of the first token not in the
122
+ block; and tokens is a list of the tokens in the block.
123
+ """
124
+
125
+ def __init__(self, fileid, block_reader=None, startpos=0, encoding="utf8"):
126
+ """
127
+ Create a new corpus view, based on the file ``fileid``, and
128
+ read with ``block_reader``. See the class documentation
129
+ for more information.
130
+
131
+ :param fileid: The path to the file that is read by this
132
+ corpus view. ``fileid`` can either be a string or a
133
+ ``PathPointer``.
134
+
135
+ :param startpos: The file position at which the view will
136
+ start reading. This can be used to skip over preface
137
+ sections.
138
+
139
+ :param encoding: The unicode encoding that should be used to
140
+ read the file's contents. If no encoding is specified,
141
+ then the file's contents will be read as a non-unicode
142
+ string (i.e., a str).
143
+ """
144
+ if block_reader:
145
+ self.read_block = block_reader
146
+ # Initialize our toknum/filepos mapping.
147
+ self._toknum = [0]
148
+ self._filepos = [startpos]
149
+ self._encoding = encoding
150
+ # We don't know our length (number of tokens) yet.
151
+ self._len = None
152
+
153
+ self._fileid = fileid
154
+ self._stream = None
155
+
156
+ self._current_toknum = None
157
+ """This variable is set to the index of the next token that
158
+ will be read, immediately before ``self.read_block()`` is
159
+ called. This is provided for the benefit of the block
160
+ reader, which under rare circumstances may need to know
161
+ the current token number."""
162
+
163
+ self._current_blocknum = None
164
+ """This variable is set to the index of the next block that
165
+ will be read, immediately before ``self.read_block()`` is
166
+ called. This is provided for the benefit of the block
167
+ reader, which under rare circumstances may need to know
168
+ the current block number."""
169
+
170
+ # Find the length of the file.
171
+ try:
172
+ if isinstance(self._fileid, PathPointer):
173
+ self._eofpos = self._fileid.file_size()
174
+ else:
175
+ self._eofpos = os.stat(self._fileid).st_size
176
+ except Exception as exc:
177
+ raise ValueError(f"Unable to open or access {fileid!r} -- {exc}") from exc
178
+
179
+ # Maintain a cache of the most recently read block, to
180
+ # increase efficiency of random access.
181
+ self._cache = (-1, -1, None)
182
+
183
+ fileid = property(
184
+ lambda self: self._fileid,
185
+ doc="""
186
+ The fileid of the file that is accessed by this view.
187
+
188
+ :type: str or PathPointer""",
189
+ )
190
+
191
+ def read_block(self, stream):
192
+ """
193
+ Read a block from the input stream.
194
+
195
+ :return: a block of tokens from the input stream
196
+ :rtype: list(any)
197
+ :param stream: an input stream
198
+ :type stream: stream
199
+ """
200
+ raise NotImplementedError("Abstract Method")
201
+
202
+ def _open(self):
203
+ """
204
+ Open the file stream associated with this corpus view. This
205
+ will be performed if any value is read from the view
206
+ while its file stream is closed.
207
+ """
208
+ if isinstance(self._fileid, PathPointer):
209
+ self._stream = self._fileid.open(self._encoding)
210
+ elif self._encoding:
211
+ self._stream = SeekableUnicodeStreamReader(
212
+ open(self._fileid, "rb"), self._encoding
213
+ )
214
+ else:
215
+ self._stream = open(self._fileid, "rb")
216
+
217
+ def close(self):
218
+ """
219
+ Close the file stream associated with this corpus view. This
220
+ can be useful if you are worried about running out of file
221
+ handles (although the stream should automatically be closed
222
+ upon garbage collection of the corpus view). If the corpus
223
+ view is accessed after it is closed, it will be automatically
224
+ re-opened.
225
+ """
226
+ if self._stream is not None:
227
+ self._stream.close()
228
+ self._stream = None
229
+
230
+ def __enter__(self):
231
+ return self
232
+
233
+ def __exit__(self, type, value, traceback):
234
+ self.close()
235
+
236
+ def __len__(self):
237
+ if self._len is None:
238
+ # iterate_from() sets self._len when it reaches the end
239
+ # of the file:
240
+ for tok in self.iterate_from(self._toknum[-1]):
241
+ pass
242
+ return self._len
243
+
244
+ def __getitem__(self, i):
245
+ if isinstance(i, slice):
246
+ start, stop = slice_bounds(self, i)
247
+ # Check if it's in the cache.
248
+ offset = self._cache[0]
249
+ if offset <= start and stop <= self._cache[1]:
250
+ return self._cache[2][start - offset : stop - offset]
251
+ # Construct & return the result.
252
+ return LazySubsequence(self, start, stop)
253
+ else:
254
+ # Handle negative indices
255
+ if i < 0:
256
+ i += len(self)
257
+ if i < 0:
258
+ raise IndexError("index out of range")
259
+ # Check if it's in the cache.
260
+ offset = self._cache[0]
261
+ if offset <= i < self._cache[1]:
262
+ return self._cache[2][i - offset]
263
+ # Use iterate_from to extract it.
264
+ try:
265
+ return next(self.iterate_from(i))
266
+ except StopIteration as e:
267
+ raise IndexError("index out of range") from e
268
+
269
+ # If we wanted to be thread-safe, then this method would need to
270
+ # do some locking.
271
+ def iterate_from(self, start_tok):
272
+ # Start by feeding from the cache, if possible.
273
+ if self._cache[0] <= start_tok < self._cache[1]:
274
+ for tok in self._cache[2][start_tok - self._cache[0] :]:
275
+ yield tok
276
+ start_tok += 1
277
+
278
+ # Decide where in the file we should start. If `start` is in
279
+ # our mapping, then we can jump straight to the correct block;
280
+ # otherwise, start at the last block we've processed.
281
+ if start_tok < self._toknum[-1]:
282
+ block_index = bisect.bisect_right(self._toknum, start_tok) - 1
283
+ toknum = self._toknum[block_index]
284
+ filepos = self._filepos[block_index]
285
+ else:
286
+ block_index = len(self._toknum) - 1
287
+ toknum = self._toknum[-1]
288
+ filepos = self._filepos[-1]
289
+
290
+ # Open the stream, if it's not open already.
291
+ if self._stream is None:
292
+ self._open()
293
+
294
+ # If the file is empty, the while loop will never run.
295
+ # This *seems* to be all the state we need to set:
296
+ if self._eofpos == 0:
297
+ self._len = 0
298
+
299
+ # Each iteration through this loop, we read a single block
300
+ # from the stream.
301
+ while filepos < self._eofpos:
302
+ # Read the next block.
303
+ self._stream.seek(filepos)
304
+ self._current_toknum = toknum
305
+ self._current_blocknum = block_index
306
+ tokens = self.read_block(self._stream)
307
+ assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
308
+ "block reader %s() should return list or tuple."
309
+ % self.read_block.__name__
310
+ )
311
+ num_toks = len(tokens)
312
+ new_filepos = self._stream.tell()
313
+ assert (
314
+ new_filepos > filepos
315
+ ), "block reader %s() should consume at least 1 byte (filepos=%d)" % (
316
+ self.read_block.__name__,
317
+ filepos,
318
+ )
319
+
320
+ # Update our cache.
321
+ self._cache = (toknum, toknum + num_toks, list(tokens))
322
+
323
+ # Update our mapping.
324
+ assert toknum <= self._toknum[-1]
325
+ if num_toks > 0:
326
+ block_index += 1
327
+ if toknum == self._toknum[-1]:
328
+ assert new_filepos > self._filepos[-1] # monotonic!
329
+ self._filepos.append(new_filepos)
330
+ self._toknum.append(toknum + num_toks)
331
+ else:
332
+ # Check for consistency:
333
+ assert (
334
+ new_filepos == self._filepos[block_index]
335
+ ), "inconsistent block reader (num chars read)"
336
+ assert (
337
+ toknum + num_toks == self._toknum[block_index]
338
+ ), "inconsistent block reader (num tokens returned)"
339
+
340
+ # If we reached the end of the file, then update self._len
341
+ if new_filepos == self._eofpos:
342
+ self._len = toknum + num_toks
343
+ # Generate the tokens in this block (but skip any tokens
344
+ # before start_tok). Note that between yields, our state
345
+ # may be modified.
346
+ for tok in tokens[max(0, start_tok - toknum) :]:
347
+ yield tok
348
+ # If we're at the end of the file, then we're done.
349
+ assert new_filepos <= self._eofpos
350
+ if new_filepos == self._eofpos:
351
+ break
352
+ # Update our indices
353
+ toknum += num_toks
354
+ filepos = new_filepos
355
+
356
+ # If we reach this point, then we should know our length.
357
+ assert self._len is not None
358
+ # Enforce closing of stream once we reached end of file
359
+ # We should have reached EOF once we're out of the while loop.
360
+ self.close()
361
+
362
+ # Use concat for these, so we can use a ConcatenatedCorpusView
363
+ # when possible.
364
+ def __add__(self, other):
365
+ return concat([self, other])
366
+
367
+ def __radd__(self, other):
368
+ return concat([other, self])
369
+
370
+ def __mul__(self, count):
371
+ return concat([self] * count)
372
+
373
+ def __rmul__(self, count):
374
+ return concat([self] * count)
375
+
376
+
377
+ class ConcatenatedCorpusView(AbstractLazySequence):
378
+ """
379
+ A 'view' of a corpus file that joins together one or more
380
+ ``StreamBackedCorpusViews<StreamBackedCorpusView>``. At most
381
+ one file handle is left open at any time.
382
+ """
383
+
384
+ def __init__(self, corpus_views):
385
+ self._pieces = corpus_views
386
+ """A list of the corpus subviews that make up this
387
+ concatenation."""
388
+
389
+ self._offsets = [0]
390
+ """A list of offsets, indicating the index at which each
391
+ subview begins. In particular::
392
+ offsets[i] = sum([len(p) for p in pieces[:i]])"""
393
+
394
+ self._open_piece = None
395
+ """The most recently accessed corpus subview (or None).
396
+ Before a new subview is accessed, this subview will be closed."""
397
+
398
+ def __len__(self):
399
+ if len(self._offsets) <= len(self._pieces):
400
+ # Iterate to the end of the corpus.
401
+ for tok in self.iterate_from(self._offsets[-1]):
402
+ pass
403
+
404
+ return self._offsets[-1]
405
+
406
+ def close(self):
407
+ for piece in self._pieces:
408
+ piece.close()
409
+
410
+ def iterate_from(self, start_tok):
411
+ piecenum = bisect.bisect_right(self._offsets, start_tok) - 1
412
+
413
+ while piecenum < len(self._pieces):
414
+ offset = self._offsets[piecenum]
415
+ piece = self._pieces[piecenum]
416
+
417
+ # If we've got another piece open, close it first.
418
+ if self._open_piece is not piece:
419
+ if self._open_piece is not None:
420
+ self._open_piece.close()
421
+ self._open_piece = piece
422
+
423
+ # Get everything we can from this piece.
424
+ yield from piece.iterate_from(max(0, start_tok - offset))
425
+
426
+ # Update the offset table.
427
+ if piecenum + 1 == len(self._offsets):
428
+ self._offsets.append(self._offsets[-1] + len(piece))
429
+
430
+ # Move on to the next piece.
431
+ piecenum += 1
432
+
433
+
434
+ def concat(docs):
435
+ """
436
+ Concatenate together the contents of multiple documents from a
437
+ single corpus, using an appropriate concatenation function. This
438
+ utility function is used by corpus readers when the user requests
439
+ more than one document at a time.
440
+ """
441
+ if len(docs) == 1:
442
+ return docs[0]
443
+ if len(docs) == 0:
444
+ raise ValueError("concat() expects at least one object!")
445
+
446
+ types = {d.__class__ for d in docs}
447
+
448
+ # If they're all strings, use string concatenation.
449
+ if all(isinstance(doc, str) for doc in docs):
450
+ return "".join(docs)
451
+
452
+ # If they're all corpus views, then use ConcatenatedCorpusView.
453
+ for typ in types:
454
+ if not issubclass(typ, (StreamBackedCorpusView, ConcatenatedCorpusView)):
455
+ break
456
+ else:
457
+ return ConcatenatedCorpusView(docs)
458
+
459
+ # If they're all lazy sequences, use a lazy concatenation
460
+ for typ in types:
461
+ if not issubclass(typ, AbstractLazySequence):
462
+ break
463
+ else:
464
+ return LazyConcatenation(docs)
465
+
466
+ # Otherwise, see what we can do:
467
+ if len(types) == 1:
468
+ typ = list(types)[0]
469
+
470
+ if issubclass(typ, list):
471
+ return reduce((lambda a, b: a + b), docs, [])
472
+
473
+ if issubclass(typ, tuple):
474
+ return reduce((lambda a, b: a + b), docs, ())
475
+
476
+ if ElementTree.iselement(typ):
477
+ xmltree = ElementTree.Element("documents")
478
+ for doc in docs:
479
+ xmltree.append(doc)
480
+ return xmltree
481
+
482
+ # No method found!
483
+ raise ValueError("Don't know how to concatenate types: %r" % types)
484
+
485
+
486
+ ######################################################################
487
+ # { Corpus View for Pickled Sequences
488
+ ######################################################################
489
+
490
+
491
+ class PickleCorpusView(StreamBackedCorpusView):
492
+ """
493
+ A stream backed corpus view for corpus files that consist of
494
+ sequences of serialized Python objects (serialized using
495
+ ``pickle.dump``). One use case for this class is to store the
496
+ result of running feature detection on a corpus to disk. This can
497
+ be useful when performing feature detection is expensive (so we
498
+ don't want to repeat it); but the corpus is too large to store in
499
+ memory. The following example illustrates this technique:
500
+
501
+ >>> from nltk.corpus.reader.util import PickleCorpusView
502
+ >>> from nltk.util import LazyMap
503
+ >>> feature_corpus = LazyMap(detect_features, corpus) # doctest: +SKIP
504
+ >>> PickleCorpusView.write(feature_corpus, some_fileid) # doctest: +SKIP
505
+ >>> pcv = PickleCorpusView(some_fileid) # doctest: +SKIP
506
+ """
507
+
508
+ BLOCK_SIZE = 100
509
+ PROTOCOL = -1
510
+
511
+ def __init__(self, fileid, delete_on_gc=False):
512
+ """
513
+ Create a new corpus view that reads the pickle corpus
514
+ ``fileid``.
515
+
516
+ :param delete_on_gc: If true, then ``fileid`` will be deleted
517
+ whenever this object gets garbage-collected.
518
+ """
519
+ self._delete_on_gc = delete_on_gc
520
+ StreamBackedCorpusView.__init__(self, fileid)
521
+
522
+ def read_block(self, stream):
523
+ result = []
524
+ for i in range(self.BLOCK_SIZE):
525
+ try:
526
+ result.append(pickle.load(stream))
527
+ except EOFError:
528
+ break
529
+ return result
530
+
531
+ def __del__(self):
532
+ """
533
+ If ``delete_on_gc`` was set to true when this
534
+ ``PickleCorpusView`` was created, then delete the corpus view's
535
+ fileid. (This method is called whenever a
536
+ ``PickleCorpusView`` is garbage-collected.)
537
+ """
538
+ if getattr(self, "_delete_on_gc"):
539
+ if os.path.exists(self._fileid):
540
+ try:
541
+ os.remove(self._fileid)
542
+ except OSError:
543
+ pass
544
+ self.__dict__.clear() # make the garbage collector's job easier
545
+
546
+ @classmethod
547
+ def write(cls, sequence, output_file):
548
+ if isinstance(output_file, str):
549
+ output_file = open(output_file, "wb")
550
+ for item in sequence:
551
+ pickle.dump(item, output_file, cls.PROTOCOL)
552
+
553
+ @classmethod
554
+ def cache_to_tempfile(cls, sequence, delete_on_gc=True):
555
+ """
556
+ Write the given sequence to a temporary file as a pickle
557
+ corpus; and then return a ``PickleCorpusView`` view for that
558
+ temporary corpus file.
559
+
560
+ :param delete_on_gc: If true, then the temporary file will be
561
+ deleted whenever this object gets garbage-collected.
562
+ """
563
+ try:
564
+ fd, output_file_name = tempfile.mkstemp(".pcv", "nltk-")
565
+ output_file = os.fdopen(fd, "wb")
566
+ cls.write(sequence, output_file)
567
+ output_file.close()
568
+ return PickleCorpusView(output_file_name, delete_on_gc)
569
+ except OSError as e:
570
+ raise ValueError("Error while creating temp file: %s" % e) from e
571
+
572
+
573
+ ######################################################################
574
+ # { Block Readers
575
+ ######################################################################
576
+
577
+
578
+ def read_whitespace_block(stream):
579
+ toks = []
580
+ for i in range(20): # Read 20 lines at a time.
581
+ toks.extend(stream.readline().split())
582
+ return toks
583
+
584
+
585
+ def read_wordpunct_block(stream):
586
+ toks = []
587
+ for i in range(20): # Read 20 lines at a time.
588
+ toks.extend(wordpunct_tokenize(stream.readline()))
589
+ return toks
590
+
591
+
592
+ def read_line_block(stream):
593
+ toks = []
594
+ for i in range(20):
595
+ line = stream.readline()
596
+ if not line:
597
+ return toks
598
+ toks.append(line.rstrip("\n"))
599
+ return toks
600
+
601
+
602
+ def read_blankline_block(stream):
603
+ s = ""
604
+ while True:
605
+ line = stream.readline()
606
+ # End of file:
607
+ if not line:
608
+ if s:
609
+ return [s]
610
+ else:
611
+ return []
612
+ # Blank line:
613
+ elif line and not line.strip():
614
+ if s:
615
+ return [s]
616
+ # Other line:
617
+ else:
618
+ s += line
619
+
620
+
621
+ def read_alignedsent_block(stream):
622
+ s = ""
623
+ while True:
624
+ line = stream.readline()
625
+ if line and (line[0] == "=" or line[0] == "\n" or line[:2] == "\r\n"):
626
+ continue
627
+ # End of file:
628
+ if not line:
629
+ if s:
630
+ return [s]
631
+ else:
632
+ return []
633
+ # Other line:
634
+ else:
635
+ s += line
636
+ if re.match(r"^\d+-\d+", line) is not None:
637
+ return [s]
638
+
639
+
640
+ def read_regexp_block(stream, start_re, end_re=None):
641
+ """
642
+ Read a sequence of tokens from a stream, where tokens begin with
643
+ lines that match ``start_re``. If ``end_re`` is specified, then
644
+ tokens end with lines that match ``end_re``; otherwise, tokens end
645
+ whenever the next line matching ``start_re`` or EOF is found.
646
+ """
647
+ # Scan until we find a line matching the start regexp.
648
+ while True:
649
+ line = stream.readline()
650
+ if not line:
651
+ return [] # end of file.
652
+ if re.match(start_re, line):
653
+ break
654
+
655
+ # Scan until we find another line matching the regexp, or EOF.
656
+ lines = [line]
657
+ while True:
658
+ oldpos = stream.tell()
659
+ line = stream.readline()
660
+ # End of file:
661
+ if not line:
662
+ return ["".join(lines)]
663
+ # End of token:
664
+ if end_re is not None and re.match(end_re, line):
665
+ return ["".join(lines)]
666
+ # Start of new token: backup to just before it starts, and
667
+ # return the token we've already collected.
668
+ if end_re is None and re.match(start_re, line):
669
+ stream.seek(oldpos)
670
+ return ["".join(lines)]
671
+ # Anything else is part of the token.
672
+ lines.append(line)
673
+
674
+
675
+ def read_sexpr_block(stream, block_size=16384, comment_char=None):
676
+ """
677
+ Read a sequence of s-expressions from the stream, and leave the
678
+ stream's file position at the end of the last complete s-expression
679
+ read. This function will always return at least one s-expression,
680
+ unless there are no more s-expressions in the file.
681
+
682
+ If the file ends in the middle of an s-expression, then that
683
+ incomplete s-expression is returned when the end of the file is
684
+ reached.
685
+
686
+ :param block_size: The default block size for reading. If an
687
+ s-expression is longer than one block, then more than one
688
+ block will be read.
689
+ :param comment_char: A character that marks comments. Any lines
690
+ that begin with this character will be stripped out.
691
+ (If spaces or tabs precede the comment character, then the
692
+ line will not be stripped.)
693
+ """
694
+ start = stream.tell()
695
+ block = stream.read(block_size)
696
+ encoding = getattr(stream, "encoding", None)
697
+ assert encoding is not None or isinstance(block, str)
698
+ if encoding not in (None, "utf-8"):
699
+ import warnings
700
+
701
+ warnings.warn(
702
+ "Parsing may fail, depending on the properties "
703
+ "of the %s encoding!" % encoding
704
+ )
705
+ # (e.g., the utf-16 encoding does not work because it insists
706
+ # on adding BOMs to the beginning of encoded strings.)
707
+
708
+ if comment_char:
709
+ COMMENT = re.compile("(?m)^%s.*$" % re.escape(comment_char))
710
+ while True:
711
+ try:
712
+ # If we're stripping comments, then make sure our block ends
713
+ # on a line boundary; and then replace any comments with
714
+ # space characters. (We can't just strip them out -- that
715
+ # would make our offset wrong.)
716
+ if comment_char:
717
+ block += stream.readline()
718
+ block = re.sub(COMMENT, _sub_space, block)
719
+ # Read the block.
720
+ tokens, offset = _parse_sexpr_block(block)
721
+ # Skip whitespace
722
+ offset = re.compile(r"\s*").search(block, offset).end()
723
+
724
+ # Move to the end position.
725
+ if encoding is None:
726
+ stream.seek(start + offset)
727
+ else:
728
+ stream.seek(start + len(block[:offset].encode(encoding)))
729
+
730
+ # Return the list of tokens we processed
731
+ return tokens
732
+ except ValueError as e:
733
+ if e.args[0] == "Block too small":
734
+ next_block = stream.read(block_size)
735
+ if next_block:
736
+ block += next_block
737
+ continue
738
+ else:
739
+ # The file ended mid-sexpr -- return what we got.
740
+ return [block.strip()]
741
+ else:
742
+ raise
743
+
744
+
745
+ def _sub_space(m):
746
+ """Helper function: given a regexp match, return a string of
747
+ spaces that's the same length as the matched string."""
748
+ return " " * (m.end() - m.start())
749
+
750
+
751
+ def _parse_sexpr_block(block):
752
+ tokens = []
753
+ start = end = 0
754
+
755
+ while end < len(block):
756
+ m = re.compile(r"\S").search(block, end)
757
+ if not m:
758
+ return tokens, end
759
+
760
+ start = m.start()
761
+
762
+ # Case 1: sexpr is not parenthesized.
763
+ if m.group() != "(":
764
+ m2 = re.compile(r"[\s(]").search(block, start)
765
+ if m2:
766
+ end = m2.start()
767
+ else:
768
+ if tokens:
769
+ return tokens, end
770
+ raise ValueError("Block too small")
771
+
772
+ # Case 2: parenthesized sexpr.
773
+ else:
774
+ nesting = 0
775
+ for m in re.compile(r"[()]").finditer(block, start):
776
+ if m.group() == "(":
777
+ nesting += 1
778
+ else:
779
+ nesting -= 1
780
+ if nesting == 0:
781
+ end = m.end()
782
+ break
783
+ else:
784
+ if tokens:
785
+ return tokens, end
786
+ raise ValueError("Block too small")
787
+
788
+ tokens.append(block[start:end])
789
+
790
+ return tokens, end
791
+
792
+
793
+ ######################################################################
794
+ # { Finding Corpus Items
795
+ ######################################################################
796
+
797
+
798
+ def find_corpus_fileids(root, regexp):
799
+ if not isinstance(root, PathPointer):
800
+ raise TypeError("find_corpus_fileids: expected a PathPointer")
801
+ regexp += "$"
802
+
803
+ # Find fileids in a zipfile: scan the zipfile's namelist. Filter
804
+ # out entries that end in '/' -- they're directories.
805
+ if isinstance(root, ZipFilePathPointer):
806
+ fileids = [
807
+ name[len(root.entry) :]
808
+ for name in root.zipfile.namelist()
809
+ if not name.endswith("/")
810
+ ]
811
+ items = [name for name in fileids if re.match(regexp, name)]
812
+ return sorted(items)
813
+
814
+ # Find fileids in a directory: use os.walk to search all (proper
815
+ # or symlinked) subdirectories, and match paths against the regexp.
816
+ elif isinstance(root, FileSystemPathPointer):
817
+ items = []
818
+ for dirname, subdirs, fileids in os.walk(root.path):
819
+ prefix = "".join("%s/" % p for p in _path_from(root.path, dirname))
820
+ items += [
821
+ prefix + fileid
822
+ for fileid in fileids
823
+ if re.match(regexp, prefix + fileid)
824
+ ]
825
+ # Don't visit svn directories:
826
+ if ".svn" in subdirs:
827
+ subdirs.remove(".svn")
828
+ return sorted(items)
829
+
830
+ else:
831
+ raise AssertionError("Don't know how to handle %r" % root)
832
+
833
+
834
+ def _path_from(parent, child):
835
+ if os.path.split(parent)[1] == "":
836
+ parent = os.path.split(parent)[0]
837
+ path = []
838
+ while parent != child:
839
+ child, dirname = os.path.split(child)
840
+ path.insert(0, dirname)
841
+ assert os.path.split(child)[0] != child
842
+ return path
843
+
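+ # A small worked example for ``_path_from`` (illustrative, POSIX-style
+ # paths assumed): it returns the path components leading from ``parent``
+ # down to ``child``, which ``find_corpus_fileids`` joins into the fileid
+ # prefix.
+ #
+ # >>> _path_from('/corpora', '/corpora/abc/oneil')
+ # ['abc', 'oneil']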
844
+
845
+ ######################################################################
846
+ # { Paragraph structure in Treebank files
847
+ ######################################################################
848
+
849
+
850
+ def tagged_treebank_para_block_reader(stream):
851
+ # Read the next paragraph.
852
+ para = ""
853
+ while True:
854
+ line = stream.readline()
855
+ # End of paragraph:
856
+ if re.match(r"======+\s*$", line):
857
+ if para.strip():
858
+ return [para]
859
+ # End of file:
860
+ elif line == "":
861
+ if para.strip():
862
+ return [para]
863
+ else:
864
+ return []
865
+ # Content line:
866
+ else:
867
+ para += line
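+ # A behaviour sketch (illustration only, not part of the module): each
+ # call returns the next '='-delimited paragraph as a one-element list,
+ # and [] once the end of the file is reached.
+ #
+ # >>> from io import StringIO
+ # >>> s = StringIO("First/JJ para/NN\n======\nSecond/JJ para/NN\n")
+ # >>> tagged_treebank_para_block_reader(s)
+ # ['First/JJ para/NN\n']
+ # >>> tagged_treebank_para_block_reader(s)
+ # ['Second/JJ para/NN\n']
+ # >>> tagged_treebank_para_block_reader(s)
+ # []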
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py ADDED
@@ -0,0 +1,629 @@
1
+ # Natural Language Toolkit: Verbnet Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ An NLTK interface to the VerbNet verb lexicon
10
+
11
+ For details about VerbNet see:
12
+ https://verbs.colorado.edu/~mpalmer/projects/verbnet.html
13
+ """
14
+
15
+ import re
16
+ import textwrap
17
+ from collections import defaultdict
18
+
19
+ from nltk.corpus.reader.xmldocs import XMLCorpusReader
20
+
21
+
22
+ class VerbnetCorpusReader(XMLCorpusReader):
23
+ """
24
+ An NLTK interface to the VerbNet verb lexicon.
25
+
26
+ From the VerbNet site: "VerbNet (VN) (Kipper-Schuler 2006) is the largest
27
+ on-line verb lexicon currently available for English. It is a hierarchical
28
+ domain-independent, broad-coverage verb lexicon with mappings to other
29
+ lexical resources such as WordNet (Miller, 1990; Fellbaum, 1998), XTAG
30
+ (XTAG Research Group, 2001), and FrameNet (Baker et al., 1998)."
31
+
32
+ For details about VerbNet see:
33
+ https://verbs.colorado.edu/~mpalmer/projects/verbnet.html
34
+ """
35
+
36
+ # No unicode encoding param, since the data files are all XML.
37
+ def __init__(self, root, fileids, wrap_etree=False):
38
+ XMLCorpusReader.__init__(self, root, fileids, wrap_etree)
39
+
40
+ self._lemma_to_class = defaultdict(list)
41
+ """A dictionary mapping from verb lemma strings to lists of
42
+ VerbNet class identifiers."""
43
+
44
+ self._wordnet_to_class = defaultdict(list)
45
+ """A dictionary mapping from wordnet identifier strings to
46
+ lists of VerbNet class identifiers."""
47
+
48
+ self._class_to_fileid = {}
49
+ """A dictionary mapping from class identifiers to
50
+ corresponding file identifiers. The keys of this dictionary
51
+ provide a complete list of all classes and subclasses."""
52
+
53
+ self._shortid_to_longid = {}
54
+
55
+ # Initialize the dictionaries. Use the quick (regexp-based)
56
+ # method instead of the slow (xml-based) method, because it
57
+ # runs 2-30 times faster.
58
+ self._quick_index()
59
+
60
+ _LONGID_RE = re.compile(r"([^\-\.]*)-([\d+.\-]+)$")
61
+ """Regular expression that matches (and decomposes) longids"""
62
+
63
+ _SHORTID_RE = re.compile(r"[\d+.\-]+$")
64
+ """Regular expression that matches shortids"""
65
+
66
+ _INDEX_RE = re.compile(
67
+ r'<MEMBER name="\??([^"]+)" wn="([^"]*)"[^>]+>|' r'<VNSUBCLASS ID="([^"]+)"/?>'
68
+ )
69
+ """Regular expression used by ``_index()`` to quickly scan the corpus
70
+ for basic information."""
71
+
72
+ def lemmas(self, vnclass=None):
73
+ """
74
+ Return a list of all verb lemmas that appear in any class, or
75
+ in the specified ``vnclass``.
76
+ """
77
+ if vnclass is None:
78
+ return sorted(self._lemma_to_class.keys())
79
+ else:
80
+ # [xx] should this include subclass members?
81
+ if isinstance(vnclass, str):
82
+ vnclass = self.vnclass(vnclass)
83
+ return [member.get("name") for member in vnclass.findall("MEMBERS/MEMBER")]
84
+
85
+ def wordnetids(self, vnclass=None):
86
+ """
87
+ Return a list of all wordnet identifiers that appear in any
88
+ class, or in the specified ``vnclass``.
89
+ """
90
+ if vnclass is None:
91
+ return sorted(self._wordnet_to_class.keys())
92
+ else:
93
+ # [xx] should this include subclass members?
94
+ if isinstance(vnclass, str):
95
+ vnclass = self.vnclass(vnclass)
96
+ return sum(
97
+ (
98
+ member.get("wn", "").split()
99
+ for member in vnclass.findall("MEMBERS/MEMBER")
100
+ ),
101
+ [],
102
+ )
103
+
104
+ def classids(self, lemma=None, wordnetid=None, fileid=None, classid=None):
105
+ """
106
+ Return a list of the VerbNet class identifiers. If a file
107
+ identifier is specified, then return only the VerbNet class
108
+ identifiers for classes (and subclasses) defined by that file.
109
+ If a lemma is specified, then return only VerbNet class
110
+ identifiers for classes that contain that lemma as a member.
111
+ If a wordnetid is specified, then return only identifiers for
112
+ classes that contain that wordnetid as a member. If a classid
113
+ is specified, then return only identifiers for subclasses of
114
+ the specified VerbNet class.
115
+ If nothing is specified, return all VerbNet class identifiers.
116
+ """
117
+ if fileid is not None:
118
+ return [c for (c, f) in self._class_to_fileid.items() if f == fileid]
119
+ elif lemma is not None:
120
+ return self._lemma_to_class[lemma]
121
+ elif wordnetid is not None:
122
+ return self._wordnet_to_class[wordnetid]
123
+ elif classid is not None:
124
+ xmltree = self.vnclass(classid)
125
+ return [
126
+ subclass.get("ID")
127
+ for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS")
128
+ ]
129
+ else:
130
+ return sorted(self._class_to_fileid.keys())
131
+
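+ # Typical usage, assuming the VerbNet corpus data has been downloaded
+ # (illustration only):
+ #
+ # >>> from nltk.corpus import verbnet
+ # >>> 'put-9.1' in verbnet.classids(lemma='put')            # doctest: +SKIP
+ # True
+ # >>> 'put-9.1' in verbnet.classids(fileid='put-9.1.xml')   # doctest: +SKIP
+ # True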
132
+ def vnclass(self, fileid_or_classid):
133
+ """Returns VerbNet class ElementTree
134
+
135
+ Return an ElementTree containing the xml for the specified
136
+ VerbNet class.
137
+
138
+ :param fileid_or_classid: An identifier specifying which class
139
+ should be returned. Can be a file identifier (such as
140
+ ``'put-9.1.xml'``), or a VerbNet class identifier (such as
141
+ ``'put-9.1'``) or a short VerbNet class identifier (such as
142
+ ``'9.1'``).
143
+ """
144
+ # File identifier: just return the xml.
145
+ if fileid_or_classid in self._fileids:
146
+ return self.xml(fileid_or_classid)
147
+
148
+ # Class identifier: get the xml, and find the right elt.
149
+ classid = self.longid(fileid_or_classid)
150
+ if classid in self._class_to_fileid:
151
+ fileid = self._class_to_fileid[self.longid(classid)]
152
+ tree = self.xml(fileid)
153
+ if classid == tree.get("ID"):
154
+ return tree
155
+ else:
156
+ for subclass in tree.findall(".//VNSUBCLASS"):
157
+ if classid == subclass.get("ID"):
158
+ return subclass
159
+ else:
160
+ assert False # we saw it during _index()!
161
+
162
+ else:
163
+ raise ValueError(f"Unknown identifier {fileid_or_classid}")
164
+
165
+ def fileids(self, vnclass_ids=None):
166
+ """
167
+ Return a list of fileids that make up this corpus. If
168
+ ``vnclass_ids`` is specified, then return the fileids that make
169
+ up the specified VerbNet class(es).
170
+ """
171
+ if vnclass_ids is None:
172
+ return self._fileids
173
+ elif isinstance(vnclass_ids, str):
174
+ return [self._class_to_fileid[self.longid(vnclass_ids)]]
175
+ else:
176
+ return [
177
+ self._class_to_fileid[self.longid(vnclass_id)]
178
+ for vnclass_id in vnclass_ids
179
+ ]
180
+
181
+ def frames(self, vnclass):
182
+ """Given a VerbNet class, this method returns VerbNet frames
183
+
184
+ The members returned are:
185
+ 1) Example
186
+ 2) Description
187
+ 3) Syntax
188
+ 4) Semantics
189
+
190
+ :param vnclass: A VerbNet class identifier; or an ElementTree
191
+ containing the xml contents of a VerbNet class.
192
+ :return: frames - a list of frame dictionaries
193
+ """
194
+ if isinstance(vnclass, str):
195
+ vnclass = self.vnclass(vnclass)
196
+ frames = []
197
+ vnframes = vnclass.findall("FRAMES/FRAME")
198
+ for vnframe in vnframes:
199
+ frames.append(
200
+ {
201
+ "example": self._get_example_within_frame(vnframe),
202
+ "description": self._get_description_within_frame(vnframe),
203
+ "syntax": self._get_syntactic_list_within_frame(vnframe),
204
+ "semantics": self._get_semantics_within_frame(vnframe),
205
+ }
206
+ )
207
+ return frames
208
+
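+ # Each frame is returned as a plain dict; a usage sketch (assumes the
+ # VerbNet corpus data is installed):
+ #
+ # >>> from nltk.corpus import verbnet
+ # >>> frame = verbnet.frames('put-9.1')[0]   # doctest: +SKIP
+ # >>> sorted(frame.keys())                   # doctest: +SKIP
+ # ['description', 'example', 'semantics', 'syntax']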
209
+ def subclasses(self, vnclass):
210
+ """Returns subclass ids, if any exist
211
+
212
+ Given a VerbNet class, this method returns subclass ids (if they exist)
213
+ in a list of strings.
214
+
215
+ :param vnclass: A VerbNet class identifier; or an ElementTree
216
+ containing the xml contents of a VerbNet class.
217
+ :return: list of subclasses
218
+ """
219
+ if isinstance(vnclass, str):
220
+ vnclass = self.vnclass(vnclass)
221
+
222
+ subclasses = [
223
+ subclass.get("ID") for subclass in vnclass.findall("SUBCLASSES/VNSUBCLASS")
224
+ ]
225
+ return subclasses
226
+
227
+ def themroles(self, vnclass):
228
+ """Returns thematic roles participating in a VerbNet class
229
+
230
+ Members returned as part of each role are:
231
+ 1) Type
232
+ 2) Modifiers
233
+
234
+ :param vnclass: A VerbNet class identifier; or an ElementTree
235
+ containing the xml contents of a VerbNet class.
236
+ :return: themroles: A list of thematic roles in the VerbNet class
237
+ """
238
+ if isinstance(vnclass, str):
239
+ vnclass = self.vnclass(vnclass)
240
+
241
+ themroles = []
242
+ for trole in vnclass.findall("THEMROLES/THEMROLE"):
243
+ themroles.append(
244
+ {
245
+ "type": trole.get("type"),
246
+ "modifiers": [
247
+ {"value": restr.get("Value"), "type": restr.get("type")}
248
+ for restr in trole.findall("SELRESTRS/SELRESTR")
249
+ ],
250
+ }
251
+ )
252
+ return themroles
253
+
254
+ ######################################################################
255
+ # { Index Initialization
256
+ ######################################################################
257
+
258
+ def _index(self):
259
+ """
260
+ Initialize the indexes ``_lemma_to_class``,
261
+ ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning
262
+ through the corpus fileids. This is fast if ElementTree
263
+ uses the C implementation (<0.1 secs), but quite slow (>10 secs)
264
+ if only the python implementation is available.
265
+ """
266
+ for fileid in self._fileids:
267
+ self._index_helper(self.xml(fileid), fileid)
268
+
269
+ def _index_helper(self, xmltree, fileid):
270
+ """Helper for ``_index()``"""
271
+ vnclass = xmltree.get("ID")
272
+ self._class_to_fileid[vnclass] = fileid
273
+ self._shortid_to_longid[self.shortid(vnclass)] = vnclass
274
+ for member in xmltree.findall("MEMBERS/MEMBER"):
275
+ self._lemma_to_class[member.get("name")].append(vnclass)
276
+ for wn in member.get("wn", "").split():
277
+ self._wordnet_to_class[wn].append(vnclass)
278
+ for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS"):
279
+ self._index_helper(subclass, fileid)
280
+
281
+ def _quick_index(self):
282
+ """
283
+ Initialize the indexes ``_lemma_to_class``,
284
+ ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning
285
+ through the corpus fileids. This doesn't do proper xml parsing,
286
+ but is good enough to find everything in the standard VerbNet
287
+ corpus -- and it runs about 30 times faster than xml parsing
288
+ (with the python ElementTree; only 2-3 times faster
289
+ if ElementTree uses the C implementation).
290
+ """
291
+ # nb: if we got rid of wordnet_to_class, this would run 2-3
292
+ # times faster.
293
+ for fileid in self._fileids:
294
+ vnclass = fileid[:-4] # strip the '.xml'
295
+ self._class_to_fileid[vnclass] = fileid
296
+ self._shortid_to_longid[self.shortid(vnclass)] = vnclass
297
+ with self.open(fileid) as fp:
298
+ for m in self._INDEX_RE.finditer(fp.read()):
299
+ groups = m.groups()
300
+ if groups[0] is not None:
301
+ self._lemma_to_class[groups[0]].append(vnclass)
302
+ for wn in groups[1].split():
303
+ self._wordnet_to_class[wn].append(vnclass)
304
+ elif groups[2] is not None:
305
+ self._class_to_fileid[groups[2]] = fileid
306
+ vnclass = groups[2] # for <MEMBER> elts.
307
+ self._shortid_to_longid[self.shortid(vnclass)] = vnclass
308
+ else:
309
+ assert False, "unexpected match condition"
310
+
311
+ ######################################################################
312
+ # { Identifier conversion
313
+ ######################################################################
314
+
315
+ def longid(self, shortid):
316
+ """Returns longid of a VerbNet class
317
+
318
+ Given a short VerbNet class identifier (eg '37.10'), map it
319
+ to a long id (eg 'confess-37.10'). If ``shortid`` is already a
320
+ long id, then return it as-is"""
321
+ if self._LONGID_RE.match(shortid):
322
+ return shortid # it's already a longid.
323
+ elif not self._SHORTID_RE.match(shortid):
324
+ raise ValueError("vnclass identifier %r not found" % shortid)
325
+ try:
326
+ return self._shortid_to_longid[shortid]
327
+ except KeyError as e:
328
+ raise ValueError("vnclass identifier %r not found" % shortid) from e
329
+
330
+ def shortid(self, longid):
331
+ """Returns shortid of a VerbNet class
332
+
333
+ Given a long VerbNet class identifier (eg 'confess-37.10'),
334
+ map it to a short id (eg '37.10'). If ``longid`` is already a
335
+ short id, then return it as-is."""
336
+ if self._SHORTID_RE.match(longid):
337
+ return longid # it's already a shortid.
338
+ m = self._LONGID_RE.match(longid)
339
+ if m:
340
+ return m.group(2)
341
+ else:
342
+ raise ValueError("vnclass identifier %r not found" % longid)
343
+
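+ # The two conversions are inverses of one another; the examples from the
+ # docstrings above, as doctest-style comments (VerbNet corpus data
+ # required, so skipped by default):
+ #
+ # >>> from nltk.corpus import verbnet
+ # >>> verbnet.shortid('confess-37.10')   # doctest: +SKIP
+ # '37.10'
+ # >>> verbnet.longid('37.10')            # doctest: +SKIP
+ # 'confess-37.10'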
344
+ ######################################################################
345
+ # { Frame access utility functions
346
+ ######################################################################
347
+
348
+ def _get_semantics_within_frame(self, vnframe):
349
+ """Returns semantics within a single frame
350
+
351
+ A utility function to retrieve the semantics within a frame in VerbNet.
352
+ Members of each semantics dictionary:
353
+ 1) Predicate value
354
+ 2) Arguments
+ 3) Negated flag
355
+
356
+ :param vnframe: An ElementTree containing the xml contents of
357
+ a VerbNet frame.
358
+ :return: semantics: semantics dictionary
359
+ """
360
+ semantics_within_single_frame = []
361
+ for pred in vnframe.findall("SEMANTICS/PRED"):
362
+ arguments = [
363
+ {"type": arg.get("type"), "value": arg.get("value")}
364
+ for arg in pred.findall("ARGS/ARG")
365
+ ]
366
+ semantics_within_single_frame.append(
367
+ {
368
+ "predicate_value": pred.get("value"),
369
+ "arguments": arguments,
370
+ "negated": pred.get("bool") == "!",
371
+ }
372
+ )
373
+ return semantics_within_single_frame
374
+
375
+ def _get_example_within_frame(self, vnframe):
376
+ """Returns example within a frame
377
+
378
+ A utility function to retrieve an example within a frame in VerbNet.
379
+
380
+ :param vnframe: An ElementTree containing the xml contents of
381
+ a VerbNet frame.
382
+ :return: example_text: The example sentence for this particular frame
383
+ """
384
+ example_element = vnframe.find("EXAMPLES/EXAMPLE")
385
+ if example_element is not None:
386
+ example_text = example_element.text
387
+ else:
388
+ example_text = ""
389
+ return example_text
390
+
391
+ def _get_description_within_frame(self, vnframe):
392
+ """Returns member description within frame
393
+
394
+ A utility function to retrieve a description of participating members
395
+ within a frame in VerbNet.
396
+
397
+ :param vnframe: An ElementTree containing the xml contents of
398
+ a VerbNet frame.
399
+ :return: description: a description dictionary with members - primary and secondary
400
+ """
401
+ description_element = vnframe.find("DESCRIPTION")
402
+ return {
403
+ "primary": description_element.attrib["primary"],
404
+ "secondary": description_element.get("secondary", ""),
405
+ }
406
+
407
+ def _get_syntactic_list_within_frame(self, vnframe):
408
+ """Returns semantics within a frame
409
+
410
+ A utility function to retrieve semantics within a frame in VerbNet.
411
+ Members of the syntactic dictionary:
412
+ 1) POS Tag
413
+ 2) Modifiers
414
+
415
+ :param vnframe: An ElementTree containing the xml contents of
416
+ a VerbNet frame.
417
+ :return: syntax_within_single_frame
418
+ """
419
+ syntax_within_single_frame = []
420
+ for elt in vnframe.find("SYNTAX"):
421
+ pos_tag = elt.tag
422
+ modifiers = dict()
423
+ modifiers["value"] = elt.get("value") if "value" in elt.attrib else ""
424
+ modifiers["selrestrs"] = [
425
+ {"value": restr.get("Value"), "type": restr.get("type")}
426
+ for restr in elt.findall("SELRESTRS/SELRESTR")
427
+ ]
428
+ modifiers["synrestrs"] = [
429
+ {"value": restr.get("Value"), "type": restr.get("type")}
430
+ for restr in elt.findall("SYNRESTRS/SYNRESTR")
431
+ ]
432
+ syntax_within_single_frame.append(
433
+ {"pos_tag": pos_tag, "modifiers": modifiers}
434
+ )
435
+ return syntax_within_single_frame
436
+
437
+ ######################################################################
438
+ # { Pretty Printing
439
+ ######################################################################
440
+
441
+ def pprint(self, vnclass):
442
+ """Returns pretty printed version of a VerbNet class
443
+
444
+ Return a string containing a pretty-printed representation of
445
+ the given VerbNet class.
446
+
447
+ :param vnclass: A VerbNet class identifier; or an ElementTree
448
+ containing the xml contents of a VerbNet class.
449
+ """
450
+ if isinstance(vnclass, str):
451
+ vnclass = self.vnclass(vnclass)
452
+
453
+ s = vnclass.get("ID") + "\n"
454
+ s += self.pprint_subclasses(vnclass, indent=" ") + "\n"
455
+ s += self.pprint_members(vnclass, indent=" ") + "\n"
456
+ s += " Thematic roles:\n"
457
+ s += self.pprint_themroles(vnclass, indent=" ") + "\n"
458
+ s += " Frames:\n"
459
+ s += self.pprint_frames(vnclass, indent=" ")
460
+ return s
461
+
462
+ def pprint_subclasses(self, vnclass, indent=""):
463
+ """Returns pretty printed version of subclasses of VerbNet class
464
+
465
+ Return a string containing a pretty-printed representation of
466
+ the given VerbNet class's subclasses.
467
+
468
+ :param vnclass: A VerbNet class identifier; or an ElementTree
469
+ containing the xml contents of a VerbNet class.
470
+ """
471
+ if isinstance(vnclass, str):
472
+ vnclass = self.vnclass(vnclass)
473
+
474
+ subclasses = self.subclasses(vnclass)
475
+ if not subclasses:
476
+ subclasses = ["(none)"]
477
+ s = "Subclasses: " + " ".join(subclasses)
478
+ return textwrap.fill(
479
+ s, 70, initial_indent=indent, subsequent_indent=indent + " "
480
+ )
481
+
482
+ def pprint_members(self, vnclass, indent=""):
483
+ """Returns pretty printed version of members in a VerbNet class
484
+
485
+ Return a string containing a pretty-printed representation of
486
+ the given VerbNet class's member verbs.
487
+
488
+ :param vnclass: A VerbNet class identifier; or an ElementTree
489
+ containing the xml contents of a VerbNet class.
490
+ """
491
+ if isinstance(vnclass, str):
492
+ vnclass = self.vnclass(vnclass)
493
+
494
+ members = self.lemmas(vnclass)
495
+ if not members:
496
+ members = ["(none)"]
497
+ s = "Members: " + " ".join(members)
498
+ return textwrap.fill(
499
+ s, 70, initial_indent=indent, subsequent_indent=indent + " "
500
+ )
501
+
502
+ def pprint_themroles(self, vnclass, indent=""):
503
+ """Returns pretty printed version of thematic roles in a VerbNet class
504
+
505
+ Return a string containing a pretty-printed representation of
506
+ the given VerbNet class's thematic roles.
507
+
508
+ :param vnclass: A VerbNet class identifier; or an ElementTree
509
+ containing the xml contents of a VerbNet class.
510
+ """
511
+ if isinstance(vnclass, str):
512
+ vnclass = self.vnclass(vnclass)
513
+
514
+ pieces = []
515
+ for themrole in self.themroles(vnclass):
516
+ piece = indent + "* " + themrole.get("type")
517
+ modifiers = [
518
+ modifier["value"] + modifier["type"]
519
+ for modifier in themrole["modifiers"]
520
+ ]
521
+ if modifiers:
522
+ piece += "[{}]".format(" ".join(modifiers))
523
+ pieces.append(piece)
524
+ return "\n".join(pieces)
525
+
526
+ def pprint_frames(self, vnclass, indent=""):
527
+ """Returns pretty version of all frames in a VerbNet class
528
+
529
+ Return a string containing a pretty-printed representation of
530
+ the list of frames within the VerbNet class.
531
+
532
+ :param vnclass: A VerbNet class identifier; or an ElementTree
533
+ containing the xml contents of a VerbNet class.
534
+ """
535
+ if isinstance(vnclass, str):
536
+ vnclass = self.vnclass(vnclass)
537
+ pieces = []
538
+ for vnframe in self.frames(vnclass):
539
+ pieces.append(self._pprint_single_frame(vnframe, indent))
540
+ return "\n".join(pieces)
541
+
542
+ def _pprint_single_frame(self, vnframe, indent=""):
543
+ """Returns pretty printed version of a single frame in a VerbNet class
544
+
545
+ Returns a string containing a pretty-printed representation of
546
+ the given frame.
547
+
548
+ :param vnframe: An ElementTree containing the xml contents of
549
+ a VerbNet frame.
550
+ """
551
+ frame_string = self._pprint_description_within_frame(vnframe, indent) + "\n"
552
+ frame_string += self._pprint_example_within_frame(vnframe, indent + " ") + "\n"
553
+ frame_string += (
554
+ self._pprint_syntax_within_frame(vnframe, indent + " Syntax: ") + "\n"
555
+ )
556
+ frame_string += indent + " Semantics:\n"
557
+ frame_string += self._pprint_semantics_within_frame(vnframe, indent + " ")
558
+ return frame_string
559
+
560
+ def _pprint_example_within_frame(self, vnframe, indent=""):
561
+ """Returns pretty printed version of example within frame in a VerbNet class
562
+
563
+ Return a string containing a pretty-printed representation of
564
+ the given VerbNet frame example.
565
+
566
+ :param vnframe: An ElementTree containing the xml contents of
567
+ a Verbnet frame.
568
+ """
569
+ if vnframe["example"]:
570
+ return indent + " Example: " + vnframe["example"]
571
+
572
+ def _pprint_description_within_frame(self, vnframe, indent=""):
573
+ """Returns pretty printed version of a VerbNet frame description
574
+
575
+ Return a string containing a pretty-printed representation of
576
+ the given VerbNet frame description.
577
+
578
+ :param vnframe: An ElementTree containing the xml contents of
579
+ a VerbNet frame.
580
+ """
581
+ description = indent + vnframe["description"]["primary"]
582
+ if vnframe["description"]["secondary"]:
583
+ description += " ({})".format(vnframe["description"]["secondary"])
584
+ return description
585
+
586
+ def _pprint_syntax_within_frame(self, vnframe, indent=""):
587
+ """Returns pretty printed version of syntax within a frame in a VerbNet class
588
+
589
+ Return a string containing a pretty-printed representation of
590
+ the given VerbNet frame syntax.
591
+
592
+ :param vnframe: An ElementTree containing the xml contents of
593
+ a VerbNet frame.
594
+ """
595
+ pieces = []
596
+ for element in vnframe["syntax"]:
597
+ piece = element["pos_tag"]
598
+ modifier_list = []
599
+ if "value" in element["modifiers"] and element["modifiers"]["value"]:
600
+ modifier_list.append(element["modifiers"]["value"])
601
+ modifier_list += [
602
+ "{}{}".format(restr["value"], restr["type"])
603
+ for restr in (
604
+ element["modifiers"]["selrestrs"]
605
+ + element["modifiers"]["synrestrs"]
606
+ )
607
+ ]
608
+ if modifier_list:
609
+ piece += "[{}]".format(" ".join(modifier_list))
610
+ pieces.append(piece)
611
+
612
+ return indent + " ".join(pieces)
613
+
614
+ def _pprint_semantics_within_frame(self, vnframe, indent=""):
615
+ """Returns a pretty printed version of semantics within frame in a VerbNet class
616
+
617
+ Return a string containing a pretty-printed representation of
618
+ the given VerbNet frame semantics.
619
+
620
+ :param vnframe: An ElementTree containing the xml contents of
621
+ a VerbNet frame.
622
+ """
623
+ pieces = []
624
+ for predicate in vnframe["semantics"]:
625
+ arguments = [argument["value"] for argument in predicate["arguments"]]
626
+ pieces.append(
627
+ f"{'¬' if predicate['negated'] else ''}{predicate['predicate_value']}({', '.join(arguments)})"
628
+ )
629
+ return "\n".join(f"{indent}* {piece}" for piece in pieces)
llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py ADDED
@@ -0,0 +1,2489 @@
1
+ # Natural Language Toolkit: WordNet
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bethard <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Edward Loper <[email protected]>
7
+ # Nitin Madnani <[email protected]>
8
+ # Nasruddin A’aidil Shari
9
+ # Sim Wei Ying Geraldine
10
+ # Soe Lynn
11
+ # Francis Bond <[email protected]>
12
+ # Eric Kafe <[email protected]>
13
+
14
+ # URL: <https://www.nltk.org/>
15
+ # For license information, see LICENSE.TXT
16
+
17
+ """
18
+ An NLTK interface for WordNet
19
+
20
+ WordNet is a lexical database of English.
21
+ Using synsets, it helps find conceptual relationships between words
22
+ such as hypernyms, hyponyms, synonyms, antonyms, etc.
23
+
24
+ For details about WordNet see:
25
+ https://wordnet.princeton.edu/
26
+
27
+ This module also allows you to find lemmas in languages
28
+ other than English from the Open Multilingual Wordnet
29
+ https://omwn.org/
30
+
31
+ """
32
+
33
+ import math
34
+ import os
35
+ import re
36
+ import warnings
37
+ from collections import defaultdict, deque
38
+ from functools import total_ordering
39
+ from itertools import chain, islice
40
+ from operator import itemgetter
41
+
42
+ from nltk.corpus.reader import CorpusReader
43
+ from nltk.internals import deprecated
44
+ from nltk.probability import FreqDist
45
+ from nltk.util import binary_search_file as _binary_search_file
46
+
47
+ ######################################################################
48
+ # Table of Contents
49
+ ######################################################################
50
+ # - Constants
51
+ # - Data Classes
52
+ # - WordNetError
53
+ # - Lemma
54
+ # - Synset
55
+ # - WordNet Corpus Reader
56
+ # - WordNet Information Content Corpus Reader
57
+ # - Similarity Metrics
58
+ # - Demo
59
+
60
+ ######################################################################
61
+ # Constants
62
+ ######################################################################
63
+
64
+ #: Positive infinity (for similarity functions)
65
+ _INF = 1e300
66
+
67
+ # { Part-of-speech constants
68
+ ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v"
69
+ # }
70
+
71
+ POS_LIST = [NOUN, VERB, ADJ, ADV]
72
+
73
+ # A table of strings that are used to express verb frames.
74
+ VERB_FRAME_STRINGS = (
75
+ None,
76
+ "Something %s",
77
+ "Somebody %s",
78
+ "It is %sing",
79
+ "Something is %sing PP",
80
+ "Something %s something Adjective/Noun",
81
+ "Something %s Adjective/Noun",
82
+ "Somebody %s Adjective",
83
+ "Somebody %s something",
84
+ "Somebody %s somebody",
85
+ "Something %s somebody",
86
+ "Something %s something",
87
+ "Something %s to somebody",
88
+ "Somebody %s on something",
89
+ "Somebody %s somebody something",
90
+ "Somebody %s something to somebody",
91
+ "Somebody %s something from somebody",
92
+ "Somebody %s somebody with something",
93
+ "Somebody %s somebody of something",
94
+ "Somebody %s something on somebody",
95
+ "Somebody %s somebody PP",
96
+ "Somebody %s something PP",
97
+ "Somebody %s PP",
98
+ "Somebody's (body part) %s",
99
+ "Somebody %s somebody to INFINITIVE",
100
+ "Somebody %s somebody INFINITIVE",
101
+ "Somebody %s that CLAUSE",
102
+ "Somebody %s to somebody",
103
+ "Somebody %s to INFINITIVE",
104
+ "Somebody %s whether INFINITIVE",
105
+ "Somebody %s somebody into V-ing something",
106
+ "Somebody %s something with something",
107
+ "Somebody %s INFINITIVE",
108
+ "Somebody %s VERB-ing",
109
+ "It %s that CLAUSE",
110
+ "Something %s INFINITIVE",
111
+ # OEWN additions:
112
+ "Somebody %s at something",
113
+ "Somebody %s for something",
114
+ "Somebody %s on somebody",
115
+ "Somebody %s out of somebody",
116
+ )
117
+
118
+ SENSENUM_RE = re.compile(r"\.[\d]+\.")
119
+
120
+
121
+ ######################################################################
122
+ # Data Classes
123
+ ######################################################################
124
+
125
+
126
+ class WordNetError(Exception):
127
+ """An exception class for wordnet-related errors."""
128
+
129
+
130
+ @total_ordering
131
+ class _WordNetObject:
132
+ """A common base class for lemmas and synsets."""
133
+
134
+ def hypernyms(self):
135
+ return self._related("@")
136
+
137
+ def _hypernyms(self):
138
+ return self._related("@")
139
+
140
+ def instance_hypernyms(self):
141
+ return self._related("@i")
142
+
143
+ def _instance_hypernyms(self):
144
+ return self._related("@i")
145
+
146
+ def hyponyms(self):
147
+ return self._related("~")
148
+
149
+ def instance_hyponyms(self):
150
+ return self._related("~i")
151
+
152
+ def member_holonyms(self):
153
+ return self._related("#m")
154
+
155
+ def substance_holonyms(self):
156
+ return self._related("#s")
157
+
158
+ def part_holonyms(self):
159
+ return self._related("#p")
160
+
161
+ def member_meronyms(self):
162
+ return self._related("%m")
163
+
164
+ def substance_meronyms(self):
165
+ return self._related("%s")
166
+
167
+ def part_meronyms(self):
168
+ return self._related("%p")
169
+
170
+ def topic_domains(self):
171
+ return self._related(";c")
172
+
173
+ def in_topic_domains(self):
174
+ return self._related("-c")
175
+
176
+ def region_domains(self):
177
+ return self._related(";r")
178
+
179
+ def in_region_domains(self):
180
+ return self._related("-r")
181
+
182
+ def usage_domains(self):
183
+ return self._related(";u")
184
+
185
+ def in_usage_domains(self):
186
+ return self._related("-u")
187
+
188
+ def attributes(self):
189
+ return self._related("=")
190
+
191
+ def entailments(self):
192
+ return self._related("*")
193
+
194
+ def causes(self):
195
+ return self._related(">")
196
+
197
+ def also_sees(self):
198
+ return self._related("^")
199
+
200
+ def verb_groups(self):
201
+ return self._related("$")
202
+
203
+ def similar_tos(self):
204
+ return self._related("&")
205
+
206
+ def __hash__(self):
207
+ return hash(self._name)
208
+
209
+ def __eq__(self, other):
210
+ return self._name == other._name
211
+
212
+ def __ne__(self, other):
213
+ return self._name != other._name
214
+
215
+ def __lt__(self, other):
216
+ return self._name < other._name
217
+
218
+
219
+ class Lemma(_WordNetObject):
220
+ """
221
+ The lexical entry for a single morphological form of a
222
+ sense-disambiguated word.
223
+
224
+ Create a Lemma from a "<word>.<pos>.<number>.<lemma>" string where:
225
+ <word> is the morphological stem identifying the synset
226
+ <pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
227
+ <number> is the sense number, counting from 0.
228
+ <lemma> is the morphological form of interest
229
+
230
+ Note that <word> and <lemma> can be different, e.g. the Synset
231
+ 'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and
232
+ 'salt.n.03.salinity'.
233
+
234
+ Lemma attributes, accessible via methods with the same name:
235
+
236
+ - name: The canonical name of this lemma.
237
+ - synset: The synset that this lemma belongs to.
238
+ - syntactic_marker: For adjectives, the WordNet string identifying the
239
+ syntactic position relative to the modified noun. See:
240
+ https://wordnet.princeton.edu/documentation/wninput5wn
241
+ For all other parts of speech, this attribute is None.
242
+ - count: The frequency of this lemma in wordnet.
243
+
244
+ Lemma methods:
245
+
246
+ Lemmas have the following methods for retrieving related Lemmas. They
247
+ correspond to the names for the pointer symbols defined here:
248
+ https://wordnet.princeton.edu/documentation/wninput5wn
249
+ These methods all return lists of Lemmas:
250
+
251
+ - antonyms
252
+ - hypernyms, instance_hypernyms
253
+ - hyponyms, instance_hyponyms
254
+ - member_holonyms, substance_holonyms, part_holonyms
255
+ - member_meronyms, substance_meronyms, part_meronyms
256
+ - topic_domains, region_domains, usage_domains
257
+ - attributes
258
+ - derivationally_related_forms
259
+ - entailments
260
+ - causes
261
+ - also_sees
262
+ - verb_groups
263
+ - similar_tos
264
+ - pertainyms
265
+ """
266
+
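+ # A usage sketch for the naming scheme described above (assumes the
+ # WordNet corpus data is installed, hence skipped by default):
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.lemma('salt.n.03.salinity').name()     # doctest: +SKIP
+ # 'salinity'
+ # >>> wn.lemma('salt.n.03.salinity').synset()   # doctest: +SKIP
+ # Synset('salt.n.03')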
267
+ __slots__ = [
268
+ "_wordnet_corpus_reader",
269
+ "_name",
270
+ "_syntactic_marker",
271
+ "_synset",
272
+ "_frame_strings",
273
+ "_frame_ids",
274
+ "_lexname_index",
275
+ "_lex_id",
276
+ "_lang",
277
+ "_key",
278
+ ]
279
+
280
+ def __init__(
281
+ self,
282
+ wordnet_corpus_reader,
283
+ synset,
284
+ name,
285
+ lexname_index,
286
+ lex_id,
287
+ syntactic_marker,
288
+ ):
289
+ self._wordnet_corpus_reader = wordnet_corpus_reader
290
+ self._name = name
291
+ self._syntactic_marker = syntactic_marker
292
+ self._synset = synset
293
+ self._frame_strings = []
294
+ self._frame_ids = []
295
+ self._lexname_index = lexname_index
296
+ self._lex_id = lex_id
297
+ self._lang = "eng"
298
+
299
+ self._key = None # gets set later.
300
+
301
+ def name(self):
302
+ return self._name
303
+
304
+ def syntactic_marker(self):
305
+ return self._syntactic_marker
306
+
307
+ def synset(self):
308
+ return self._synset
309
+
310
+ def frame_strings(self):
311
+ return self._frame_strings
312
+
313
+ def frame_ids(self):
314
+ return self._frame_ids
315
+
316
+ def lang(self):
317
+ return self._lang
318
+
319
+ def key(self):
320
+ return self._key
321
+
322
+ def __repr__(self):
323
+ tup = type(self).__name__, self._synset._name, self._name
324
+ return "%s('%s.%s')" % tup
325
+
326
+ def _related(self, relation_symbol):
327
+ get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset
328
+ if (self._name, relation_symbol) not in self._synset._lemma_pointers:
329
+ return []
330
+ return [
331
+ get_synset(pos, offset)._lemmas[lemma_index]
332
+ for pos, offset, lemma_index in self._synset._lemma_pointers[
333
+ self._name, relation_symbol
334
+ ]
335
+ ]
336
+
337
+ def count(self):
338
+ """Return the frequency count for this Lemma"""
339
+ return self._wordnet_corpus_reader.lemma_count(self)
340
+
341
+ def antonyms(self):
342
+ return self._related("!")
343
+
344
+ def derivationally_related_forms(self):
345
+ return self._related("+")
346
+
347
+ def pertainyms(self):
348
+ return self._related("\\")
349
+
350
+
351
+ class Synset(_WordNetObject):
352
+ """Create a Synset from a "<lemma>.<pos>.<number>" string where:
353
+ <lemma> is the word's morphological stem
354
+ <pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
355
+ <number> is the sense number, counting from 0.
356
+
357
+ Synset attributes, accessible via methods with the same name:
358
+
359
+ - name: The canonical name of this synset, formed using the first lemma
360
+ of this synset. Note that this may be different from the name
361
+ passed to the constructor if that string used a different lemma to
362
+ identify the synset.
363
+ - pos: The synset's part of speech, matching one of the module level
364
+ attributes ADJ, ADJ_SAT, ADV, NOUN or VERB.
365
+ - lemmas: A list of the Lemma objects for this synset.
366
+ - definition: The definition for this synset.
367
+ - examples: A list of example strings for this synset.
368
+ - offset: The offset in the WordNet dict file of this synset.
369
+ - lexname: The name of the lexicographer file containing this synset.
370
+
371
+ Synset methods:
372
+
373
+ Synsets have the following methods for retrieving related Synsets.
374
+ They correspond to the names for the pointer symbols defined here:
375
+ https://wordnet.princeton.edu/documentation/wninput5wn
376
+ These methods all return lists of Synsets.
377
+
378
+ - hypernyms, instance_hypernyms
379
+ - hyponyms, instance_hyponyms
380
+ - member_holonyms, substance_holonyms, part_holonyms
381
+ - member_meronyms, substance_meronyms, part_meronyms
382
+ - attributes
383
+ - entailments
384
+ - causes
385
+ - also_sees
386
+ - verb_groups
387
+ - similar_tos
388
+
389
+ Additionally, Synsets support the following methods specific to the
390
+ hypernym relation:
391
+
392
+ - root_hypernyms
393
+ - common_hypernyms
394
+ - lowest_common_hypernyms
395
+
396
+ Note that Synsets do not support the following relations because
397
+ these are defined by WordNet as lexical relations:
398
+
399
+ - antonyms
400
+ - derivationally_related_forms
401
+ - pertainyms
402
+ """
403
+
404
+ __slots__ = [
405
+ "_pos",
406
+ "_offset",
407
+ "_name",
408
+ "_frame_ids",
409
+ "_lemmas",
410
+ "_lemma_names",
411
+ "_definition",
412
+ "_examples",
413
+ "_lexname",
414
+ "_pointers",
415
+ "_lemma_pointers",
416
+ "_max_depth",
417
+ "_min_depth",
418
+ ]
419
+
420
+ def __init__(self, wordnet_corpus_reader):
421
+ self._wordnet_corpus_reader = wordnet_corpus_reader
422
+ # All of these attributes get initialized by
423
+ # WordNetCorpusReader._synset_from_pos_and_line()
424
+
425
+ self._pos = None
426
+ self._offset = None
427
+ self._name = None
428
+ self._frame_ids = []
429
+ self._lemmas = []
430
+ self._lemma_names = []
431
+ self._definition = None
432
+ self._examples = []
433
+ self._lexname = None # lexicographer name
434
+ self._all_hypernyms = None
435
+
436
+ self._pointers = defaultdict(set)
437
+ self._lemma_pointers = defaultdict(list)
438
+
439
+ def pos(self):
440
+ return self._pos
441
+
442
+ def offset(self):
443
+ return self._offset
444
+
445
+ def name(self):
446
+ return self._name
447
+
448
+ def frame_ids(self):
449
+ return self._frame_ids
450
+
451
+ def _doc(self, doc_type, default, lang="eng"):
452
+ """Helper method for Synset.definition and Synset.examples"""
453
+ corpus = self._wordnet_corpus_reader
454
+ if lang not in corpus.langs():
455
+ return None
456
+ elif lang == "eng":
457
+ return default
458
+ else:
459
+ corpus._load_lang_data(lang)
460
+ of = corpus.ss2of(self)
461
+ i = corpus.lg_attrs.index(doc_type)
462
+ if of in corpus._lang_data[lang][i]:
463
+ return corpus._lang_data[lang][i][of]
464
+ else:
465
+ return None
466
+
467
+ def definition(self, lang="eng"):
468
+ """Return definition in specified language"""
469
+ return self._doc("def", self._definition, lang=lang)
470
+
471
+ def examples(self, lang="eng"):
472
+ """Return examples in specified language"""
473
+ return self._doc("exe", self._examples, lang=lang)
474
+
475
+ def lexname(self):
476
+ return self._lexname
477
+
478
+ def _needs_root(self):
479
+ if self._pos == NOUN and self._wordnet_corpus_reader.get_version() != "1.6":
480
+ return False
481
+ else:
482
+ return True
483
+
484
+ def lemma_names(self, lang="eng"):
485
+ """Return all the lemma_names associated with the synset"""
486
+ if lang == "eng":
487
+ return self._lemma_names
488
+ else:
489
+ reader = self._wordnet_corpus_reader
490
+ reader._load_lang_data(lang)
491
+ i = reader.ss2of(self)
492
+ if i in reader._lang_data[lang][0]:
493
+ return reader._lang_data[lang][0][i]
494
+ else:
495
+ return []
496
+
497
+ def lemmas(self, lang="eng"):
498
+ """Return all the lemma objects associated with the synset"""
499
+ if lang == "eng":
500
+ return self._lemmas
501
+ elif self._name:
502
+ self._wordnet_corpus_reader._load_lang_data(lang)
503
+ lemmark = []
504
+ lemmy = self.lemma_names(lang)
505
+ for lem in lemmy:
506
+ temp = Lemma(
507
+ self._wordnet_corpus_reader,
508
+ self,
509
+ lem,
510
+ self._wordnet_corpus_reader._lexnames.index(self.lexname()),
511
+ 0,
512
+ None,
513
+ )
514
+ temp._lang = lang
515
+ lemmark.append(temp)
516
+ return lemmark
517
+
518
+ def root_hypernyms(self):
519
+ """Get the topmost hypernyms of this synset in WordNet."""
520
+
521
+ result = []
522
+ seen = set()
523
+ todo = [self]
524
+ while todo:
525
+ next_synset = todo.pop()
526
+ if next_synset not in seen:
527
+ seen.add(next_synset)
528
+ next_hypernyms = (
529
+ next_synset.hypernyms() + next_synset.instance_hypernyms()
530
+ )
531
+ if not next_hypernyms:
532
+ result.append(next_synset)
533
+ else:
534
+ todo.extend(next_hypernyms)
535
+ return result
536
+
537
+ # Simpler implementation which makes incorrect assumption that
538
+ # hypernym hierarchy is acyclic:
539
+ #
540
+ # if not self.hypernyms():
541
+ # return [self]
542
+ # else:
543
+ # return list(set(root for h in self.hypernyms()
544
+ # for root in h.root_hypernyms()))
545
+ def max_depth(self):
546
+ """
547
+ :return: The length of the longest hypernym path from this
548
+ synset to the root.
549
+ """
550
+
551
+ if "_max_depth" not in self.__dict__:
552
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
553
+ if not hypernyms:
554
+ self._max_depth = 0
555
+ else:
556
+ self._max_depth = 1 + max(h.max_depth() for h in hypernyms)
557
+ return self._max_depth
558
+
559
+ def min_depth(self):
560
+ """
561
+ :return: The length of the shortest hypernym path from this
562
+ synset to the root.
563
+ """
564
+
565
+ if "_min_depth" not in self.__dict__:
566
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
567
+ if not hypernyms:
568
+ self._min_depth = 0
569
+ else:
570
+ self._min_depth = 1 + min(h.min_depth() for h in hypernyms)
571
+ return self._min_depth
572
+
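+ # For instance (illustration only, WordNet data required): a root synset
+ # such as 'entity.n.01' has no hypernyms, so both depths are 0; other
+ # synsets count hypernym edges up to such a root.
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synset('entity.n.01').min_depth()   # doctest: +SKIP
+ # 0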
573
+ def closure(self, rel, depth=-1):
574
+ """
575
+ Return the transitive closure of source under the rel
576
+ relationship, breadth-first, discarding cycles:
577
+
578
+ >>> from nltk.corpus import wordnet as wn
579
+ >>> computer = wn.synset('computer.n.01')
580
+ >>> topic = lambda s:s.topic_domains()
581
+ >>> print(list(computer.closure(topic)))
582
+ [Synset('computer_science.n.01')]
583
+
584
+ UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2
585
+
586
+
587
+ Include redundant paths (but only once), avoiding duplicate searches
588
+ (from 'animal.n.01' to 'entity.n.01'):
589
+
590
+ >>> dog = wn.synset('dog.n.01')
591
+ >>> hyp = lambda s:s.hypernyms()
592
+ >>> print(list(dog.closure(hyp)))
593
+ [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\
594
+ Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\
595
+ Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\
596
+ Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\
597
+ Synset('physical_entity.n.01'), Synset('entity.n.01')]
598
+
599
+ UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7
600
+ """
601
+
602
+ from nltk.util import acyclic_breadth_first
603
+
604
+ for synset in acyclic_breadth_first(self, rel, depth):
605
+ if synset != self:
606
+ yield synset
607
+
608
+ from nltk.util import acyclic_depth_first as acyclic_tree
609
+ from nltk.util import unweighted_minimum_spanning_tree as mst
610
+
611
+ # Also add this shortcut?
612
+ # from nltk.util import unweighted_minimum_spanning_digraph as umsd
613
+
614
+ def tree(self, rel, depth=-1, cut_mark=None):
615
+ """
616
+ Return the full relation tree, including self,
617
+ discarding cycles:
618
+
619
+ >>> from nltk.corpus import wordnet as wn
620
+ >>> from pprint import pprint
621
+ >>> computer = wn.synset('computer.n.01')
622
+ >>> topic = lambda s:s.topic_domains()
623
+ >>> pprint(computer.tree(topic))
624
+ [Synset('computer.n.01'), [Synset('computer_science.n.01')]]
625
+
626
+ UserWarning: Discarded redundant search for Synset('computer.n.01') at depth -3
627
+
628
+
629
+ But keep duplicate branches (from 'animal.n.01' to 'entity.n.01'):
630
+
631
+ >>> dog = wn.synset('dog.n.01')
632
+ >>> hyp = lambda s:s.hypernyms()
633
+ >>> pprint(dog.tree(hyp))
634
+ [Synset('dog.n.01'),
635
+ [Synset('canine.n.02'),
636
+ [Synset('carnivore.n.01'),
637
+ [Synset('placental.n.01'),
638
+ [Synset('mammal.n.01'),
639
+ [Synset('vertebrate.n.01'),
640
+ [Synset('chordate.n.01'),
641
+ [Synset('animal.n.01'),
642
+ [Synset('organism.n.01'),
643
+ [Synset('living_thing.n.01'),
644
+ [Synset('whole.n.02'),
645
+ [Synset('object.n.01'),
646
+ [Synset('physical_entity.n.01'),
647
+ [Synset('entity.n.01')]]]]]]]]]]]]],
648
+ [Synset('domestic_animal.n.01'),
649
+ [Synset('animal.n.01'),
650
+ [Synset('organism.n.01'),
651
+ [Synset('living_thing.n.01'),
652
+ [Synset('whole.n.02'),
653
+ [Synset('object.n.01'),
654
+ [Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]]
655
+ """
656
+
657
+ from nltk.util import acyclic_branches_depth_first
658
+
659
+ return acyclic_branches_depth_first(self, rel, depth, cut_mark)
660
+
661
+ def hypernym_paths(self):
662
+ """
663
+ Get the path(s) from this synset to the root, where each path is a
664
+ list of the synset nodes traversed on the way to the root.
665
+
666
+ :return: A list of lists, where each list gives the node sequence
667
+ connecting the initial ``Synset`` node and a root node.
668
+ """
669
+ paths = []
670
+
671
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
672
+ if len(hypernyms) == 0:
673
+ paths = [[self]]
674
+
675
+ for hypernym in hypernyms:
676
+ for ancestor_list in hypernym.hypernym_paths():
677
+ ancestor_list.append(self)
678
+ paths.append(ancestor_list)
679
+ return paths
680
+
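+ # Each returned path runs from a root synset down to this synset; a
+ # sketch (illustration only, WordNet data required):
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> paths = wn.synset('dog.n.01').hypernym_paths()   # doctest: +SKIP
+ # >>> paths[0][0], paths[0][-1]                         # doctest: +SKIP
+ # (Synset('entity.n.01'), Synset('dog.n.01'))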
681
+ def common_hypernyms(self, other):
682
+ """
683
+ Find all synsets that are hypernyms of this synset and the
684
+ other synset.
685
+
686
+ :type other: Synset
687
+ :param other: other input synset.
688
+ :return: The synsets that are hypernyms of both synsets.
689
+ """
690
+ if not self._all_hypernyms:
691
+ self._all_hypernyms = {
692
+ self_synset
693
+ for self_synsets in self._iter_hypernym_lists()
694
+ for self_synset in self_synsets
695
+ }
696
+ if not other._all_hypernyms:
697
+ other._all_hypernyms = {
698
+ other_synset
699
+ for other_synsets in other._iter_hypernym_lists()
700
+ for other_synset in other_synsets
701
+ }
702
+ return list(self._all_hypernyms.intersection(other._all_hypernyms))
703
+
704
+ def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False):
705
+ """
706
+ Get a list of lowest synset(s) that both synsets have as a hypernym.
707
+ When `use_min_depth == False` this means that the synset which appears
708
+ as a hypernym of both `self` and `other` with the lowest maximum depth
709
+ is returned; if there are multiple such synsets at the same depth,
710
+ they are all returned.
711
+
712
+ However, if `use_min_depth == True` then the synset(s) which has/have
713
+ the lowest minimum depth and appear(s) in both paths is/are returned.
714
+
715
+ By setting the use_min_depth flag to True, the behavior of NLTK2 can be
716
+ preserved. This was changed in NLTK3 to give more accurate results in a
717
+ small set of cases, generally with synsets concerning people. (eg:
718
+ 'chef.n.01', 'fireman.n.01', etc.)
719
+
720
+ This method is an implementation of Ted Pedersen's "Lowest Common
721
+ Subsumer" method from the Perl Wordnet module. It can return either
722
+ "self" or "other" if they are a hypernym of the other.
723
+
724
+ :type other: Synset
725
+ :param other: other input synset
726
+ :type simulate_root: bool
727
+ :param simulate_root: The various verb taxonomies do not
728
+ share a single root which disallows this metric from working for
729
+ synsets that are not connected. This flag (False by default)
730
+ creates a fake root that connects all the taxonomies. Set it
731
+ to True to enable this behavior. For the noun taxonomy,
732
+ there is usually a default root except for WordNet version 1.6.
733
+ If you are using wordnet 1.6, a fake root will need to be added
734
+ for nouns as well.
735
+ :type use_min_depth: bool
736
+ :param use_min_depth: This setting mimics older (v2) behavior of NLTK
737
+ wordnet. If True, the min_depth function will be used to calculate the
738
+ lowest common hypernyms. This is known to give strange results for
739
+ some synset pairs (eg: 'chef.n.01', 'fireman.n.01') but is retained
740
+ for backwards compatibility
741
+ :return: The synsets that are the lowest common hypernyms of both
742
+ synsets
743
+ """
744
+ synsets = self.common_hypernyms(other)
745
+ if simulate_root:
746
+ fake_synset = Synset(None)
747
+ fake_synset._name = "*ROOT*"
748
+ fake_synset.hypernyms = lambda: []
749
+ fake_synset.instance_hypernyms = lambda: []
750
+ synsets.append(fake_synset)
751
+
752
+ try:
753
+ if use_min_depth:
754
+ max_depth = max(s.min_depth() for s in synsets)
755
+ unsorted_lch = [s for s in synsets if s.min_depth() == max_depth]
756
+ else:
757
+ max_depth = max(s.max_depth() for s in synsets)
758
+ unsorted_lch = [s for s in synsets if s.max_depth() == max_depth]
759
+ return sorted(unsorted_lch)
760
+ except ValueError:
761
+ return []
762
+
763
+ def hypernym_distances(self, distance=0, simulate_root=False):
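+ # A commonly cited example (illustration only, WordNet data required):
+ # dog and cat share 'carnivore.n.01' as their lowest common hypernym.
+ #
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synset('dog.n.01').lowest_common_hypernyms(wn.synset('cat.n.01'))   # doctest: +SKIP
+ # [Synset('carnivore.n.01')]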
764
+ """
765
+ Get the path(s) from this synset to the root, counting the distance
766
+ of each node from the initial node on the way. A set of
767
+ (synset, distance) tuples is returned.
768
+
769
+ :type distance: int
770
+ :param distance: the distance (number of edges) from this hypernym to
771
+ the original hypernym ``Synset`` on which this method was called.
772
+ :return: A set of ``(Synset, int)`` tuples where each ``Synset`` is
773
+ a hypernym of the first ``Synset``.
774
+ """
775
+ distances = {(self, distance)}
776
+ for hypernym in self._hypernyms() + self._instance_hypernyms():
777
+ distances |= hypernym.hypernym_distances(distance + 1, simulate_root=False)
778
+ if simulate_root:
779
+ fake_synset = Synset(None)
780
+ fake_synset._name = "*ROOT*"
781
+ fake_synset_distance = max(distances, key=itemgetter(1))[1]
782
+ distances.add((fake_synset, fake_synset_distance + 1))
783
+ return distances
784
+
785
+ def _shortest_hypernym_paths(self, simulate_root):
786
+ if self._name == "*ROOT*":
787
+ return {self: 0}
788
+
789
+ queue = deque([(self, 0)])
790
+ path = {}
791
+
792
+ while queue:
793
+ s, depth = queue.popleft()
794
+ if s in path:
795
+ continue
796
+ path[s] = depth
797
+
798
+ depth += 1
799
+ queue.extend((hyp, depth) for hyp in s._hypernyms())
800
+ queue.extend((hyp, depth) for hyp in s._instance_hypernyms())
801
+
802
+ if simulate_root:
803
+ fake_synset = Synset(None)
804
+ fake_synset._name = "*ROOT*"
805
+ path[fake_synset] = max(path.values()) + 1
806
+
807
+ return path
808
+
809
+ def shortest_path_distance(self, other, simulate_root=False):
810
+ """
811
+ Returns the distance of the shortest path linking the two synsets (if
812
+ one exists). For each synset, all the ancestor nodes and their
813
+ distances are recorded and compared. The ancestor node common to both
814
+ synsets that can be reached with the minimum number of traversals is
815
+ used. If no ancestor nodes are common, None is returned. If a node is
816
+ compared with itself 0 is returned.
817
+
818
+ :type other: Synset
819
+ :param other: The Synset to which the shortest path will be found.
820
+ :return: The number of edges in the shortest path connecting the two
821
+ nodes, or None if no path exists.
822
+ """
823
+
824
+ if self == other:
825
+ return 0
826
+
827
+ dist_dict1 = self._shortest_hypernym_paths(simulate_root)
828
+ dist_dict2 = other._shortest_hypernym_paths(simulate_root)
829
+
830
+ # For each ancestor synset common to both subject synsets, find the
831
+ # connecting path length. Return the shortest of these.
832
+
833
+ inf = float("inf")
834
+ path_distance = inf
835
+ for synset, d1 in dist_dict1.items():
836
+ d2 = dist_dict2.get(synset, inf)
837
+ path_distance = min(path_distance, d1 + d2)
838
+
839
+ return None if math.isinf(path_distance) else path_distance
840
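+
+ # Illustrative usage (a minimal sketch; the distance shown assumes
+ # WordNet 3.0, where dog and cat meet at carnivore.n.01, two edges
+ # away from each):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synset('dog.n.01').shortest_path_distance(wn.synset('cat.n.01'))
+ # 4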
+
841
+ # interface to similarity methods
842
+ def path_similarity(self, other, verbose=False, simulate_root=True):
843
+ """
844
+ Path Distance Similarity:
845
+ Return a score denoting how similar two word senses are, based on the
846
+ shortest path that connects the senses in the is-a (hypernym/hyponym)
847
+ taxonomy. The score is in the range 0 to 1, except in those cases where
848
+ a path cannot be found (will only be true for verbs as there are many
849
+ distinct verb taxonomies), in which case None is returned. A score of
850
+ 1 represents identity, i.e. comparing a sense with itself will return 1.
851
+
852
+ :type other: Synset
853
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
854
+ :type simulate_root: bool
855
+ :param simulate_root: The various verb taxonomies do not
856
+ share a single root which disallows this metric from working for
857
+ synsets that are not connected. This flag (True by default)
858
+ creates a fake root that connects all the taxonomies. Set it
859
+ to false to disable this behavior. For the noun taxonomy,
860
+ there is usually a default root except for WordNet version 1.6.
861
+ If you are using wordnet 1.6, a fake root will be added for nouns
862
+ as well.
863
+ :return: A score denoting the similarity of the two ``Synset`` objects,
864
+ normally between 0 and 1. None is returned if no connecting path
865
+ could be found. 1 is returned if a ``Synset`` is compared with
866
+ itself.
867
+ """
868
+
869
+ distance = self.shortest_path_distance(
870
+ other,
871
+ simulate_root=simulate_root and (self._needs_root() or other._needs_root()),
872
+ )
873
+ if distance is None or distance < 0:
874
+ return None
875
+ return 1.0 / (distance + 1)
876
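+
+ # Illustrative usage (a minimal sketch; with a shortest path of 4 edges
+ # the score is 1 / (4 + 1), assuming WordNet 3.0):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synset('dog.n.01').path_similarity(wn.synset('cat.n.01'))
+ # 0.2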
+
877
+ def lch_similarity(self, other, verbose=False, simulate_root=True):
878
+ """
879
+ Leacock Chodorow Similarity:
880
+ Return a score denoting how similar two word senses are, based on the
881
+ shortest path that connects the senses (as above) and the maximum depth
882
+ of the taxonomy in which the senses occur. The relationship is given as
883
+ -log(p/2d) where p is the shortest path length and d is the taxonomy
884
+ depth.
885
+
886
+ :type other: Synset
887
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
888
+ :type simulate_root: bool
889
+ :param simulate_root: The various verb taxonomies do not
890
+ share a single root which disallows this metric from working for
891
+ synsets that are not connected. This flag (True by default)
892
+ creates a fake root that connects all the taxonomies. Set it
893
+ to false to disable this behavior. For the noun taxonomy,
894
+ there is usually a default root except for WordNet version 1.6.
895
+ If you are using wordnet 1.6, a fake root will be added for nouns
896
+ as well.
897
+ :return: A score denoting the similarity of the two ``Synset`` objects,
898
+ normally greater than 0. None is returned if no connecting path
899
+ could be found. If a ``Synset`` is compared with itself, the
900
+ maximum score is returned, which varies depending on the taxonomy
901
+ depth.
902
+ """
903
+
904
+ if self._pos != other._pos:
905
+ raise WordNetError(
906
+ "Computing the lch similarity requires "
907
+ "%s and %s to have the same part of speech." % (self, other)
908
+ )
909
+
910
+ need_root = self._needs_root()
911
+
912
+ if self._pos not in self._wordnet_corpus_reader._max_depth:
913
+ self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root)
914
+
915
+ depth = self._wordnet_corpus_reader._max_depth[self._pos]
916
+
917
+ distance = self.shortest_path_distance(
918
+ other, simulate_root=simulate_root and need_root
919
+ )
920
+
921
+ if distance is None or distance < 0 or depth == 0:
922
+ return None
923
+ return -math.log((distance + 1) / (2.0 * depth))
924
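+
+ # Illustrative usage (a minimal sketch; with a path length of 4 and a
+ # noun-taxonomy depth of 19 the score is -log(5 / 38), about 2.03,
+ # assuming WordNet 3.0):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> round(wn.synset('dog.n.01').lch_similarity(wn.synset('cat.n.01')), 2)
+ # 2.03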
+
925
+ def wup_similarity(self, other, verbose=False, simulate_root=True):
926
+ """
927
+ Wu-Palmer Similarity:
928
+ Return a score denoting how similar two word senses are, based on the
929
+ depth of the two senses in the taxonomy and that of their Least Common
930
+ Subsumer (most specific ancestor node). Previously, the scores computed
931
+ by this implementation did _not_ always agree with those given by
932
+ Pedersen's Perl implementation of WordNet Similarity. However, with
933
+ the addition of the simulate_root flag (see below), the scores for
934
+ verbs now almost always agree, although not always for nouns.
935
+
936
+ The LCS does not necessarily feature in the shortest path connecting
937
+ the two senses, as it is by definition the common ancestor deepest in
938
+ the taxonomy, not closest to the two senses. Typically, however, it
939
+ will so feature. Where multiple candidates for the LCS exist, the one
940
+ whose shortest path to the root node is the longest will be selected.
941
+ Where the LCS has multiple paths to the root, the longer path is used
942
+ for the purposes of the calculation.
943
+
944
+ :type other: Synset
945
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
946
+ :type simulate_root: bool
947
+ :param simulate_root: The various verb taxonomies do not
948
+ share a single root which disallows this metric from working for
949
+ synsets that are not connected. This flag (True by default)
950
+ creates a fake root that connects all the taxonomies. Set it
951
+ to false to disable this behavior. For the noun taxonomy,
952
+ there is usually a default root except for WordNet version 1.6.
953
+ If you are using wordnet 1.6, a fake root will be added for nouns
954
+ as well.
955
+ :return: A float score denoting the similarity of the two ``Synset``
956
+ objects, normally greater than zero. If no connecting path between
957
+ the two senses can be found, None is returned.
958
+
959
+ """
960
+ need_root = self._needs_root() or other._needs_root()
961
+
962
+ # Note that to preserve behavior from NLTK2 we set use_min_depth=True
963
+ # It is possible that more accurate results could be obtained by
964
+ # removing this setting and it should be tested later on
965
+ subsumers = self.lowest_common_hypernyms(
966
+ other, simulate_root=simulate_root and need_root, use_min_depth=True
967
+ )
968
+
969
+ # If no LCS was found return None
970
+ if len(subsumers) == 0:
971
+ return None
972
+
973
+ subsumer = self if self in subsumers else subsumers[0]
974
+
975
+ # Get the longest path from the LCS to the root,
976
+ # including a correction:
977
+ # - add one because the calculations include both the start and end
978
+ # nodes
979
+ depth = subsumer.max_depth() + 1
980
+
981
+ # Note: No need for an additional add-one correction for non-nouns
982
+ # to account for an imaginary root node because that is now
983
+ # automatically handled by simulate_root
984
+ # if subsumer._pos != NOUN:
985
+ # depth += 1
986
+
987
+ # Get the shortest path from the LCS to each of the synsets it is
988
+ # subsuming. Add this to the LCS path length to get the path
989
+ # length from each synset to the root.
990
+ len1 = self.shortest_path_distance(
991
+ subsumer, simulate_root=simulate_root and need_root
992
+ )
993
+ len2 = other.shortest_path_distance(
994
+ subsumer, simulate_root=simulate_root and need_root
995
+ )
996
+ if len1 is None or len2 is None:
997
+ return None
998
+ len1 += depth
999
+ len2 += depth
1000
+ return (2.0 * depth) / (len1 + len2)
1001
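+
+ # Illustrative usage (a minimal sketch; with carnivore.n.01 as the LCS
+ # the score is 2 * depth(LCS) / (len1 + len2), roughly 0.86, assuming
+ # WordNet 3.0):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> round(wn.synset('dog.n.01').wup_similarity(wn.synset('cat.n.01')), 2)
+ # 0.86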
+
1002
+ def res_similarity(self, other, ic, verbose=False):
1003
+ """
1004
+ Resnik Similarity:
1005
+ Return a score denoting how similar two word senses are, based on the
1006
+ Information Content (IC) of the Least Common Subsumer (most specific
1007
+ ancestor node).
1008
+
1009
+ :type other: Synset
1010
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1011
+ :type ic: dict
1012
+ :param ic: an information content object (as returned by
1013
+ ``nltk.corpus.wordnet_ic.ic()``).
1014
+ :return: A float score denoting the similarity of the two ``Synset``
1015
+ objects. Synsets whose LCS is the root node of the taxonomy will
1016
+ have a score of 0 (e.g. N['dog'][0] and N['table'][0]).
1017
+ """
1018
+
1019
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1020
+ return lcs_ic
1021
+
1022
+ def jcn_similarity(self, other, ic, verbose=False):
1023
+ """
1024
+ Jiang-Conrath Similarity:
1025
+ Return a score denoting how similar two word senses are, based on the
1026
+ Information Content (IC) of the Least Common Subsumer (most specific
1027
+ ancestor node) and that of the two input Synsets. The relationship is
1028
+ given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)).
1029
+
1030
+ :type other: Synset
1031
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1032
+ :type ic: dict
1033
+ :param ic: an information content object (as returned by
1034
+ ``nltk.corpus.wordnet_ic.ic()``).
1035
+ :return: A float score denoting the similarity of the two ``Synset``
1036
+ objects.
1037
+ """
1038
+
1039
+ if self == other:
1040
+ return _INF
1041
+
1042
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1043
+
1044
+ # If either of the input synsets are the root synset, or have a
1045
+ # frequency of 0 (sparse data problem), return 0.
1046
+ if ic1 == 0 or ic2 == 0:
1047
+ return 0
1048
+
1049
+ ic_difference = ic1 + ic2 - 2 * lcs_ic
1050
+
1051
+ if ic_difference == 0:
1052
+ return _INF
1053
+
1054
+ return 1 / ic_difference
1055
+
1056
+ def lin_similarity(self, other, ic, verbose=False):
1057
+ """
1058
+ Lin Similarity:
1059
+ Return a score denoting how similar two word senses are, based on the
1060
+ Information Content (IC) of the Least Common Subsumer (most specific
1061
+ ancestor node) and that of the two input Synsets. The relationship is
1062
+ given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)).
1063
+
1064
+ :type other: Synset
1065
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1066
+ :type ic: dict
1067
+ :param ic: an information content object (as returned by
1068
+ ``nltk.corpus.wordnet_ic.ic()``).
1069
+ :return: A float score denoting the similarity of the two ``Synset``
1070
+ objects, in the range 0 to 1.
1071
+ """
1072
+
1073
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1074
+ return (2.0 * lcs_ic) / (ic1 + ic2)
1075
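+
+ # Illustrative usage of the three IC-based measures (a minimal sketch;
+ # the rounded scores assume WordNet 3.0 and the Brown IC file shipped
+ # with the wordnet_ic corpus):
+ # >>> from nltk.corpus import wordnet as wn, wordnet_ic
+ # >>> brown_ic = wordnet_ic.ic('ic-brown.dat')
+ # >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
+ # >>> round(dog.res_similarity(cat, brown_ic), 2)
+ # 7.91
+ # >>> round(dog.lin_similarity(cat, brown_ic), 2)
+ # 0.88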
+
1076
+ def _iter_hypernym_lists(self):
1077
+ """
1078
+ :return: An iterator over ``Synset`` objects that are either proper
1079
+ hypernyms or instance of hypernyms of the synset.
1080
+ """
1081
+ todo = [self]
1082
+ seen = set()
1083
+ while todo:
1084
+ for synset in todo:
1085
+ seen.add(synset)
1086
+ yield todo
1087
+ todo = [
1088
+ hypernym
1089
+ for synset in todo
1090
+ for hypernym in (synset.hypernyms() + synset.instance_hypernyms())
1091
+ if hypernym not in seen
1092
+ ]
1093
+
1094
+ def __repr__(self):
1095
+ return f"{type(self).__name__}('{self._name}')"
1096
+
1097
+ def _related(self, relation_symbol, sort=True):
1098
+ get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset
1099
+ if relation_symbol not in self._pointers:
1100
+ return []
1101
+ pointer_tuples = self._pointers[relation_symbol]
1102
+ r = [get_synset(pos, offset) for pos, offset in pointer_tuples]
1103
+ if sort:
1104
+ r.sort()
1105
+ return r
1106
+
1107
+
1108
+ ######################################################################
1109
+ # WordNet Corpus Reader
1110
+ ######################################################################
1111
+
1112
+
1113
+ class WordNetCorpusReader(CorpusReader):
1114
+ """
1115
+ A corpus reader used to access wordnet or its variants.
1116
+ """
1117
+
1118
+ _ENCODING = "utf8"
1119
+
1120
+ # { Part-of-speech constants
1121
+ ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v"
1122
+ # }
1123
+
1124
+ # { Filename constants
1125
+ _FILEMAP = {ADJ: "adj", ADV: "adv", NOUN: "noun", VERB: "verb"}
1126
+ # }
1127
+
1128
+ # { Part of speech constants
1129
+ _pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5}
1130
+ _pos_names = dict(tup[::-1] for tup in _pos_numbers.items())
1131
+ # }
1132
+
1133
+ #: A list of file identifiers for all the fileids used by this
1134
+ #: corpus reader.
1135
+ _FILES = (
1136
+ "cntlist.rev",
1137
+ "lexnames",
1138
+ "index.sense",
1139
+ "index.adj",
1140
+ "index.adv",
1141
+ "index.noun",
1142
+ "index.verb",
1143
+ "data.adj",
1144
+ "data.adv",
1145
+ "data.noun",
1146
+ "data.verb",
1147
+ "adj.exc",
1148
+ "adv.exc",
1149
+ "noun.exc",
1150
+ "verb.exc",
1151
+ )
1152
+
1153
+ def __init__(self, root, omw_reader):
1154
+ """
1155
+ Construct a new wordnet corpus reader, with the given root
1156
+ directory.
1157
+ """
1158
+
1159
+ super().__init__(root, self._FILES, encoding=self._ENCODING)
1160
+
1161
+ # An index that provides the file offset
1162
+ # Map from lemma -> pos -> synset_index -> offset
1163
+ self._lemma_pos_offset_map = defaultdict(dict)
1164
+
1165
+ # A cache so we don't have to reconstruct synsets
1166
+ # Map from pos -> offset -> synset
1167
+ self._synset_offset_cache = defaultdict(dict)
1168
+
1169
+ # A lookup for the maximum depth of each part of speech. Useful for
1170
+ # the lch similarity metric.
1171
+ self._max_depth = defaultdict(dict)
1172
+
1173
+ # Corpus reader containing omw data.
1174
+ self._omw_reader = omw_reader
1175
+
1176
+ # Corpus reader containing extended_omw data.
1177
+ self._exomw_reader = None
1178
+
1179
+ self.provenances = defaultdict(str)
1180
+ self.provenances["eng"] = ""
1181
+
1182
+ if self._omw_reader is None:
1183
+ warnings.warn(
1184
+ "The multilingual functions are not available with this Wordnet version"
1185
+ )
1186
+
1187
+ self.omw_langs = set()
1188
+
1189
+ # A cache to store the wordnet data of multiple languages
1190
+ self._lang_data = defaultdict(list)
1191
+
1192
+ self._data_file_map = {}
1193
+ self._exception_map = {}
1194
+ self._lexnames = []
1195
+ self._key_count_file = None
1196
+ self._key_synset_file = None
1197
+
1198
+ # Load the lexnames
1199
+ with self.open("lexnames") as fp:
1200
+ for i, line in enumerate(fp):
1201
+ index, lexname, _ = line.split()
1202
+ assert int(index) == i
1203
+ self._lexnames.append(lexname)
1204
+
1205
+ # Load the indices for lemmas and synset offsets
1206
+ self._load_lemma_pos_offset_map()
1207
+
1208
+ # load the exception file data into memory
1209
+ self._load_exception_map()
1210
+
1211
+ self.nomap = []
1212
+ self.splits = {}
1213
+
1214
+ # map from WordNet 3.0 for OMW data
1215
+ self.map30 = self.map_wn30()
1216
+
1217
+ # Language data attributes
1218
+ self.lg_attrs = ["lemma", "none", "def", "exe"]
1219
+
1220
+ def index_sense(self, version=None):
1221
+ """Read sense key to synset id mapping from index.sense file in corpus directory"""
1222
+ fn = "index.sense"
1223
+ if version:
1224
+ from nltk.corpus import CorpusReader, LazyCorpusLoader
1225
+
1226
+ ixreader = LazyCorpusLoader(version, CorpusReader, r".*/" + fn)
1227
+ else:
1228
+ ixreader = self
1229
+ with ixreader.open(fn) as fp:
1230
+ sensekey_map = {}
1231
+ for line in fp:
1232
+ fields = line.strip().split()
1233
+ sensekey = fields[0]
1234
+ pos = self._pos_names[int(sensekey.split("%")[1].split(":")[0])]
1235
+ sensekey_map[sensekey] = f"{fields[1]}-{pos}"
1236
+ return sensekey_map
1237
+
1238
+ def map_to_many(self):
1239
+ sensekey_map1 = self.index_sense("wordnet")
1240
+ sensekey_map2 = self.index_sense()
1241
+ synset_to_many = {}
1242
+ for synsetid in set(sensekey_map1.values()):
1243
+ synset_to_many[synsetid] = []
1244
+ for sensekey in set(sensekey_map1.keys()).intersection(
1245
+ set(sensekey_map2.keys())
1246
+ ):
1247
+ source = sensekey_map1[sensekey]
1248
+ target = sensekey_map2[sensekey]
1249
+ synset_to_many[source].append(target)
1250
+ return synset_to_many
1251
+
1252
+ def map_to_one(self):
1253
+ synset_to_many = self.map_to_many()
1254
+ synset_to_one = {}
1255
+ for source in synset_to_many:
1256
+ candidates_bag = synset_to_many[source]
1257
+ if candidates_bag:
1258
+ candidates_set = set(candidates_bag)
1259
+ if len(candidates_set) == 1:
1260
+ target = candidates_bag[0]
1261
+ else:
1262
+ counts = []
1263
+ for candidate in candidates_set:
1264
+ counts.append((candidates_bag.count(candidate), candidate))
1265
+ self.splits[source] = counts
1266
+ target = max(counts)[1]
1267
+ synset_to_one[source] = target
1268
+ if source[-1] == "s":
1269
+ # Add a mapping from "a" to target for applications like omw,
1270
+ # where only Lithuanian and Slovak use the "s" ss_type.
1271
+ synset_to_one[f"{source[:-1]}a"] = target
1272
+ else:
1273
+ self.nomap.append(source)
1274
+ return synset_to_one
1275
+
1276
+ def map_wn30(self):
1277
+ """Mapping from Wordnet 3.0 to currently loaded Wordnet version"""
1278
+ if self.get_version() == "3.0":
1279
+ return None
1280
+ else:
1281
+ return self.map_to_one()
1282
+
1283
+ # Open Multilingual WordNet functions, contributed by
1284
+ # Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn
1285
+
1286
+ def of2ss(self, of):
1287
+ """take an id and return the synsets"""
1288
+ return self.synset_from_pos_and_offset(of[-1], int(of[:8]))
1289
+
1290
+ def ss2of(self, ss):
1291
+ """return the ID of the synset"""
1292
+ if ss:
1293
+ return f"{ss.offset():08d}-{ss.pos()}"
1294
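+
+ # Illustrative usage (a minimal sketch; the offset shown assumes
+ # WordNet 3.0):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.ss2of(wn.synset('dog.n.01'))
+ # '02084071-n'
+ # >>> wn.of2ss('02084071-n')
+ # Synset('dog.n.01')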
+
1295
+ def _load_lang_data(self, lang):
1296
+ """load the wordnet data of the requested language from the file to
1297
+ the cache, _lang_data"""
1298
+
1299
+ if lang in self._lang_data:
1300
+ return
1301
+
1302
+ if self._omw_reader and not self.omw_langs:
1303
+ self.add_omw()
1304
+
1305
+ if lang not in self.langs():
1306
+ raise WordNetError("Language is not supported.")
1307
+
1308
+ if self._exomw_reader and lang not in self.omw_langs:
1309
+ reader = self._exomw_reader
1310
+ else:
1311
+ reader = self._omw_reader
1312
+
1313
+ prov = self.provenances[lang]
1314
+ if prov in ["cldr", "wikt"]:
1315
+ prov2 = prov
1316
+ else:
1317
+ prov2 = "data"
1318
+
1319
+ with reader.open(f"{prov}/wn-{prov2}-{lang.split('_')[0]}.tab") as fp:
1320
+ self.custom_lemmas(fp, lang)
1321
+ self.disable_custom_lemmas(lang)
1322
+
1323
+ def add_provs(self, reader):
1324
+ """Add languages from Multilingual Wordnet to the provenance dictionary"""
1325
+ fileids = reader.fileids()
1326
+ for fileid in fileids:
1327
+ prov, langfile = os.path.split(fileid)
1328
+ file_name, file_extension = os.path.splitext(langfile)
1329
+ if file_extension == ".tab":
1330
+ lang = file_name.split("-")[-1]
1331
+ if lang in self.provenances or prov in ["cldr", "wikt"]:
1332
+ # We already have another resource for this lang,
1333
+ # so we need to further specify the lang id:
1334
+ lang = f"{lang}_{prov}"
1335
+ self.provenances[lang] = prov
1336
+
1337
+ def add_omw(self):
1338
+ self.add_provs(self._omw_reader)
1339
+ self.omw_langs = set(self.provenances.keys())
1340
+
1341
+ def add_exomw(self):
1342
+ """
1343
+ Add languages from Extended OMW
1344
+
1345
+ >>> import nltk
1346
+ >>> from nltk.corpus import wordnet as wn
1347
+ >>> wn.add_exomw()
1348
+ >>> print(wn.synset('intrinsically.r.01').lemmas(lang="eng_wikt"))
1349
+ [Lemma('intrinsically.r.01.per_se'), Lemma('intrinsically.r.01.as_such')]
1350
+ """
1351
+ from nltk.corpus import extended_omw
1352
+
1353
+ self.add_omw()
1354
+ self._exomw_reader = extended_omw
1355
+ self.add_provs(self._exomw_reader)
1356
+
1357
+ def langs(self):
1358
+ """return a list of languages supported by Multilingual Wordnet"""
1359
+ return list(self.provenances.keys())
1360
+
1361
+ def _load_lemma_pos_offset_map(self):
1362
+ for suffix in self._FILEMAP.values():
1363
+
1364
+ # parse each line of the file (ignoring comment lines)
1365
+ with self.open("index.%s" % suffix) as fp:
1366
+ for i, line in enumerate(fp):
1367
+ if line.startswith(" "):
1368
+ continue
1369
+
1370
+ _iter = iter(line.split())
1371
+
1372
+ def _next_token():
1373
+ return next(_iter)
1374
+
1375
+ try:
1376
+
1377
+ # get the lemma and part-of-speech
1378
+ lemma = _next_token()
1379
+ pos = _next_token()
1380
+
1381
+ # get the number of synsets for this lemma
1382
+ n_synsets = int(_next_token())
1383
+ assert n_synsets > 0
1384
+
1385
+ # get and ignore the pointer symbols for all synsets of
1386
+ # this lemma
1387
+ n_pointers = int(_next_token())
1388
+ [_next_token() for _ in range(n_pointers)]
1389
+
1390
+ # same as number of synsets
1391
+ n_senses = int(_next_token())
1392
+ assert n_synsets == n_senses
1393
+
1394
+ # get and ignore number of senses ranked according to
1395
+ # frequency
1396
+ _next_token()
1397
+
1398
+ # get synset offsets
1399
+ synset_offsets = [int(_next_token()) for _ in range(n_synsets)]
1400
+
1401
+ # raise more informative error with file name and line number
1402
+ except (AssertionError, ValueError) as e:
1403
+ tup = ("index.%s" % suffix), (i + 1), e
1404
+ raise WordNetError("file %s, line %i: %s" % tup) from e
1405
+
1406
+ # map lemmas and parts of speech to synsets
1407
+ self._lemma_pos_offset_map[lemma][pos] = synset_offsets
1408
+ if pos == ADJ:
1409
+ self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets
1410
+
1411
+ def _load_exception_map(self):
1412
+ # load the exception file data into memory
1413
+ for pos, suffix in self._FILEMAP.items():
1414
+ self._exception_map[pos] = {}
1415
+ with self.open("%s.exc" % suffix) as fp:
1416
+ for line in fp:
1417
+ terms = line.split()
1418
+ self._exception_map[pos][terms[0]] = terms[1:]
1419
+ self._exception_map[ADJ_SAT] = self._exception_map[ADJ]
1420
+
1421
+ def _compute_max_depth(self, pos, simulate_root):
1422
+ """
1423
+ Compute the max depth for the given part of speech. This is
1424
+ used by the lch similarity metric.
1425
+ """
1426
+ depth = 0
1427
+ for ii in self.all_synsets(pos):
1428
+ try:
1429
+ depth = max(depth, ii.max_depth())
1430
+ except RuntimeError:
1431
+ print(ii)
1432
+ if simulate_root:
1433
+ depth += 1
1434
+ self._max_depth[pos] = depth
1435
+
1436
+ def get_version(self):
1437
+ fh = self._data_file(ADJ)
1438
+ fh.seek(0)
1439
+ for line in fh:
1440
+ match = re.search(r"Word[nN]et (\d+|\d+\.\d+) Copyright", line)
1441
+ if match is not None:
1442
+ version = match.group(1)
1443
+ fh.seek(0)
1444
+ return version
1445
+
1446
+ #############################################################
1447
+ # Loading Lemmas
1448
+ #############################################################
1449
+
1450
+ def lemma(self, name, lang="eng"):
1451
+ """Return lemma object that matches the name"""
1452
+ # cannot simply split on first '.',
1453
+ # e.g.: '.45_caliber.a.01..45_caliber'
1454
+ separator = SENSENUM_RE.search(name).end()
1455
+
1456
+ synset_name, lemma_name = name[: separator - 1], name[separator:]
1457
+
1458
+ synset = self.synset(synset_name)
1459
+ for lemma in synset.lemmas(lang):
1460
+ if lemma._name == lemma_name:
1461
+ return lemma
1462
+ raise WordNetError(f"No lemma {lemma_name!r} in {synset_name!r}")
1463
+
1464
+ def lemma_from_key(self, key):
1465
+ # Keys are case sensitive and always lower-case
1466
+ key = key.lower()
1467
+
1468
+ lemma_name, lex_sense = key.split("%")
1469
+ pos_number, lexname_index, lex_id, _, _ = lex_sense.split(":")
1470
+ pos = self._pos_names[int(pos_number)]
1471
+
1472
+ # open the key -> synset file if necessary
1473
+ if self._key_synset_file is None:
1474
+ self._key_synset_file = self.open("index.sense")
1475
+
1476
+ # Find the synset for the lemma.
1477
+ synset_line = _binary_search_file(self._key_synset_file, key)
1478
+ if not synset_line:
1479
+ raise WordNetError("No synset found for key %r" % key)
1480
+ offset = int(synset_line.split()[1])
1481
+ synset = self.synset_from_pos_and_offset(pos, offset)
1482
+ # return the corresponding lemma
1483
+ for lemma in synset._lemmas:
1484
+ if lemma._key == key:
1485
+ return lemma
1486
+ raise WordNetError("No lemma found for for key %r" % key)
1487
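+
+ # Illustrative usage (a minimal sketch; the sense key and output assume
+ # WordNet 3.0):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.lemma_from_key('dog%1:05:00::')
+ # Lemma('dog.n.01.dog')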
+
1488
+ #############################################################
1489
+ # Loading Synsets
1490
+ #############################################################
1491
+ def synset(self, name):
1492
+ # split name into lemma, part of speech and synset number
1493
+ lemma, pos, synset_index_str = name.lower().rsplit(".", 2)
1494
+ synset_index = int(synset_index_str) - 1
1495
+
1496
+ # get the offset for this synset
1497
+ try:
1498
+ offset = self._lemma_pos_offset_map[lemma][pos][synset_index]
1499
+ except KeyError as e:
1500
+ raise WordNetError(f"No lemma {lemma!r} with part of speech {pos!r}") from e
1501
+ except IndexError as e:
1502
+ n_senses = len(self._lemma_pos_offset_map[lemma][pos])
1503
+ raise WordNetError(
1504
+ f"Lemma {lemma!r} with part of speech {pos!r} only "
1505
+ f"has {n_senses} {'sense' if n_senses == 1 else 'senses'}"
1506
+ ) from e
1507
+
1508
+ # load synset information from the appropriate file
1509
+ synset = self.synset_from_pos_and_offset(pos, offset)
1510
+
1511
+ # some basic sanity checks on loaded attributes
1512
+ if pos == "s" and synset._pos == "a":
1513
+ message = (
1514
+ "Adjective satellite requested but only plain "
1515
+ "adjective found for lemma %r"
1516
+ )
1517
+ raise WordNetError(message % lemma)
1518
+ assert synset._pos == pos or (pos == "a" and synset._pos == "s")
1519
+
1520
+ # Return the synset object.
1521
+ return synset
1522
+
1523
+ def _data_file(self, pos):
1524
+ """
1525
+ Return an open file pointer for the data file for the given
1526
+ part of speech.
1527
+ """
1528
+ if pos == ADJ_SAT:
1529
+ pos = ADJ
1530
+ if self._data_file_map.get(pos) is None:
1531
+ fileid = "data.%s" % self._FILEMAP[pos]
1532
+ self._data_file_map[pos] = self.open(fileid)
1533
+ return self._data_file_map[pos]
1534
+
1535
+ def synset_from_pos_and_offset(self, pos, offset):
1536
+ """
1537
+ - pos: The synset's part of speech, matching one of the module level
1538
+ attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v').
1539
+ - offset: The byte offset of this synset in the WordNet dict file
1540
+ for this pos.
1541
+
1542
+ >>> from nltk.corpus import wordnet as wn
1543
+ >>> print(wn.synset_from_pos_and_offset('n', 1740))
1544
+ Synset('entity.n.01')
1545
+ """
1546
+ # Check to see if the synset is in the cache
1547
+ if offset in self._synset_offset_cache[pos]:
1548
+ return self._synset_offset_cache[pos][offset]
1549
+
1550
+ data_file = self._data_file(pos)
1551
+ data_file.seek(offset)
1552
+ data_file_line = data_file.readline()
1553
+ # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line:
1554
+ line_offset = data_file_line[:8]
1555
+ if (
1556
+ line_offset.isalnum()
1557
+ and line_offset == f"{offset:08d}"
1558
+ ):
1559
+ synset = self._synset_from_pos_and_line(pos, data_file_line)
1560
+ assert synset._offset == offset
1561
+ self._synset_offset_cache[pos][offset] = synset
1562
+ else:
1563
+ synset = None
1564
+ warnings.warn(f"No WordNet synset found for pos={pos} at offset={offset}.")
1565
+ data_file.seek(0)
1566
+ return synset
1567
+
1568
+ @deprecated("Use public method synset_from_pos_and_offset() instead")
1569
+ def _synset_from_pos_and_offset(self, *args, **kwargs):
1570
+ """
1571
+ Hack to help people like the readers of
1572
+ https://stackoverflow.com/a/27145655/1709587
1573
+ who were using this function before it was officially a public method
1574
+ """
1575
+ return self.synset_from_pos_and_offset(*args, **kwargs)
1576
+
1577
+ def _synset_from_pos_and_line(self, pos, data_file_line):
1578
+ # Construct a new (empty) synset.
1579
+ synset = Synset(self)
1580
+
1581
+ # parse the entry for this synset
1582
+ try:
1583
+
1584
+ # parse out the definitions and examples from the gloss
1585
+ columns_str, gloss = data_file_line.strip().split("|")
1586
+ definition = re.sub(r"[\"].*?[\"]", "", gloss).strip()
1587
+ examples = re.findall(r'"([^"]*)"', gloss)
1588
+ for example in examples:
1589
+ synset._examples.append(example)
1590
+
1591
+ synset._definition = definition.strip("; ")
1592
+
1593
+ # split the other info into fields
1594
+ _iter = iter(columns_str.split())
1595
+
1596
+ def _next_token():
1597
+ return next(_iter)
1598
+
1599
+ # get the offset
1600
+ synset._offset = int(_next_token())
1601
+
1602
+ # determine the lexicographer file name
1603
+ lexname_index = int(_next_token())
1604
+ synset._lexname = self._lexnames[lexname_index]
1605
+
1606
+ # get the part of speech
1607
+ synset._pos = _next_token()
1608
+
1609
+ # create Lemma objects for each lemma
1610
+ n_lemmas = int(_next_token(), 16)
1611
+ for _ in range(n_lemmas):
1612
+ # get the lemma name
1613
+ lemma_name = _next_token()
1614
+ # get the lex_id (used for sense_keys)
1615
+ lex_id = int(_next_token(), 16)
1616
+ # If the lemma has a syntactic marker, extract it.
1617
+ m = re.match(r"(.*?)(\(.*\))?$", lemma_name)
1618
+ lemma_name, syn_mark = m.groups()
1619
+ # create the lemma object
1620
+ lemma = Lemma(self, synset, lemma_name, lexname_index, lex_id, syn_mark)
1621
+ synset._lemmas.append(lemma)
1622
+ synset._lemma_names.append(lemma._name)
1623
+
1624
+ # collect the pointer tuples
1625
+ n_pointers = int(_next_token())
1626
+ for _ in range(n_pointers):
1627
+ symbol = _next_token()
1628
+ offset = int(_next_token())
1629
+ pos = _next_token()
1630
+ lemma_ids_str = _next_token()
1631
+ if lemma_ids_str == "0000":
1632
+ synset._pointers[symbol].add((pos, offset))
1633
+ else:
1634
+ source_index = int(lemma_ids_str[:2], 16) - 1
1635
+ target_index = int(lemma_ids_str[2:], 16) - 1
1636
+ source_lemma_name = synset._lemmas[source_index]._name
1637
+ lemma_pointers = synset._lemma_pointers
1638
+ tups = lemma_pointers[source_lemma_name, symbol]
1639
+ tups.append((pos, offset, target_index))
1640
+
1641
+ # read the verb frames
1642
+ try:
1643
+ frame_count = int(_next_token())
1644
+ except StopIteration:
1645
+ pass
1646
+ else:
1647
+ for _ in range(frame_count):
1648
+ # read the plus sign
1649
+ plus = _next_token()
1650
+ assert plus == "+"
1651
+ # read the frame and lemma number
1652
+ frame_number = int(_next_token())
1653
+ frame_string_fmt = VERB_FRAME_STRINGS[frame_number]
1654
+ lemma_number = int(_next_token(), 16)
1655
+ # lemma number of 00 means all words in the synset
1656
+ if lemma_number == 0:
1657
+ synset._frame_ids.append(frame_number)
1658
+ for lemma in synset._lemmas:
1659
+ lemma._frame_ids.append(frame_number)
1660
+ lemma._frame_strings.append(frame_string_fmt % lemma._name)
1661
+ # only a specific word in the synset
1662
+ else:
1663
+ lemma = synset._lemmas[lemma_number - 1]
1664
+ lemma._frame_ids.append(frame_number)
1665
+ lemma._frame_strings.append(frame_string_fmt % lemma._name)
1666
+
1667
+ # raise a more informative error with line text
1668
+ except ValueError as e:
1669
+ raise WordNetError(f"line {data_file_line!r}: {e}") from e
1670
+
1671
+ # set sense keys for Lemma objects - note that this has to be
1672
+ # done afterwards so that the relations are available
1673
+ for lemma in synset._lemmas:
1674
+ if synset._pos == ADJ_SAT:
1675
+ head_lemma = synset.similar_tos()[0]._lemmas[0]
1676
+ head_name = head_lemma._name
1677
+ head_id = "%02d" % head_lemma._lex_id
1678
+ else:
1679
+ head_name = head_id = ""
1680
+ tup = (
1681
+ lemma._name,
1682
+ WordNetCorpusReader._pos_numbers[synset._pos],
1683
+ lemma._lexname_index,
1684
+ lemma._lex_id,
1685
+ head_name,
1686
+ head_id,
1687
+ )
1688
+ lemma._key = ("%s%%%d:%02d:%02d:%s:%s" % tup).lower()
1689
+
1690
+ # the canonical name is based on the first lemma
1691
+ lemma_name = synset._lemmas[0]._name.lower()
1692
+ offsets = self._lemma_pos_offset_map[lemma_name][synset._pos]
1693
+ sense_index = offsets.index(synset._offset)
1694
+ tup = lemma_name, synset._pos, sense_index + 1
1695
+ synset._name = "%s.%s.%02i" % tup
1696
+
1697
+ return synset
1698
+
1699
+ def synset_from_sense_key(self, sense_key):
1700
+ """
1701
+ Retrieves synset based on a given sense_key. Sense keys can be
1702
+ obtained from lemma.key()
1703
+
1704
+ From https://wordnet.princeton.edu/documentation/senseidx5wn:
1705
+ A sense_key is represented as::
1706
+
1707
+ lemma % lex_sense (e.g. 'dog%1:18:01::')
1708
+
1709
+ where lex_sense is encoded as::
1710
+
1711
+ ss_type:lex_filenum:lex_id:head_word:head_id
1712
+
1713
+ :lemma: ASCII text of word/collocation, in lower case
1714
+ :ss_type: synset type for the sense (1 digit int)
1715
+ The synset type is encoded as follows::
1716
+
1717
+ 1 NOUN
1718
+ 2 VERB
1719
+ 3 ADJECTIVE
1720
+ 4 ADVERB
1721
+ 5 ADJECTIVE SATELLITE
1722
+ :lex_filenum: name of lexicographer file containing the synset for the sense (2 digit int)
1723
+ :lex_id: when paired with lemma, uniquely identifies a sense in the lexicographer file (2 digit int)
1724
+ :head_word: lemma of the first word in satellite's head synset
1725
+ Only used if sense is in an adjective satellite synset
1726
+ :head_id: uniquely identifies sense in a lexicographer file when paired with head_word
1727
+ Only used if head_word is present (2 digit int)
1728
+
1729
+ >>> import nltk
1730
+ >>> from nltk.corpus import wordnet as wn
1731
+ >>> print(wn.synset_from_sense_key("drive%1:04:03::"))
1732
+ Synset('drive.n.06')
1733
+
1734
+ >>> print(wn.synset_from_sense_key("driving%1:04:03::"))
1735
+ Synset('drive.n.06')
1736
+ """
1737
+ return self.lemma_from_key(sense_key).synset()
1738
+
1739
+ #############################################################
1740
+ # Retrieve synsets and lemmas.
1741
+ #############################################################
1742
+
1743
+ def synsets(self, lemma, pos=None, lang="eng", check_exceptions=True):
1744
+ """Load all synsets with a given lemma and part of speech tag.
1745
+ If no pos is specified, all synsets for all parts of speech
1746
+ will be loaded.
1747
+ If lang is specified, all the synsets associated with the lemma name
1748
+ of that language will be returned.
1749
+ """
1750
+ lemma = lemma.lower()
1751
+
1752
+ if lang == "eng":
1753
+ get_synset = self.synset_from_pos_and_offset
1754
+ index = self._lemma_pos_offset_map
1755
+ if pos is None:
1756
+ pos = POS_LIST
1757
+ return [
1758
+ get_synset(p, offset)
1759
+ for p in pos
1760
+ for form in self._morphy(lemma, p, check_exceptions)
1761
+ for offset in index[form].get(p, [])
1762
+ ]
1763
+
1764
+ else:
1765
+ self._load_lang_data(lang)
1766
+ synset_list = []
1767
+ if lemma in self._lang_data[lang][1]:
1768
+ for l in self._lang_data[lang][1][lemma]:
1769
+ if pos is not None and l[-1] != pos:
1770
+ continue
1771
+ synset_list.append(self.of2ss(l))
1772
+ return synset_list
1773
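+
+ # Illustrative usage (a minimal sketch; the results shown assume the
+ # English WordNet 3.0 data):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn.synsets('dog', pos=wn.VERB)
+ # [Synset('chase.v.01')]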
+
1774
+ def lemmas(self, lemma, pos=None, lang="eng"):
1775
+ """Return all Lemma objects with a name matching the specified lemma
1776
+ name and part of speech tag. Matches any part of speech tag if none is
1777
+ specified."""
1778
+
1779
+ lemma = lemma.lower()
1780
+ if lang == "eng":
1781
+ return [
1782
+ lemma_obj
1783
+ for synset in self.synsets(lemma, pos)
1784
+ for lemma_obj in synset.lemmas()
1785
+ if lemma_obj.name().lower() == lemma
1786
+ ]
1787
+
1788
+ else:
1789
+ self._load_lang_data(lang)
1790
+ lemmas = []
1791
+ syn = self.synsets(lemma, lang=lang)
1792
+ for s in syn:
1793
+ if pos is not None and s.pos() != pos:
1794
+ continue
1795
+ for lemma_obj in s.lemmas(lang=lang):
1796
+ if lemma_obj.name().lower() == lemma:
1797
+ lemmas.append(lemma_obj)
1798
+ return lemmas
1799
+
1800
+ def all_lemma_names(self, pos=None, lang="eng"):
1801
+ """Return all lemma names for all synsets for the given
1802
+ part of speech tag and language or languages. If pos is
1803
+ not specified, all synsets for all parts of speech will
1804
+ be used."""
1805
+
1806
+ if lang == "eng":
1807
+ if pos is None:
1808
+ return iter(self._lemma_pos_offset_map)
1809
+ else:
1810
+ return (
1811
+ lemma
1812
+ for lemma in self._lemma_pos_offset_map
1813
+ if pos in self._lemma_pos_offset_map[lemma]
1814
+ )
1815
+ else:
1816
+ self._load_lang_data(lang)
1817
+ lemma = []
1818
+ for i in self._lang_data[lang][0]:
1819
+ if pos is not None and i[-1] != pos:
1820
+ continue
1821
+ lemma.extend(self._lang_data[lang][0][i])
1822
+
1823
+ lemma = iter(set(lemma))
1824
+ return lemma
1825
+
1826
+ def all_omw_synsets(self, pos=None, lang=None):
1827
+ if lang not in self.langs():
1828
+ return None
1829
+ self._load_lang_data(lang)
1830
+ for of in self._lang_data[lang][0]:
1831
+ if not pos or of[-1] == pos:
1832
+ ss = self.of2ss(of)
1833
+ if ss:
1834
+ yield ss
1835
+
1836
+ # else:
1837
+ # A few OMW offsets don't exist in Wordnet 3.0.
1838
+ # warnings.warn(f"Language {lang}: no synset found for {of}")
1839
+
1840
+ def all_synsets(self, pos=None, lang="eng"):
1841
+ """Iterate over all synsets with a given part of speech tag.
1842
+ If no pos is specified, all synsets for all parts of speech
1843
+ will be loaded.
1844
+ """
1845
+ if lang == "eng":
1846
+ return self.all_eng_synsets(pos=pos)
1847
+ else:
1848
+ return self.all_omw_synsets(pos=pos, lang=lang)
1849
+
1850
+ def all_eng_synsets(self, pos=None):
1851
+ if pos is None:
1852
+ pos_tags = self._FILEMAP.keys()
1853
+ else:
1854
+ pos_tags = [pos]
1855
+
1856
+ cache = self._synset_offset_cache
1857
+ from_pos_and_line = self._synset_from_pos_and_line
1858
+
1859
+ # generate all synsets for each part of speech
1860
+ for pos_tag in pos_tags:
1861
+ # Open the file for reading. Note that we can not re-use
1862
+ # the file pointers from self._data_file_map here, because
1863
+ # we're defining an iterator, and those file pointers might
1864
+ # be moved while we're not looking.
1865
+ if pos_tag == ADJ_SAT:
1866
+ pos_file = ADJ
1867
+ else:
1868
+ pos_file = pos_tag
1869
+ fileid = "data.%s" % self._FILEMAP[pos_file]
1870
+ data_file = self.open(fileid)
1871
+
1872
+ try:
1873
+ # generate synsets for each line in the POS file
1874
+ offset = data_file.tell()
1875
+ line = data_file.readline()
1876
+ while line:
1877
+ if not line[0].isspace():
1878
+ if offset in cache[pos_tag]:
1879
+ # See if the synset is cached
1880
+ synset = cache[pos_tag][offset]
1881
+ else:
1882
+ # Otherwise, parse the line
1883
+ synset = from_pos_and_line(pos_tag, line)
1884
+ cache[pos_tag][offset] = synset
1885
+
1886
+ # adjective satellites are in the same file as
1887
+ # adjectives so only yield the synset if it's actually
1888
+ # a satellite
1889
+ if pos_tag == ADJ_SAT and synset._pos == ADJ_SAT:
1890
+ yield synset
1891
+ # for all other POS tags, yield all synsets (this means
1892
+ # that adjectives also include adjective satellites)
1893
+ elif pos_tag != ADJ_SAT:
1894
+ yield synset
1895
+ offset = data_file.tell()
1896
+ line = data_file.readline()
1897
+
1898
+ # close the extra file handle we opened
1899
+ except:
1900
+ data_file.close()
1901
+ raise
1902
+ else:
1903
+ data_file.close()
1904
+
1905
+ def words(self, lang="eng"):
1906
+ """return lemmas of the given language as list of words"""
1907
+ return self.all_lemma_names(lang=lang)
1908
+
1909
+ def synonyms(self, word, lang="eng"):
1910
+ """return nested list with the synonyms of the different senses of word in the given language"""
1911
+ return [
1912
+ sorted(list(set(ss.lemma_names(lang=lang)) - {word}))
1913
+ for ss in self.synsets(word, lang=lang)
1914
+ ]
1915
+
1916
+ def doc(self, file="README", lang="eng"):
1917
+ """Return the contents of readme, license or citation file
1918
+ use lang=lang to get the file for an individual language"""
1919
+ if lang == "eng":
1920
+ reader = self
1921
+ else:
1922
+ reader = self._omw_reader
1923
+ if lang in self.langs():
1924
+ file = f"{os.path.join(self.provenances[lang],file)}"
1925
+ try:
1926
+ with reader.open(file) as fp:
1927
+ return fp.read()
1928
+ except:
1929
+ if lang in self._lang_data:
1930
+ return f"Cannot determine {file} for {lang}"
1931
+ else:
1932
+ return f"Language {lang} is not supported."
1933
+
1934
+ def license(self, lang="eng"):
1935
+ """Return the contents of LICENSE (for omw)
1936
+ use lang=lang to get the license for an individual language"""
1937
+ return self.doc(file="LICENSE", lang=lang)
1938
+
1939
+ def readme(self, lang="eng"):
1940
+ """Return the contents of README (for omw)
1941
+ use lang=lang to get the readme for an individual language"""
1942
+ return self.doc(file="README", lang=lang)
1943
+
1944
+ def citation(self, lang="eng"):
1945
+ """Return the contents of citation.bib file (for omw)
1946
+ use lang=lang to get the citation for an individual language"""
1947
+ return self.doc(file="citation.bib", lang=lang)
1948
+
1949
+ #############################################################
1950
+ # Misc
1951
+ #############################################################
1952
+ def lemma_count(self, lemma):
1953
+ """Return the frequency count for this Lemma"""
1954
+ # Currently, counts only work for English
1955
+ if lemma._lang != "eng":
1956
+ return 0
1957
+ # open the count file if we haven't already
1958
+ if self._key_count_file is None:
1959
+ self._key_count_file = self.open("cntlist.rev")
1960
+ # find the key in the counts file and return the count
1961
+ line = _binary_search_file(self._key_count_file, lemma._key)
1962
+ if line:
1963
+ return int(line.rsplit(" ", 1)[-1])
1964
+ else:
1965
+ return 0
1966
+
1967
+ def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1968
+ return synset1.path_similarity(synset2, verbose, simulate_root)
1969
+
1970
+ path_similarity.__doc__ = Synset.path_similarity.__doc__
1971
+
1972
+ def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1973
+ return synset1.lch_similarity(synset2, verbose, simulate_root)
1974
+
1975
+ lch_similarity.__doc__ = Synset.lch_similarity.__doc__
1976
+
1977
+ def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1978
+ return synset1.wup_similarity(synset2, verbose, simulate_root)
1979
+
1980
+ wup_similarity.__doc__ = Synset.wup_similarity.__doc__
1981
+
1982
+ def res_similarity(self, synset1, synset2, ic, verbose=False):
1983
+ return synset1.res_similarity(synset2, ic, verbose)
1984
+
1985
+ res_similarity.__doc__ = Synset.res_similarity.__doc__
1986
+
1987
+ def jcn_similarity(self, synset1, synset2, ic, verbose=False):
1988
+ return synset1.jcn_similarity(synset2, ic, verbose)
1989
+
1990
+ jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
1991
+
1992
+ def lin_similarity(self, synset1, synset2, ic, verbose=False):
1993
+ return synset1.lin_similarity(synset2, ic, verbose)
1994
+
1995
+ lin_similarity.__doc__ = Synset.lin_similarity.__doc__
1996
+
1997
+ #############################################################
1998
+ # Morphy
1999
+ #############################################################
2000
+ # Morphy, adapted from Oliver Steele's pywordnet
2001
+ def morphy(self, form, pos=None, check_exceptions=True):
2002
+ """
2003
+ Find a possible base form for the given form, with the given
2004
+ part of speech, by checking WordNet's list of exceptional
2005
+ forms, and by recursively stripping affixes for this part of
2006
+ speech until a form in WordNet is found.
2007
+
2008
+ >>> from nltk.corpus import wordnet as wn
2009
+ >>> print(wn.morphy('dogs'))
2010
+ dog
2011
+ >>> print(wn.morphy('churches'))
2012
+ church
2013
+ >>> print(wn.morphy('aardwolves'))
2014
+ aardwolf
2015
+ >>> print(wn.morphy('abaci'))
2016
+ abacus
2017
+ >>> wn.morphy('hardrock', wn.ADV)
2018
+ >>> print(wn.morphy('book', wn.NOUN))
2019
+ book
2020
+ >>> wn.morphy('book', wn.ADJ)
2021
+ """
2022
+
2023
+ if pos is None:
2024
+ morphy = self._morphy
2025
+ analyses = chain(a for p in POS_LIST for a in morphy(form, p))
2026
+ else:
2027
+ analyses = self._morphy(form, pos, check_exceptions)
2028
+
2029
+ # get the first one we find
2030
+ first = list(islice(analyses, 1))
2031
+ if len(first) == 1:
2032
+ return first[0]
2033
+ else:
2034
+ return None
2035
+
2036
+ MORPHOLOGICAL_SUBSTITUTIONS = {
2037
+ NOUN: [
2038
+ ("s", ""),
2039
+ ("ses", "s"),
2040
+ ("ves", "f"),
2041
+ ("xes", "x"),
2042
+ ("zes", "z"),
2043
+ ("ches", "ch"),
2044
+ ("shes", "sh"),
2045
+ ("men", "man"),
2046
+ ("ies", "y"),
2047
+ ],
2048
+ VERB: [
2049
+ ("s", ""),
2050
+ ("ies", "y"),
2051
+ ("es", "e"),
2052
+ ("es", ""),
2053
+ ("ed", "e"),
2054
+ ("ed", ""),
2055
+ ("ing", "e"),
2056
+ ("ing", ""),
2057
+ ],
2058
+ ADJ: [("er", ""), ("est", ""), ("er", "e"), ("est", "e")],
2059
+ ADV: [],
2060
+ }
2061
+
2062
+ MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ]
2063
+
2064
+ def _morphy(self, form, pos, check_exceptions=True):
2065
+ # from jordanbg:
2066
+ # Given an original string x
2067
+ # 1. Apply rules once to the input to get y1, y2, y3, etc.
2068
+ # 2. Return all that are in the database
2069
+ # 3. If there are no matches, keep applying rules until you either
2070
+ # find a match or you can't go any further
2071
+
2072
+ exceptions = self._exception_map[pos]
2073
+ substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos]
2074
+
2075
+ def apply_rules(forms):
2076
+ return [
2077
+ form[: -len(old)] + new
2078
+ for form in forms
2079
+ for old, new in substitutions
2080
+ if form.endswith(old)
2081
+ ]
2082
+
2083
+ def filter_forms(forms):
2084
+ result = []
2085
+ seen = set()
2086
+ for form in forms:
2087
+ if form in self._lemma_pos_offset_map:
2088
+ if pos in self._lemma_pos_offset_map[form]:
2089
+ if form not in seen:
2090
+ result.append(form)
2091
+ seen.add(form)
2092
+ return result
2093
+
2094
+ # 0. Check the exception lists
2095
+ if check_exceptions:
2096
+ if form in exceptions:
2097
+ return filter_forms([form] + exceptions[form])
2098
+
2099
+ # 1. Apply rules once to the input to get y1, y2, y3, etc.
2100
+ forms = apply_rules([form])
2101
+
2102
+ # 2. Return all that are in the database (and check the original too)
2103
+ results = filter_forms([form] + forms)
2104
+ if results:
2105
+ return results
2106
+
2107
+ # 3. If there are no matches, keep applying rules until we find a match
2108
+ while forms:
2109
+ forms = apply_rules(forms)
2110
+ results = filter_forms(forms)
2111
+ if results:
2112
+ return results
2113
+
2114
+ # Return an empty list if we can't find anything
2115
+ return []
2116
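+
+ # Illustrative sketch of the rule cascade above (the public morphy()
+ # method wraps this; results assume the English WordNet data):
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> wn._morphy('churches', wn.NOUN)      # rule: 'ches' -> 'ch'
+ # ['church']
+ # >>> wn._morphy('aardwolves', wn.NOUN)    # found via the noun exception list
+ # ['aardwolf']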
+
2117
+ #############################################################
2118
+ # Create information content from corpus
2119
+ #############################################################
2120
+ def ic(self, corpus, weight_senses_equally=False, smoothing=1.0):
2121
+ """
2122
+ Creates an information content lookup dictionary from a corpus.
2123
+
2124
+ :type corpus: CorpusReader
2125
+ :param corpus: The corpus from which we create an information
2126
+ content dictionary.
2127
+ :type weight_senses_equally: bool
2128
+ :param weight_senses_equally: If this is True, gives all
2129
+ possible senses equal weight rather than dividing by the
2130
+ number of possible senses. (If a word has 3 senses, each
2131
+ sense gets 0.3333 per appearance when this is False, 1.0 when
2132
+ it is true.)
2133
+ :param smoothing: How much do we smooth synset counts (default is 1.0)
2134
+ :type smoothing: float
2135
+ :return: An information content dictionary
2136
+ """
2137
+ counts = FreqDist()
2138
+ for ww in corpus.words():
2139
+ counts[ww] += 1
2140
+
2141
+ ic = {}
2142
+ for pp in POS_LIST:
2143
+ ic[pp] = defaultdict(float)
2144
+
2145
+ # Initialize the counts with the smoothing value
2146
+ if smoothing > 0.0:
2147
+ for pp in POS_LIST:
2148
+ ic[pp][0] = smoothing
2149
+ for ss in self.all_synsets():
2150
+ pos = ss._pos
2151
+ if pos == ADJ_SAT:
2152
+ pos = ADJ
2153
+ ic[pos][ss._offset] = smoothing
2154
+
2155
+ for ww in counts:
2156
+ possible_synsets = self.synsets(ww)
2157
+ if len(possible_synsets) == 0:
2158
+ continue
2159
+
2160
+ # Distribute weight among possible synsets
2161
+ weight = float(counts[ww])
2162
+ if not weight_senses_equally:
2163
+ weight /= float(len(possible_synsets))
2164
+
2165
+ for ss in possible_synsets:
2166
+ pos = ss._pos
2167
+ if pos == ADJ_SAT:
2168
+ pos = ADJ
2169
+ for level in ss._iter_hypernym_lists():
2170
+ for hh in level:
2171
+ ic[pos][hh._offset] += weight
2172
+ # Add the weight to the root
2173
+ ic[pos][0] += weight
2174
+ return ic
2175
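+
+ # Illustrative usage (a minimal sketch; builds an IC dictionary from the
+ # Genesis corpus with no smoothing and uses it for a Lin score):
+ # >>> from nltk.corpus import wordnet as wn, genesis
+ # >>> genesis_ic = wn.ic(genesis, False, 0.0)
+ # >>> wn.synset('dog.n.01').lin_similarity(wn.synset('cat.n.01'), genesis_ic)
+ # (the exact score depends on the corpus counts)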
+
2176
+ def custom_lemmas(self, tab_file, lang):
2177
+ """
2178
+ Reads a custom tab file containing mappings of lemmas in the given
2179
+ language to Princeton WordNet 3.0 synset offsets, allowing NLTK's
2180
+ WordNet functions to then be used with that language.
2181
+
2182
+ See the "Tab files" section at https://omwn.org/omw1.html for
2183
+ documentation on the Multilingual WordNet tab file format.
2184
+
2185
+ :param tab_file: Tab file as a file or file-like object
2186
+ :type lang: str
2187
+ :param lang: ISO 639-3 code of the language of the tab file
2188
+ """
2189
+ lg = lang.split("_")[0]
2190
+ if len(lg) != 3:
2191
+ raise ValueError("lang should be a (3 character) ISO 639-3 code")
2192
+ self._lang_data[lang] = [
2193
+ defaultdict(list),
2194
+ defaultdict(list),
2195
+ defaultdict(list),
2196
+ defaultdict(list),
2197
+ ]
2198
+ for line in tab_file.readlines():
2199
+ if isinstance(line, bytes):
2200
+ # Support byte-stream files (e.g. as returned by Python 2's
2201
+ # open() function) as well as text-stream ones
2202
+ line = line.decode("utf-8")
2203
+ if not line.startswith("#"):
2204
+ triple = line.strip().split("\t")
2205
+ if len(triple) < 3:
2206
+ continue
2207
+ offset_pos, label = triple[:2]
2208
+ val = triple[-1]
2209
+ if self.map30:
2210
+ if offset_pos in self.map30:
2211
+ # Map offset_pos to current Wordnet version:
2212
+ offset_pos = self.map30[offset_pos]
2213
+ else:
2214
+ # Some OMW offsets were never in Wordnet:
2215
+ if (
2216
+ offset_pos not in self.nomap
2217
+ and offset_pos.replace("a", "s") not in self.nomap
2218
+ ):
2219
+ warnings.warn(
2220
+ f"{lang}: invalid offset {offset_pos} in '{line}'"
2221
+ )
2222
+ continue
2223
+ elif offset_pos[-1] == "a":
2224
+ wnss = self.of2ss(offset_pos)
2225
+ if wnss and wnss.pos() == "s": # Wordnet pos is "s"
2226
+ # Label OMW adjective satellites back to their Wordnet pos ("s")
2227
+ offset_pos = self.ss2of(wnss)
2228
+ pair = label.split(":")
2229
+ attr = pair[-1]
2230
+ if len(pair) == 1 or pair[0] == lg:
2231
+ if attr == "lemma":
2232
+ val = val.strip().replace(" ", "_")
2233
+ self._lang_data[lang][1][val.lower()].append(offset_pos)
2234
+ if attr in self.lg_attrs:
2235
+ self._lang_data[lang][self.lg_attrs.index(attr)][
2236
+ offset_pos
2237
+ ].append(val)
2238
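+
+ # Illustrative usage (a minimal sketch; the language code "qqq", the
+ # lemma "hound_word" and the single tab line are made up, following the
+ # "offset-pos <TAB> lang:attr <TAB> value" layout described above):
+ # >>> import io
+ # >>> from nltk.corpus import wordnet as wn
+ # >>> tab = io.StringIO("02084071-n\tqqq:lemma\thound_word\n")
+ # >>> wn.custom_lemmas(tab, lang="qqq")
+ # >>> wn.synset('dog.n.01').lemma_names(lang="qqq")
+ # ['hound_word']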
+
2239
+ def disable_custom_lemmas(self, lang):
2240
+ """prevent synsets from being mistakenly added"""
2241
+ for n in range(len(self.lg_attrs)):
2242
+ self._lang_data[lang][n].default_factory = None
2243
+
2244
+ ######################################################################
2245
+ # Visualize WordNet relation graphs using Graphviz
2246
+ ######################################################################
2247
+
2248
+ def digraph(
2249
+ self,
2250
+ inputs,
2251
+ rel=lambda s: s.hypernyms(),
2252
+ pos=None,
2253
+ maxdepth=-1,
2254
+ shapes=None,
2255
+ attr=None,
2256
+ verbose=False,
2257
+ ):
2258
+ """
2259
+ Produce a graphical representation from 'inputs' (a list of
2260
+ start nodes, which can be a mix of Synsets, Lemmas and/or words),
2261
+ and a synset relation, for drawing with the 'dot' graph visualisation
2262
+ program from the Graphviz package.
2263
+
2264
+ Return a string in the DOT graph file language, which can then be
2265
+ converted to an image by nltk.parse.dependencygraph.dot2img(dot_string).
2266
+
2267
+ Optional Parameters:
2268
+ :rel: Wordnet synset relation
2269
+ :pos: for words, restricts Part of Speech to 'n', 'v', 'a' or 'r'
2270
+ :maxdepth: limit the longest path
2271
+ :shapes: dictionary of strings that trigger a specified shape
2272
+ :attr: dictionary with global graph attributes
2273
+ :verbose: warn about cycles
2274
+
2275
+ >>> from nltk.corpus import wordnet as wn
2276
+ >>> print(wn.digraph([wn.synset('dog.n.01')]))
2277
+ digraph G {
2278
+ "Synset('animal.n.01')" -> "Synset('organism.n.01')";
2279
+ "Synset('canine.n.02')" -> "Synset('carnivore.n.01')";
2280
+ "Synset('carnivore.n.01')" -> "Synset('placental.n.01')";
2281
+ "Synset('chordate.n.01')" -> "Synset('animal.n.01')";
2282
+ "Synset('dog.n.01')" -> "Synset('canine.n.02')";
2283
+ "Synset('dog.n.01')" -> "Synset('domestic_animal.n.01')";
2284
+ "Synset('domestic_animal.n.01')" -> "Synset('animal.n.01')";
2285
+ "Synset('living_thing.n.01')" -> "Synset('whole.n.02')";
2286
+ "Synset('mammal.n.01')" -> "Synset('vertebrate.n.01')";
2287
+ "Synset('object.n.01')" -> "Synset('physical_entity.n.01')";
2288
+ "Synset('organism.n.01')" -> "Synset('living_thing.n.01')";
2289
+ "Synset('physical_entity.n.01')" -> "Synset('entity.n.01')";
2290
+ "Synset('placental.n.01')" -> "Synset('mammal.n.01')";
2291
+ "Synset('vertebrate.n.01')" -> "Synset('chordate.n.01')";
2292
+ "Synset('whole.n.02')" -> "Synset('object.n.01')";
2293
+ }
2294
+ <BLANKLINE>
2295
+ """
2296
+ from nltk.util import edge_closure, edges2dot
2297
+
2298
+ synsets = set()
2299
+ edges = set()
2300
+ if not shapes:
2301
+ shapes = dict()
2302
+ if not attr:
2303
+ attr = dict()
2304
+
2305
+ def add_lemma(lem):
2306
+ ss = lem.synset()
2307
+ synsets.add(ss)
2308
+ edges.add((lem, ss))
2309
+
2310
+ for node in inputs:
2311
+ typ = type(node)
2312
+ if typ == Synset:
2313
+ synsets.add(node)
2314
+ elif typ == Lemma:
2315
+ add_lemma(node)
2316
+ elif typ == str:
2317
+ for lemma in self.lemmas(node, pos):
2318
+ add_lemma(lemma)
2319
+
2320
+ for ss in synsets:
2321
+ edges = edges.union(edge_closure(ss, rel, maxdepth, verbose))
2322
+ dot_string = edges2dot(sorted(list(edges)), shapes=shapes, attr=attr)
2323
+ return dot_string
2324
+
2325
+
+
+######################################################################
+# WordNet Information Content Corpus Reader
+######################################################################
+
+
+class WordNetICCorpusReader(CorpusReader):
+    """
+    A corpus reader for the WordNet information content corpus.
+    """
+
+    def __init__(self, root, fileids):
+        CorpusReader.__init__(self, root, fileids, encoding="utf8")
+
+    # this load function would be more efficient if the data was pickled
+    # Note that we can't use NLTK's frequency distributions because
+    # synsets are overlapping (each instance of a synset also counts
+    # as an instance of its hypernyms)
+    def ic(self, icfile):
+        """
+        Load an information content file from the wordnet_ic corpus
+        and return a dictionary. This dictionary has just two keys,
+        NOUN and VERB, whose values are dictionaries that map from
+        synsets to information content values.
+
+        :type icfile: str
+        :param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat")
+        :return: An information content dictionary
+        """
+        ic = {}
+        ic[NOUN] = defaultdict(float)
+        ic[VERB] = defaultdict(float)
+        with self.open(icfile) as fp:
+            for num, line in enumerate(fp):
+                if num == 0:  # skip the header
+                    continue
+                fields = line.split()
+                offset = int(fields[0][:-1])
+                value = float(fields[1])
+                pos = _get_pos(fields[0])
+                if len(fields) == 3 and fields[2] == "ROOT":
+                    # Store root count.
+                    ic[pos][0] += value
+                if value != 0:
+                    ic[pos][offset] = value
+        return ic
+
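In practice this reader is reached through the nltk.corpus.wordnet_ic loader rather than constructed by hand. A minimal sketch of loading the Brown-based IC file (requires the wordnet_ic data package; ic-brown.dat is the example filename from the docstring above):

>>> from nltk.corpus import wordnet_ic
>>> brown_ic = wordnet_ic.ic('ic-brown.dat')  # doctest: +SKIP
>>> sorted(brown_ic.keys())  # the two POS keys, NOUN ('n') and VERB ('v')  # doctest: +SKIP
['n', 'v']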
+
+######################################################################
+# Similarity metrics
+######################################################################
+
+# TODO: Add in the option to manually add a new root node; this will be
+# useful for verb similarity as there exist multiple verb taxonomies.
+
+# More information about the metrics is available at
+# http://marimba.d.umn.edu/similarity/measures.html
+
+
+def path_similarity(synset1, synset2, verbose=False, simulate_root=True):
+    return synset1.path_similarity(
+        synset2, verbose=verbose, simulate_root=simulate_root
+    )
+
+
+def lch_similarity(synset1, synset2, verbose=False, simulate_root=True):
+    return synset1.lch_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
+
+
+def wup_similarity(synset1, synset2, verbose=False, simulate_root=True):
+    return synset1.wup_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
+
+
+def res_similarity(synset1, synset2, ic, verbose=False):
+    return synset1.res_similarity(synset2, ic, verbose=verbose)
+
+
+def jcn_similarity(synset1, synset2, ic, verbose=False):
+    return synset1.jcn_similarity(synset2, ic, verbose=verbose)
+
+
+def lin_similarity(synset1, synset2, ic, verbose=False):
+    return synset1.lin_similarity(synset2, ic, verbose=verbose)
+
+
+path_similarity.__doc__ = Synset.path_similarity.__doc__
+lch_similarity.__doc__ = Synset.lch_similarity.__doc__
+wup_similarity.__doc__ = Synset.wup_similarity.__doc__
+res_similarity.__doc__ = Synset.res_similarity.__doc__
+jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
+lin_similarity.__doc__ = Synset.lin_similarity.__doc__
+
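These module-level wrappers simply delegate to the corresponding Synset methods, so both call styles behave the same. A short sketch, with the numeric result shown only as an illustration since it depends on the installed WordNet data:

>>> from nltk.corpus import wordnet as wn
>>> from nltk.corpus.reader.wordnet import path_similarity
>>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
>>> dog.path_similarity(cat)  # doctest: +SKIP
0.2
>>> path_similarity(dog, cat)  # same result via the module-level wrapper  # doctest: +SKIP
0.2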
+
+def _lcs_ic(synset1, synset2, ic, verbose=False):
+    """
+    Get the information content of the least common subsumer that has
+    the highest information content value. If two nodes have no
+    explicit common subsumer, assume that they share an artificial
+    root node that is the hypernym of all explicit roots.
+
+    :type synset1: Synset
+    :param synset1: First input synset.
+    :type synset2: Synset
+    :param synset2: Second input synset. Must be the same part of
+        speech as the first synset.
+    :type ic: dict
+    :param ic: an information content object (as returned by ``load_ic()``).
+    :return: The information content of the two synsets and their most
+        informative subsumer
+    """
+    if synset1._pos != synset2._pos:
+        raise WordNetError(
+            "Computing the least common subsumer requires "
+            "%s and %s to have the same part of speech." % (synset1, synset2)
+        )
+
+    ic1 = information_content(synset1, ic)
+    ic2 = information_content(synset2, ic)
+    subsumers = synset1.common_hypernyms(synset2)
+    if len(subsumers) == 0:
+        subsumer_ic = 0
+    else:
+        subsumer_ic = max(information_content(s, ic) for s in subsumers)
+
+    if verbose:
+        print("> LCS Subsumer by content:", subsumer_ic)
+
+    return ic1, ic2, subsumer_ic
+
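The IC-based measures (res, jcn, lin) rely on this helper: they score a pair of synsets by the information content of their most informative shared hypernym. An illustrative, hedged sketch through the public API, reusing the brown_ic dictionary from the earlier sketch; the exact score depends on the IC file:

>>> dog.lowest_common_hypernyms(cat)  # doctest: +SKIP
[Synset('carnivore.n.01')]
>>> dog.res_similarity(cat, brown_ic)  # IC of the most informative subsumer  # doctest: +SKIP
7.911...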
+
+# Utility functions
+
+
+def information_content(synset, ic):
+    pos = synset._pos
+    if pos == ADJ_SAT:
+        pos = ADJ
+    try:
+        icpos = ic[pos]
+    except KeyError as e:
+        msg = "Information content file has no entries for part-of-speech: %s"
+        raise WordNetError(msg % pos) from e
+
+    counts = icpos[synset._offset]
+    if counts == 0:
+        return _INF
+    else:
+        return -math.log(counts / icpos[0])
+
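Put another way, IC(synset) = -log(count(synset) / count(root)), where the root count is the value stored at offset 0 by WordNetICCorpusReader.ic(), and synsets absent from the IC file get infinite IC. A hedged sketch of calling the helper directly (it is importable from nltk.corpus.reader.wordnet; actual values depend on the IC file used):

>>> from nltk.corpus.reader.wordnet import information_content
>>> ic_dog = information_content(wn.synset('dog.n.01'), brown_ic)  # doctest: +SKIP
>>> ic_root = information_content(wn.synset('entity.n.01'), brown_ic)  # doctest: +SKIP
>>> ic_dog > ic_root  # more specific synsets carry more information  # doctest: +SKIP
True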
+
+# get the part of speech (NOUN or VERB) from the information content record
+# (each identifier has a 'n' or 'v' suffix)
+
+
+def _get_pos(field):
+    if field[-1] == "n":
+        return NOUN
+    elif field[-1] == "v":
+        return VERB
+    else:
+        msg = (
+            "Unidentified part of speech in WordNet Information Content file "
+            "for field %s" % field
+        )
+        raise ValueError(msg)