applied-ai-018 committed
Commit c926b23 · verified · 1 parent: de33670

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/21.attention.dense.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step40/zero/21.attention.dense.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  7. venv/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/nltk/corpus/__init__.py +529 -0
  13. venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py +56 -0
  17. venv/lib/python3.10/site-packages/nltk/corpus/reader/api.py +516 -0
  18. venv/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py +218 -0
  19. venv/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py +265 -0
  20. venv/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py +158 -0
  21. venv/lib/python3.10/site-packages/nltk/corpus/reader/childes.py +630 -0
  22. venv/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py +273 -0
  23. venv/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py +88 -0
  24. venv/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py +309 -0
  25. venv/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py +115 -0
  26. venv/lib/python3.10/site-packages/nltk/corpus/reader/indian.py +93 -0
  27. venv/lib/python3.10/site-packages/nltk/corpus/reader/knbc.py +188 -0
  28. venv/lib/python3.10/site-packages/nltk/corpus/reader/lin.py +183 -0
  29. venv/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py +90 -0
  30. venv/lib/python3.10/site-packages/nltk/corpus/reader/rte.py +146 -0
  31. venv/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py +296 -0
  32. venv/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py +136 -0
  33. venv/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py +75 -0
  34. venv/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py +125 -0
  35. venv/lib/python3.10/site-packages/nltk/corpus/reader/timit.py +510 -0
  36. venv/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py +76 -0
  37. venv/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py +136 -0
  38. venv/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py +2489 -0
  39. venv/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py +256 -0
  40. venv/lib/python3.10/site-packages/nltk/corpus/util.py +154 -0
  41. venv/lib/python3.10/site-packages/nltk/tbl/__init__.py +31 -0
  42. venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/nltk/tbl/api.py +0 -0
  50. venv/lib/python3.10/site-packages/nltk/tbl/demo.py +418 -0
ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee3123e3cc7d1f751d60031d977e500b75fe3aace2d29b6584740b0590abb583
+ size 16778396
ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af035e319b0da1b229568996e8f7244178c9d79a1419221cf1e52986cc6f3aa2
+ size 50332828
ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5348b5f910e0d2abd0a0a0289104952d95ac88a00a237232d69e63c299d291dc
+ size 50332749
ckpts/universal/global_step40/zero/21.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09ea2cbd80bd1baa7bea719caf7632905115d09c98cfaa654e0a7fce1e7b9ea1
+ size 16778411
ckpts/universal/global_step40/zero/21.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcbf5788ccdba1bac28169fe41079e1cb110bacd42c90655c6247dd45e975cea
+ size 16778317
ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:058194a69d6117cc31775d6b9b1698f71bb01fdee4ec970e1bd0a2e7a8044d42
+ size 9387
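Note: the six .pt entries above are Git LFS pointer files (see the spec URL on their first line), not the tensors themselves; each records only the sha256 oid and byte size of the real blob. Below is a minimal sketch of verifying a fetched checkpoint against its pointer and then loading it. It assumes the blobs have already been pulled (for example with git lfs pull) and that PyTorch is installed; the path and values are copied from the first pointer above.

    import hashlib

    import torch

    PATH = "ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg.pt"
    PTR_OID = "ee3123e3cc7d1f751d60031d977e500b75fe3aace2d29b6584740b0590abb583"
    PTR_SIZE = 16778396

    # Compare the local file with the oid/size recorded in the LFS pointer.
    with open(PATH, "rb") as f:
        blob = f.read()
    assert len(blob) == PTR_SIZE, "size mismatch: file may still be an LFS pointer stub"
    assert hashlib.sha256(blob).hexdigest() == PTR_OID, "sha256 mismatch"

    # The blob is a serialized torch object (by its file name, an Adam first-moment buffer).
    state = torch.load(PATH, map_location="cpu")
    print(type(state))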
venv/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc ADDED
Binary file (1.15 kB).
 
venv/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc ADDED
Binary file (6.43 kB).
 
venv/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc ADDED
Binary file (21.2 kB).
 
venv/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc ADDED
Binary file (33.8 kB).
 
venv/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc ADDED
Binary file (5 kB).
 
venv/lib/python3.10/site-packages/nltk/corpus/__init__.py ADDED
@@ -0,0 +1,529 @@
1
+ # Natural Language Toolkit: Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # TODO this docstring isn't up-to-date!
9
+ """
10
+ NLTK corpus readers. The modules in this package provide functions
11
+ that can be used to read corpus files in a variety of formats. These
12
+ functions can be used to read both the corpus files that are
13
+ distributed in the NLTK corpus package, and corpus files that are part
14
+ of external corpora.
15
+
16
+ Available Corpora
17
+ =================
18
+
19
+ Please see https://www.nltk.org/nltk_data/ for a complete list.
20
+ Install corpora using nltk.download().
21
+
22
+ Corpus Reader Functions
23
+ =======================
24
+ Each corpus module defines one or more "corpus reader functions",
25
+ which can be used to read documents from that corpus. These functions
26
+ take an argument, ``item``, which is used to indicate which document
27
+ should be read from the corpus:
28
+
29
+ - If ``item`` is one of the unique identifiers listed in the corpus
30
+ module's ``items`` variable, then the corresponding document will
31
+ be loaded from the NLTK corpus package.
32
+ - If ``item`` is a filename, then that file will be read.
33
+
34
+ Additionally, corpus reader functions can be given lists of item
35
+ names; in which case, they will return a concatenation of the
36
+ corresponding documents.
37
+
38
+ Corpus reader functions are named based on the type of information
39
+ they return. Some common examples, and their return types, are:
40
+
41
+ - words(): list of str
42
+ - sents(): list of (list of str)
43
+ - paras(): list of (list of (list of str))
44
+ - tagged_words(): list of (str,str) tuple
45
+ - tagged_sents(): list of (list of (str,str))
46
+ - tagged_paras(): list of (list of (list of (str,str)))
47
+ - chunked_sents(): list of (Tree w/ (str,str) leaves)
48
+ - parsed_sents(): list of (Tree with str leaves)
49
+ - parsed_paras(): list of (list of (Tree with str leaves))
50
+ - xml(): A single xml ElementTree
51
+ - raw(): unprocessed corpus contents
52
+
53
+ For example, to read a list of the words in the Brown Corpus, use
54
+ ``nltk.corpus.brown.words()``:
55
+
56
+ >>> from nltk.corpus import brown
57
+ >>> print(", ".join(brown.words())) # doctest: +ELLIPSIS
58
+ The, Fulton, County, Grand, Jury, said, ...
59
+
60
+ """
61
+
62
+ import re
63
+
64
+ from nltk.corpus.reader import *
65
+ from nltk.corpus.util import LazyCorpusLoader
66
+ from nltk.tokenize import RegexpTokenizer
67
+
68
+ abc: PlaintextCorpusReader = LazyCorpusLoader(
69
+ "abc",
70
+ PlaintextCorpusReader,
71
+ r"(?!\.).*\.txt",
72
+ encoding=[("science", "latin_1"), ("rural", "utf8")],
73
+ )
74
+ alpino: AlpinoCorpusReader = LazyCorpusLoader(
75
+ "alpino", AlpinoCorpusReader, tagset="alpino"
76
+ )
77
+ bcp47: BCP47CorpusReader = LazyCorpusLoader(
78
+ "bcp47", BCP47CorpusReader, r"(cldr|iana)/*"
79
+ )
80
+ brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
81
+ "brown",
82
+ CategorizedTaggedCorpusReader,
83
+ r"c[a-z]\d\d",
84
+ cat_file="cats.txt",
85
+ tagset="brown",
86
+ encoding="ascii",
87
+ )
88
+ cess_cat: BracketParseCorpusReader = LazyCorpusLoader(
89
+ "cess_cat",
90
+ BracketParseCorpusReader,
91
+ r"(?!\.).*\.tbf",
92
+ tagset="unknown",
93
+ encoding="ISO-8859-15",
94
+ )
95
+ cess_esp: BracketParseCorpusReader = LazyCorpusLoader(
96
+ "cess_esp",
97
+ BracketParseCorpusReader,
98
+ r"(?!\.).*\.tbf",
99
+ tagset="unknown",
100
+ encoding="ISO-8859-15",
101
+ )
102
+ cmudict: CMUDictCorpusReader = LazyCorpusLoader(
103
+ "cmudict", CMUDictCorpusReader, ["cmudict"]
104
+ )
105
+ comtrans: AlignedCorpusReader = LazyCorpusLoader(
106
+ "comtrans", AlignedCorpusReader, r"(?!\.).*\.txt"
107
+ )
108
+ comparative_sentences: ComparativeSentencesCorpusReader = LazyCorpusLoader(
109
+ "comparative_sentences",
110
+ ComparativeSentencesCorpusReader,
111
+ r"labeledSentences\.txt",
112
+ encoding="latin-1",
113
+ )
114
+ conll2000: ConllChunkCorpusReader = LazyCorpusLoader(
115
+ "conll2000",
116
+ ConllChunkCorpusReader,
117
+ ["train.txt", "test.txt"],
118
+ ("NP", "VP", "PP"),
119
+ tagset="wsj",
120
+ encoding="ascii",
121
+ )
122
+ conll2002: ConllChunkCorpusReader = LazyCorpusLoader(
123
+ "conll2002",
124
+ ConllChunkCorpusReader,
125
+ r".*\.(test|train).*",
126
+ ("LOC", "PER", "ORG", "MISC"),
127
+ encoding="utf-8",
128
+ )
129
+ conll2007: DependencyCorpusReader = LazyCorpusLoader(
130
+ "conll2007",
131
+ DependencyCorpusReader,
132
+ r".*\.(test|train).*",
133
+ encoding=[("eus", "ISO-8859-2"), ("esp", "utf8")],
134
+ )
135
+ crubadan: CrubadanCorpusReader = LazyCorpusLoader(
136
+ "crubadan", CrubadanCorpusReader, r".*\.txt"
137
+ )
138
+ dependency_treebank: DependencyCorpusReader = LazyCorpusLoader(
139
+ "dependency_treebank", DependencyCorpusReader, r".*\.dp", encoding="ascii"
140
+ )
141
+ extended_omw: CorpusReader = LazyCorpusLoader(
142
+ "extended_omw", CorpusReader, r".*/wn-[a-z\-]*\.tab", encoding="utf8"
143
+ )
144
+ floresta: BracketParseCorpusReader = LazyCorpusLoader(
145
+ "floresta",
146
+ BracketParseCorpusReader,
147
+ r"(?!\.).*\.ptb",
148
+ "#",
149
+ tagset="unknown",
150
+ encoding="ISO-8859-15",
151
+ )
152
+ framenet15: FramenetCorpusReader = LazyCorpusLoader(
153
+ "framenet_v15",
154
+ FramenetCorpusReader,
155
+ [
156
+ "frRelation.xml",
157
+ "frameIndex.xml",
158
+ "fulltextIndex.xml",
159
+ "luIndex.xml",
160
+ "semTypes.xml",
161
+ ],
162
+ )
163
+ framenet: FramenetCorpusReader = LazyCorpusLoader(
164
+ "framenet_v17",
165
+ FramenetCorpusReader,
166
+ [
167
+ "frRelation.xml",
168
+ "frameIndex.xml",
169
+ "fulltextIndex.xml",
170
+ "luIndex.xml",
171
+ "semTypes.xml",
172
+ ],
173
+ )
174
+ gazetteers: WordListCorpusReader = LazyCorpusLoader(
175
+ "gazetteers", WordListCorpusReader, r"(?!LICENSE|\.).*\.txt", encoding="ISO-8859-2"
176
+ )
177
+ genesis: PlaintextCorpusReader = LazyCorpusLoader(
178
+ "genesis",
179
+ PlaintextCorpusReader,
180
+ r"(?!\.).*\.txt",
181
+ encoding=[
182
+ ("finnish|french|german", "latin_1"),
183
+ ("swedish", "cp865"),
184
+ (".*", "utf_8"),
185
+ ],
186
+ )
187
+ gutenberg: PlaintextCorpusReader = LazyCorpusLoader(
188
+ "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
189
+ )
190
+ ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*")
191
+ inaugural: PlaintextCorpusReader = LazyCorpusLoader(
192
+ "inaugural", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
193
+ )
194
+ # [XX] This should probably just use TaggedCorpusReader:
195
+ indian: IndianCorpusReader = LazyCorpusLoader(
196
+ "indian", IndianCorpusReader, r"(?!\.).*\.pos", tagset="unknown", encoding="utf8"
197
+ )
198
+
199
+ jeita: ChasenCorpusReader = LazyCorpusLoader(
200
+ "jeita", ChasenCorpusReader, r".*\.chasen", encoding="utf-8"
201
+ )
202
+ knbc: KNBCorpusReader = LazyCorpusLoader(
203
+ "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp"
204
+ )
205
+ lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader(
206
+ "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp"
207
+ )
208
+ mac_morpho: MacMorphoCorpusReader = LazyCorpusLoader(
209
+ "mac_morpho",
210
+ MacMorphoCorpusReader,
211
+ r"(?!\.).*\.txt",
212
+ tagset="unknown",
213
+ encoding="latin-1",
214
+ )
215
+ machado: PortugueseCategorizedPlaintextCorpusReader = LazyCorpusLoader(
216
+ "machado",
217
+ PortugueseCategorizedPlaintextCorpusReader,
218
+ r"(?!\.).*\.txt",
219
+ cat_pattern=r"([a-z]*)/.*",
220
+ encoding="latin-1",
221
+ )
222
+ masc_tagged: CategorizedTaggedCorpusReader = LazyCorpusLoader(
223
+ "masc_tagged",
224
+ CategorizedTaggedCorpusReader,
225
+ r"(spoken|written)/.*\.txt",
226
+ cat_file="categories.txt",
227
+ tagset="wsj",
228
+ encoding="utf-8",
229
+ sep="_",
230
+ )
231
+ movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
232
+ "movie_reviews",
233
+ CategorizedPlaintextCorpusReader,
234
+ r"(?!\.).*\.txt",
235
+ cat_pattern=r"(neg|pos)/.*",
236
+ encoding="ascii",
237
+ )
238
+ multext_east: MTECorpusReader = LazyCorpusLoader(
239
+ "mte_teip5", MTECorpusReader, r"(oana).*\.xml", encoding="utf-8"
240
+ )
241
+ names: WordListCorpusReader = LazyCorpusLoader(
242
+ "names", WordListCorpusReader, r"(?!\.).*\.txt", encoding="ascii"
243
+ )
244
+ nps_chat: NPSChatCorpusReader = LazyCorpusLoader(
245
+ "nps_chat", NPSChatCorpusReader, r"(?!README|\.).*\.xml", tagset="wsj"
246
+ )
247
+ opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader(
248
+ "opinion_lexicon",
249
+ OpinionLexiconCorpusReader,
250
+ r"(\w+)\-words\.txt",
251
+ encoding="ISO-8859-2",
252
+ )
253
+ ppattach: PPAttachmentCorpusReader = LazyCorpusLoader(
254
+ "ppattach", PPAttachmentCorpusReader, ["training", "test", "devset"]
255
+ )
256
+ product_reviews_1: ReviewsCorpusReader = LazyCorpusLoader(
257
+ "product_reviews_1", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
258
+ )
259
+ product_reviews_2: ReviewsCorpusReader = LazyCorpusLoader(
260
+ "product_reviews_2", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
261
+ )
262
+ pros_cons: ProsConsCorpusReader = LazyCorpusLoader(
263
+ "pros_cons",
264
+ ProsConsCorpusReader,
265
+ r"Integrated(Cons|Pros)\.txt",
266
+ cat_pattern=r"Integrated(Cons|Pros)\.txt",
267
+ encoding="ISO-8859-2",
268
+ )
269
+ ptb: CategorizedBracketParseCorpusReader = (
270
+ LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions
271
+ "ptb",
272
+ CategorizedBracketParseCorpusReader,
273
+ r"(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG",
274
+ cat_file="allcats.txt",
275
+ tagset="wsj",
276
+ )
277
+ )
278
+ qc: StringCategoryCorpusReader = LazyCorpusLoader(
279
+ "qc", StringCategoryCorpusReader, ["train.txt", "test.txt"], encoding="ISO-8859-2"
280
+ )
281
+ reuters: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
282
+ "reuters",
283
+ CategorizedPlaintextCorpusReader,
284
+ "(training|test).*",
285
+ cat_file="cats.txt",
286
+ encoding="ISO-8859-2",
287
+ )
288
+ rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml")
289
+ senseval: SensevalCorpusReader = LazyCorpusLoader(
290
+ "senseval", SensevalCorpusReader, r"(?!\.).*\.pos"
291
+ )
292
+ sentence_polarity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
293
+ "sentence_polarity",
294
+ CategorizedSentencesCorpusReader,
295
+ r"rt-polarity\.(neg|pos)",
296
+ cat_pattern=r"rt-polarity\.(neg|pos)",
297
+ encoding="utf-8",
298
+ )
299
+ sentiwordnet: SentiWordNetCorpusReader = LazyCorpusLoader(
300
+ "sentiwordnet", SentiWordNetCorpusReader, "SentiWordNet_3.0.0.txt", encoding="utf-8"
301
+ )
302
+ shakespeare: XMLCorpusReader = LazyCorpusLoader(
303
+ "shakespeare", XMLCorpusReader, r"(?!\.).*\.xml"
304
+ )
305
+ sinica_treebank: SinicaTreebankCorpusReader = LazyCorpusLoader(
306
+ "sinica_treebank",
307
+ SinicaTreebankCorpusReader,
308
+ ["parsed"],
309
+ tagset="unknown",
310
+ encoding="utf-8",
311
+ )
312
+ state_union: PlaintextCorpusReader = LazyCorpusLoader(
313
+ "state_union", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="ISO-8859-2"
314
+ )
315
+ stopwords: WordListCorpusReader = LazyCorpusLoader(
316
+ "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8"
317
+ )
318
+ subjectivity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
319
+ "subjectivity",
320
+ CategorizedSentencesCorpusReader,
321
+ r"(quote.tok.gt9|plot.tok.gt9)\.5000",
322
+ cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]},
323
+ encoding="latin-1",
324
+ )
325
+ swadesh: SwadeshCorpusReader = LazyCorpusLoader(
326
+ "swadesh", SwadeshCorpusReader, r"(?!README|\.).*", encoding="utf8"
327
+ )
328
+ swadesh110: PanlexSwadeshCorpusReader = LazyCorpusLoader(
329
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh110/.*\.txt", encoding="utf8"
330
+ )
331
+ swadesh207: PanlexSwadeshCorpusReader = LazyCorpusLoader(
332
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh207/.*\.txt", encoding="utf8"
333
+ )
334
+ switchboard: SwitchboardCorpusReader = LazyCorpusLoader(
335
+ "switchboard", SwitchboardCorpusReader, tagset="wsj"
336
+ )
337
+ timit: TimitCorpusReader = LazyCorpusLoader("timit", TimitCorpusReader)
338
+ timit_tagged: TimitTaggedCorpusReader = LazyCorpusLoader(
339
+ "timit", TimitTaggedCorpusReader, r".+\.tags", tagset="wsj", encoding="ascii"
340
+ )
341
+ toolbox: ToolboxCorpusReader = LazyCorpusLoader(
342
+ "toolbox", ToolboxCorpusReader, r"(?!.*(README|\.)).*\.(dic|txt)"
343
+ )
344
+ treebank: BracketParseCorpusReader = LazyCorpusLoader(
345
+ "treebank/combined",
346
+ BracketParseCorpusReader,
347
+ r"wsj_.*\.mrg",
348
+ tagset="wsj",
349
+ encoding="ascii",
350
+ )
351
+ treebank_chunk: ChunkedCorpusReader = LazyCorpusLoader(
352
+ "treebank/tagged",
353
+ ChunkedCorpusReader,
354
+ r"wsj_.*\.pos",
355
+ sent_tokenizer=RegexpTokenizer(r"(?<=/\.)\s*(?![^\[]*\])", gaps=True),
356
+ para_block_reader=tagged_treebank_para_block_reader,
357
+ tagset="wsj",
358
+ encoding="ascii",
359
+ )
360
+ treebank_raw: PlaintextCorpusReader = LazyCorpusLoader(
361
+ "treebank/raw", PlaintextCorpusReader, r"wsj_.*", encoding="ISO-8859-2"
362
+ )
363
+ twitter_samples: TwitterCorpusReader = LazyCorpusLoader(
364
+ "twitter_samples", TwitterCorpusReader, r".*\.json"
365
+ )
366
+ udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader)
367
+ udhr2: PlaintextCorpusReader = LazyCorpusLoader(
368
+ "udhr2", PlaintextCorpusReader, r".*\.txt", encoding="utf8"
369
+ )
370
+ universal_treebanks: ConllCorpusReader = LazyCorpusLoader(
371
+ "universal_treebanks_v20",
372
+ ConllCorpusReader,
373
+ r".*\.conll",
374
+ columntypes=(
375
+ "ignore",
376
+ "words",
377
+ "ignore",
378
+ "ignore",
379
+ "pos",
380
+ "ignore",
381
+ "ignore",
382
+ "ignore",
383
+ "ignore",
384
+ "ignore",
385
+ ),
386
+ )
387
+ verbnet: VerbnetCorpusReader = LazyCorpusLoader(
388
+ "verbnet", VerbnetCorpusReader, r"(?!\.).*\.xml"
389
+ )
390
+ webtext: PlaintextCorpusReader = LazyCorpusLoader(
391
+ "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2"
392
+ )
393
+ wordnet: WordNetCorpusReader = LazyCorpusLoader(
394
+ "wordnet",
395
+ WordNetCorpusReader,
396
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
397
+ )
398
+ wordnet31: WordNetCorpusReader = LazyCorpusLoader(
399
+ "wordnet31",
400
+ WordNetCorpusReader,
401
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
402
+ )
403
+ wordnet2021: WordNetCorpusReader = LazyCorpusLoader(
404
+ "wordnet2021",
405
+ WordNetCorpusReader,
406
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
407
+ )
408
+ wordnet_ic: WordNetICCorpusReader = LazyCorpusLoader(
409
+ "wordnet_ic", WordNetICCorpusReader, r".*\.dat"
410
+ )
411
+ words: WordListCorpusReader = LazyCorpusLoader(
412
+ "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii"
413
+ )
414
+
415
+ # defined after treebank
416
+ propbank: PropbankCorpusReader = LazyCorpusLoader(
417
+ "propbank",
418
+ PropbankCorpusReader,
419
+ "prop.txt",
420
+ r"frames/.*\.xml",
421
+ "verbs.txt",
422
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
423
+ treebank,
424
+ ) # Must be defined *after* treebank corpus.
425
+ nombank: NombankCorpusReader = LazyCorpusLoader(
426
+ "nombank.1.0",
427
+ NombankCorpusReader,
428
+ "nombank.1.0",
429
+ r"frames/.*\.xml",
430
+ "nombank.1.0.words",
431
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
432
+ treebank,
433
+ ) # Must be defined *after* treebank corpus.
434
+ propbank_ptb: PropbankCorpusReader = LazyCorpusLoader(
435
+ "propbank",
436
+ PropbankCorpusReader,
437
+ "prop.txt",
438
+ r"frames/.*\.xml",
439
+ "verbs.txt",
440
+ lambda filename: filename.upper(),
441
+ ptb,
442
+ ) # Must be defined *after* ptb corpus.
443
+ nombank_ptb: NombankCorpusReader = LazyCorpusLoader(
444
+ "nombank.1.0",
445
+ NombankCorpusReader,
446
+ "nombank.1.0",
447
+ r"frames/.*\.xml",
448
+ "nombank.1.0.words",
449
+ lambda filename: filename.upper(),
450
+ ptb,
451
+ ) # Must be defined *after* ptb corpus.
452
+ semcor: SemcorCorpusReader = LazyCorpusLoader(
453
+ "semcor", SemcorCorpusReader, r"brown./tagfiles/br-.*\.xml", wordnet
454
+ ) # Must be defined *after* wordnet corpus.
455
+
456
+ nonbreaking_prefixes: NonbreakingPrefixesCorpusReader = LazyCorpusLoader(
457
+ "nonbreaking_prefixes",
458
+ NonbreakingPrefixesCorpusReader,
459
+ r"(?!README|\.).*",
460
+ encoding="utf8",
461
+ )
462
+ perluniprops: UnicharsCorpusReader = LazyCorpusLoader(
463
+ "perluniprops",
464
+ UnicharsCorpusReader,
465
+ r"(?!README|\.).*",
466
+ nltk_data_subdir="misc",
467
+ encoding="utf8",
468
+ )
469
+
470
+ # mwa_ppdb = LazyCorpusLoader(
471
+ # 'mwa_ppdb', MWAPPDBCorpusReader, r'(?!README|\.).*', nltk_data_subdir='misc', encoding='utf8')
472
+
473
+ # See https://github.com/nltk/nltk/issues/1579
474
+ # and https://github.com/nltk/nltk/issues/1716
475
+ #
476
+ # pl196x = LazyCorpusLoader(
477
+ # 'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml',
478
+ # cat_file='cats.txt', textid_file='textids.txt', encoding='utf8')
479
+ #
480
+ # ipipan = LazyCorpusLoader(
481
+ # 'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml')
482
+ #
483
+ # nkjp = LazyCorpusLoader(
484
+ # 'nkjp', NKJPCorpusReader, r'', encoding='utf8')
485
+ #
486
+ # panlex_lite = LazyCorpusLoader(
487
+ # 'panlex_lite', PanLexLiteCorpusReader)
488
+ #
489
+ # ycoe = LazyCorpusLoader(
490
+ # 'ycoe', YCOECorpusReader)
491
+ #
492
+ # corpus not available with NLTK; these lines caused help(nltk.corpus) to break
493
+ # hebrew_treebank = LazyCorpusLoader(
494
+ # 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt')
495
+
496
+ # FIXME: override any imported demo from various corpora, see https://github.com/nltk/nltk/issues/2116
497
+ def demo():
498
+ # This is out-of-date:
499
+ abc.demo()
500
+ brown.demo()
501
+ # chat80.demo()
502
+ cmudict.demo()
503
+ conll2000.demo()
504
+ conll2002.demo()
505
+ genesis.demo()
506
+ gutenberg.demo()
507
+ ieer.demo()
508
+ inaugural.demo()
509
+ indian.demo()
510
+ names.demo()
511
+ ppattach.demo()
512
+ senseval.demo()
513
+ shakespeare.demo()
514
+ sinica_treebank.demo()
515
+ state_union.demo()
516
+ stopwords.demo()
517
+ timit.demo()
518
+ toolbox.demo()
519
+ treebank.demo()
520
+ udhr.demo()
521
+ webtext.demo()
522
+ words.demo()
523
+
524
+
525
+ # ycoe.demo()
526
+
527
+ if __name__ == "__main__":
528
+ # demo()
529
+ pass
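Note: the module above only declares lazy loaders; no corpus is read until a loader attribute is first touched, and the underlying data packages are installed separately via nltk.download(), as the docstring says. A short usage sketch, following the docstring's own Brown-corpus example (assumes the brown data package has been downloaded):

    import nltk

    nltk.download("brown")           # fetch the data package once
    from nltk.corpus import brown    # a LazyCorpusLoader until first use

    print(brown.words()[:10])        # ['The', 'Fulton', 'County', 'Grand', 'Jury', ...]
    print(brown.sents()[0])          # first sentence as a list of word strings
    print(brown.tagged_words()[:3])  # [('The', 'AT'), ('Fulton', 'NP-TL'), ...]
    print(brown.categories()[:3])    # labels from cats.txt, e.g. ['adventure', 'belles_lettres', ...]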
venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (10.3 kB).
 
venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc ADDED
Binary file (1.19 kB).
 
venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc ADDED
Binary file (4.53 kB).
 
venv/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py ADDED
@@ -0,0 +1,56 @@
1
+ # Natural Language Toolkit: Europarl Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Nitin Madnani <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+
10
+ from nltk.corpus.reader import *
11
+ from nltk.corpus.util import LazyCorpusLoader
12
+
13
+ # Create a new corpus reader instance for each European language
14
+ danish: EuroparlCorpusReader = LazyCorpusLoader(
15
+ "europarl_raw/danish", EuroparlCorpusReader, r"ep-.*\.da", encoding="utf-8"
16
+ )
17
+
18
+ dutch: EuroparlCorpusReader = LazyCorpusLoader(
19
+ "europarl_raw/dutch", EuroparlCorpusReader, r"ep-.*\.nl", encoding="utf-8"
20
+ )
21
+
22
+ english: EuroparlCorpusReader = LazyCorpusLoader(
23
+ "europarl_raw/english", EuroparlCorpusReader, r"ep-.*\.en", encoding="utf-8"
24
+ )
25
+
26
+ finnish: EuroparlCorpusReader = LazyCorpusLoader(
27
+ "europarl_raw/finnish", EuroparlCorpusReader, r"ep-.*\.fi", encoding="utf-8"
28
+ )
29
+
30
+ french: EuroparlCorpusReader = LazyCorpusLoader(
31
+ "europarl_raw/french", EuroparlCorpusReader, r"ep-.*\.fr", encoding="utf-8"
32
+ )
33
+
34
+ german: EuroparlCorpusReader = LazyCorpusLoader(
35
+ "europarl_raw/german", EuroparlCorpusReader, r"ep-.*\.de", encoding="utf-8"
36
+ )
37
+
38
+ greek: EuroparlCorpusReader = LazyCorpusLoader(
39
+ "europarl_raw/greek", EuroparlCorpusReader, r"ep-.*\.el", encoding="utf-8"
40
+ )
41
+
42
+ italian: EuroparlCorpusReader = LazyCorpusLoader(
43
+ "europarl_raw/italian", EuroparlCorpusReader, r"ep-.*\.it", encoding="utf-8"
44
+ )
45
+
46
+ portuguese: EuroparlCorpusReader = LazyCorpusLoader(
47
+ "europarl_raw/portuguese", EuroparlCorpusReader, r"ep-.*\.pt", encoding="utf-8"
48
+ )
49
+
50
+ spanish: EuroparlCorpusReader = LazyCorpusLoader(
51
+ "europarl_raw/spanish", EuroparlCorpusReader, r"ep-.*\.es", encoding="utf-8"
52
+ )
53
+
54
+ swedish: EuroparlCorpusReader = LazyCorpusLoader(
55
+ "europarl_raw/swedish", EuroparlCorpusReader, r"ep-.*\.sv", encoding="utf-8"
56
+ )
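Note: each loader above exposes one language-specific slice of the raw Europarl distribution. A brief sketch of reading the English portion (assumes the europarl_raw data package has been downloaded):

    import nltk

    nltk.download("europarl_raw")
    from nltk.corpus import europarl_raw

    # Each language attribute is an EuroparlCorpusReader over the matching ep-*.<lang> files.
    print(europarl_raw.english.fileids()[:3])
    print(europarl_raw.english.words()[:10])
    print(europarl_raw.french.sents()[0])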
venv/lib/python3.10/site-packages/nltk/corpus/reader/api.py ADDED
@@ -0,0 +1,516 @@
1
+ # Natural Language Toolkit: API for Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ API for corpus readers.
11
+ """
12
+
13
+ import os
14
+ import re
15
+ from collections import defaultdict
16
+ from itertools import chain
17
+
18
+ from nltk.corpus.reader.util import *
19
+ from nltk.data import FileSystemPathPointer, PathPointer, ZipFilePathPointer
20
+
21
+
22
+ class CorpusReader:
23
+ """
24
+ A base class for "corpus reader" classes, each of which can be
25
+ used to read a specific corpus format. Each individual corpus
26
+ reader instance is used to read a specific corpus, consisting of
27
+ one or more files under a common root directory. Each file is
28
+ identified by its ``file identifier``, which is the relative path
29
+ to the file from the root directory.
30
+
31
+ A separate subclass is defined for each corpus format. These
32
+ subclasses define one or more methods that provide 'views' on the
33
+ corpus contents, such as ``words()`` (for a list of words) and
34
+ ``parsed_sents()`` (for a list of parsed sentences). Called with
35
+ no arguments, these methods will return the contents of the entire
36
+ corpus. For most corpora, these methods define one or more
37
+ selection arguments, such as ``fileids`` or ``categories``, which can
38
+ be used to select which portion of the corpus should be returned.
39
+ """
40
+
41
+ def __init__(self, root, fileids, encoding="utf8", tagset=None):
42
+ """
43
+ :type root: PathPointer or str
44
+ :param root: A path pointer identifying the root directory for
45
+ this corpus. If a string is specified, then it will be
46
+ converted to a ``PathPointer`` automatically.
47
+ :param fileids: A list of the files that make up this corpus.
48
+ This list can either be specified explicitly, as a list of
49
+ strings; or implicitly, as a regular expression over file
50
+ paths. The absolute path for each file will be constructed
51
+ by joining the reader's root to each file name.
52
+ :param encoding: The default unicode encoding for the files
53
+ that make up the corpus. The value of ``encoding`` can be any
54
+ of the following:
55
+
56
+ - A string: ``encoding`` is the encoding name for all files.
57
+ - A dictionary: ``encoding[file_id]`` is the encoding
58
+ name for the file whose identifier is ``file_id``. If
59
+ ``file_id`` is not in ``encoding``, then the file
60
+ contents will be processed using non-unicode byte strings.
61
+ - A list: ``encoding`` should be a list of ``(regexp, encoding)``
62
+ tuples. The encoding for a file whose identifier is ``file_id``
63
+ will be the ``encoding`` value for the first tuple whose
64
+ ``regexp`` matches the ``file_id``. If no tuple's ``regexp``
65
+ matches the ``file_id``, the file contents will be processed
66
+ using non-unicode byte strings.
67
+ - None: the file contents of all files will be
68
+ processed using non-unicode byte strings.
69
+ :param tagset: The name of the tagset used by this corpus, to be used
70
+ for normalizing or converting the POS tags returned by the
71
+ ``tagged_...()`` methods.
72
+ """
73
+ # Convert the root to a path pointer, if necessary.
74
+ if isinstance(root, str) and not isinstance(root, PathPointer):
75
+ m = re.match(r"(.*\.zip)/?(.*)$|", root)
76
+ zipfile, zipentry = m.groups()
77
+ if zipfile:
78
+ root = ZipFilePathPointer(zipfile, zipentry)
79
+ else:
80
+ root = FileSystemPathPointer(root)
81
+ elif not isinstance(root, PathPointer):
82
+ raise TypeError("CorpusReader: expected a string or a PathPointer")
83
+
84
+ # If `fileids` is a regexp, then expand it.
85
+ if isinstance(fileids, str):
86
+ fileids = find_corpus_fileids(root, fileids)
87
+
88
+ self._fileids = fileids
89
+ """A list of the relative paths for the fileids that make up
90
+ this corpus."""
91
+
92
+ self._root = root
93
+ """The root directory for this corpus."""
94
+
95
+ self._readme = "README"
96
+ self._license = "LICENSE"
97
+ self._citation = "citation.bib"
98
+
99
+ # If encoding was specified as a list of regexps, then convert
100
+ # it to a dictionary.
101
+ if isinstance(encoding, list):
102
+ encoding_dict = {}
103
+ for fileid in self._fileids:
104
+ for x in encoding:
105
+ (regexp, enc) = x
106
+ if re.match(regexp, fileid):
107
+ encoding_dict[fileid] = enc
108
+ break
109
+ encoding = encoding_dict
110
+
111
+ self._encoding = encoding
112
+ """The default unicode encoding for the fileids that make up
113
+ this corpus. If ``encoding`` is None, then the file
114
+ contents are processed using byte strings."""
115
+ self._tagset = tagset
116
+
117
+ def __repr__(self):
118
+ if isinstance(self._root, ZipFilePathPointer):
119
+ path = f"{self._root.zipfile.filename}/{self._root.entry}"
120
+ else:
121
+ path = "%s" % self._root.path
122
+ return f"<{self.__class__.__name__} in {path!r}>"
123
+
124
+ def ensure_loaded(self):
125
+ """
126
+ Load this corpus (if it has not already been loaded). This is
127
+ used by LazyCorpusLoader as a simple method that can be used to
128
+ make sure a corpus is loaded -- e.g., in case a user wants to
129
+ do help(some_corpus).
130
+ """
131
+ pass # no need to actually do anything.
132
+
133
+ def readme(self):
134
+ """
135
+ Return the contents of the corpus README file, if it exists.
136
+ """
137
+ with self.open(self._readme) as f:
138
+ return f.read()
139
+
140
+ def license(self):
141
+ """
142
+ Return the contents of the corpus LICENSE file, if it exists.
143
+ """
144
+ with self.open(self._license) as f:
145
+ return f.read()
146
+
147
+ def citation(self):
148
+ """
149
+ Return the contents of the corpus citation.bib file, if it exists.
150
+ """
151
+ with self.open(self._citation) as f:
152
+ return f.read()
153
+
154
+ def fileids(self):
155
+ """
156
+ Return a list of file identifiers for the fileids that make up
157
+ this corpus.
158
+ """
159
+ return self._fileids
160
+
161
+ def abspath(self, fileid):
162
+ """
163
+ Return the absolute path for the given file.
164
+
165
+ :type fileid: str
166
+ :param fileid: The file identifier for the file whose path
167
+ should be returned.
168
+ :rtype: PathPointer
169
+ """
170
+ return self._root.join(fileid)
171
+
172
+ def abspaths(self, fileids=None, include_encoding=False, include_fileid=False):
173
+ """
174
+ Return a list of the absolute paths for all fileids in this corpus;
175
+ or for the given list of fileids, if specified.
176
+
177
+ :type fileids: None or str or list
178
+ :param fileids: Specifies the set of fileids for which paths should
179
+ be returned. Can be None, for all fileids; a list of
180
+ file identifiers, for a specified set of fileids; or a single
181
+ file identifier, for a single file. Note that the return
182
+ value is always a list of paths, even if ``fileids`` is a
183
+ single file identifier.
184
+
185
+ :param include_encoding: If true, then return a list of
186
+ ``(path_pointer, encoding)`` tuples.
187
+
188
+ :rtype: list(PathPointer)
189
+ """
190
+ if fileids is None:
191
+ fileids = self._fileids
192
+ elif isinstance(fileids, str):
193
+ fileids = [fileids]
194
+
195
+ paths = [self._root.join(f) for f in fileids]
196
+
197
+ if include_encoding and include_fileid:
198
+ return list(zip(paths, [self.encoding(f) for f in fileids], fileids))
199
+ elif include_fileid:
200
+ return list(zip(paths, fileids))
201
+ elif include_encoding:
202
+ return list(zip(paths, [self.encoding(f) for f in fileids]))
203
+ else:
204
+ return paths
205
+
206
+ def raw(self, fileids=None):
207
+ """
208
+ :param fileids: A list specifying the fileids that should be used.
209
+ :return: the given file(s) as a single string.
210
+ :rtype: str
211
+ """
212
+ if fileids is None:
213
+ fileids = self._fileids
214
+ elif isinstance(fileids, str):
215
+ fileids = [fileids]
216
+ contents = []
217
+ for f in fileids:
218
+ with self.open(f) as fp:
219
+ contents.append(fp.read())
220
+ return concat(contents)
221
+
222
+ def open(self, file):
223
+ """
224
+ Return an open stream that can be used to read the given file.
225
+ If the file's encoding is not None, then the stream will
226
+ automatically decode the file's contents into unicode.
227
+
228
+ :param file: The file identifier of the file to read.
229
+ """
230
+ encoding = self.encoding(file)
231
+ stream = self._root.join(file).open(encoding)
232
+ return stream
233
+
234
+ def encoding(self, file):
235
+ """
236
+ Return the unicode encoding for the given corpus file, if known.
237
+ If the encoding is unknown, or if the given file should be
238
+ processed using byte strings (str), then return None.
239
+ """
240
+ if isinstance(self._encoding, dict):
241
+ return self._encoding.get(file)
242
+ else:
243
+ return self._encoding
244
+
245
+ def _get_root(self):
246
+ return self._root
247
+
248
+ root = property(
249
+ _get_root,
250
+ doc="""
251
+ The directory where this corpus is stored.
252
+
253
+ :type: PathPointer""",
254
+ )
255
+
256
+
257
+ ######################################################################
258
+ # { Corpora containing categorized items
259
+ ######################################################################
260
+
261
+
262
+ class CategorizedCorpusReader:
263
+ """
264
+ A mixin class used to aid in the implementation of corpus readers
265
+ for categorized corpora. This class defines the method
266
+ ``categories()``, which returns a list of the categories for the
267
+ corpus or for a specified set of fileids; and overrides ``fileids()``
268
+ to take a ``categories`` argument, restricting the set of fileids to
269
+ be returned.
270
+
271
+ Subclasses are expected to:
272
+
273
+ - Call ``__init__()`` to set up the mapping.
274
+
275
+ - Override all view methods to accept a ``categories`` parameter,
276
+ which can be used *instead* of the ``fileids`` parameter, to
277
+ select which fileids should be included in the returned view.
278
+ """
279
+
280
+ def __init__(self, kwargs):
281
+ """
282
+ Initialize this mapping based on keyword arguments, as
283
+ follows:
284
+
285
+ - cat_pattern: A regular expression pattern used to find the
286
+ category for each file identifier. The pattern will be
287
+ applied to each file identifier, and the first matching
288
+ group will be used as the category label for that file.
289
+
290
+ - cat_map: A dictionary, mapping from file identifiers to
291
+ category labels.
292
+
293
+ - cat_file: The name of a file that contains the mapping
294
+ from file identifiers to categories. The argument
295
+ ``cat_delimiter`` can be used to specify a delimiter.
296
+
297
+ The corresponding argument will be deleted from ``kwargs``. If
298
+ more than one argument is specified, an exception will be
299
+ raised.
300
+ """
301
+ self._f2c = None #: file-to-category mapping
302
+ self._c2f = None #: category-to-file mapping
303
+
304
+ self._pattern = None #: regexp specifying the mapping
305
+ self._map = None #: dict specifying the mapping
306
+ self._file = None #: fileid of file containing the mapping
307
+ self._delimiter = None #: delimiter for ``self._file``
308
+
309
+ if "cat_pattern" in kwargs:
310
+ self._pattern = kwargs["cat_pattern"]
311
+ del kwargs["cat_pattern"]
312
+ elif "cat_map" in kwargs:
313
+ self._map = kwargs["cat_map"]
314
+ del kwargs["cat_map"]
315
+ elif "cat_file" in kwargs:
316
+ self._file = kwargs["cat_file"]
317
+ del kwargs["cat_file"]
318
+ if "cat_delimiter" in kwargs:
319
+ self._delimiter = kwargs["cat_delimiter"]
320
+ del kwargs["cat_delimiter"]
321
+ else:
322
+ raise ValueError(
323
+ "Expected keyword argument cat_pattern or " "cat_map or cat_file."
324
+ )
325
+
326
+ if "cat_pattern" in kwargs or "cat_map" in kwargs or "cat_file" in kwargs:
327
+ raise ValueError(
328
+ "Specify exactly one of: cat_pattern, " "cat_map, cat_file."
329
+ )
330
+
331
+ def _init(self):
332
+ self._f2c = defaultdict(set)
333
+ self._c2f = defaultdict(set)
334
+
335
+ if self._pattern is not None:
336
+ for file_id in self._fileids:
337
+ category = re.match(self._pattern, file_id).group(1)
338
+ self._add(file_id, category)
339
+
340
+ elif self._map is not None:
341
+ for (file_id, categories) in self._map.items():
342
+ for category in categories:
343
+ self._add(file_id, category)
344
+
345
+ elif self._file is not None:
346
+ with self.open(self._file) as f:
347
+ for line in f.readlines():
348
+ line = line.strip()
349
+ file_id, categories = line.split(self._delimiter, 1)
350
+ if file_id not in self.fileids():
351
+ raise ValueError(
352
+ "In category mapping file %s: %s "
353
+ "not found" % (self._file, file_id)
354
+ )
355
+ for category in categories.split(self._delimiter):
356
+ self._add(file_id, category)
357
+
358
+ def _add(self, file_id, category):
359
+ self._f2c[file_id].add(category)
360
+ self._c2f[category].add(file_id)
361
+
362
+ def categories(self, fileids=None):
363
+ """
364
+ Return a list of the categories that are defined for this corpus,
365
+ or for the file(s) if it is given.
366
+ """
367
+ if self._f2c is None:
368
+ self._init()
369
+ if fileids is None:
370
+ return sorted(self._c2f)
371
+ if isinstance(fileids, str):
372
+ fileids = [fileids]
373
+ return sorted(set.union(*(self._f2c[d] for d in fileids)))
374
+
375
+ def fileids(self, categories=None):
376
+ """
377
+ Return a list of file identifiers for the files that make up
378
+ this corpus, or that make up the given category(s) if specified.
379
+ """
380
+ if categories is None:
381
+ return super().fileids()
382
+ elif isinstance(categories, str):
383
+ if self._f2c is None:
384
+ self._init()
385
+ if categories in self._c2f:
386
+ return sorted(self._c2f[categories])
387
+ else:
388
+ raise ValueError("Category %s not found" % categories)
389
+ else:
390
+ if self._f2c is None:
391
+ self._init()
392
+ return sorted(set.union(*(self._c2f[c] for c in categories)))
393
+
394
+ def _resolve(self, fileids, categories):
395
+ if fileids is not None and categories is not None:
396
+ raise ValueError("Specify fileids or categories, not both")
397
+ if categories is not None:
398
+ return self.fileids(categories)
399
+ else:
400
+ return fileids
401
+
402
+ def raw(self, fileids=None, categories=None):
403
+ return super().raw(self._resolve(fileids, categories))
404
+
405
+ def words(self, fileids=None, categories=None):
406
+ return super().words(self._resolve(fileids, categories))
407
+
408
+ def sents(self, fileids=None, categories=None):
409
+ return super().sents(self._resolve(fileids, categories))
410
+
411
+ def paras(self, fileids=None, categories=None):
412
+ return super().paras(self._resolve(fileids, categories))
413
+
414
+
415
+ ######################################################################
416
+ # { Treebank readers
417
+ ######################################################################
418
+
419
+ # [xx] is it worth it to factor this out?
420
+ class SyntaxCorpusReader(CorpusReader):
421
+ """
422
+ An abstract base class for reading corpora consisting of
423
+ syntactically parsed text. Subclasses should define:
424
+
425
+ - ``__init__``, which specifies the location of the corpus
426
+ and a method for detecting the sentence blocks in corpus files.
427
+ - ``_read_block``, which reads a block from the input stream.
428
+ - ``_word``, which takes a block and returns a list of list of words.
429
+ - ``_tag``, which takes a block and returns a list of list of tagged
430
+ words.
431
+ - ``_parse``, which takes a block and returns a list of parsed
432
+ sentences.
433
+ """
434
+
435
+ def _parse(self, s):
436
+ raise NotImplementedError()
437
+
438
+ def _word(self, s):
439
+ raise NotImplementedError()
440
+
441
+ def _tag(self, s):
442
+ raise NotImplementedError()
443
+
444
+ def _read_block(self, stream):
445
+ raise NotImplementedError()
446
+
447
+ def parsed_sents(self, fileids=None):
448
+ reader = self._read_parsed_sent_block
449
+ return concat(
450
+ [
451
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
452
+ for fileid, enc in self.abspaths(fileids, True)
453
+ ]
454
+ )
455
+
456
+ def tagged_sents(self, fileids=None, tagset=None):
457
+ def reader(stream):
458
+ return self._read_tagged_sent_block(stream, tagset)
459
+
460
+ return concat(
461
+ [
462
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
463
+ for fileid, enc in self.abspaths(fileids, True)
464
+ ]
465
+ )
466
+
467
+ def sents(self, fileids=None):
468
+ reader = self._read_sent_block
469
+ return concat(
470
+ [
471
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
472
+ for fileid, enc in self.abspaths(fileids, True)
473
+ ]
474
+ )
475
+
476
+ def tagged_words(self, fileids=None, tagset=None):
477
+ def reader(stream):
478
+ return self._read_tagged_word_block(stream, tagset)
479
+
480
+ return concat(
481
+ [
482
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
483
+ for fileid, enc in self.abspaths(fileids, True)
484
+ ]
485
+ )
486
+
487
+ def words(self, fileids=None):
488
+ return concat(
489
+ [
490
+ StreamBackedCorpusView(fileid, self._read_word_block, encoding=enc)
491
+ for fileid, enc in self.abspaths(fileids, True)
492
+ ]
493
+ )
494
+
495
+ # ------------------------------------------------------------
496
+ # { Block Readers
497
+
498
+ def _read_word_block(self, stream):
499
+ return list(chain.from_iterable(self._read_sent_block(stream)))
500
+
501
+ def _read_tagged_word_block(self, stream, tagset=None):
502
+ return list(chain.from_iterable(self._read_tagged_sent_block(stream, tagset)))
503
+
504
+ def _read_sent_block(self, stream):
505
+ return list(filter(None, [self._word(t) for t in self._read_block(stream)]))
506
+
507
+ def _read_tagged_sent_block(self, stream, tagset=None):
508
+ return list(
509
+ filter(None, [self._tag(t, tagset) for t in self._read_block(stream)])
510
+ )
511
+
512
+ def _read_parsed_sent_block(self, stream):
513
+ return list(filter(None, [self._parse(t) for t in self._read_block(stream)]))
514
+
515
+ # } End of Block Readers
516
+ # ------------------------------------------------------------
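Note: two parts of this API benefit from a concrete illustration: CorpusReader.__init__ accepts encoding as a single name, a dict, or a list of (regexp, encoding) pairs matched against each fileid, and CategorizedCorpusReader lets the view methods filter by categories= instead of fileids=. The sketch below shows both; the local directory and file names in the first half are hypothetical, while the second half uses the movie_reviews loader declared in corpus/__init__.py above.

    from nltk.corpus.reader import PlaintextCorpusReader

    # Per-fileid encodings: the first (regexp, encoding) pair whose regexp matches wins.
    reader = PlaintextCorpusReader(
        root="my_corpus",                          # hypothetical local directory
        fileids=r"(?!\.).*\.txt",
        encoding=[("legacy_.*", "latin-1"), (".*", "utf8")],
    )
    print(reader.fileids())
    print(reader.encoding("legacy_notes.txt"))     # 'latin-1', if such a file exists there

    # Categorized access: fileids() filtered by category, or categories= on the view methods.
    from nltk.corpus import movie_reviews
    print(movie_reviews.categories())              # ['neg', 'pos']
    print(movie_reviews.fileids("pos")[:2])
    print(movie_reviews.words(categories="neg")[:10])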
venv/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py ADDED
@@ -0,0 +1,218 @@
1
+ # Natural Language Toolkit: BCP-47 language tags
2
+ #
3
+ # Copyright (C) 2022-2023 NLTK Project
4
+ # Author: Eric Kafe <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+ from warnings import warn
10
+ from xml.etree import ElementTree as et
11
+
12
+ from nltk.corpus.reader import CorpusReader
13
+
14
+
15
+ class BCP47CorpusReader(CorpusReader):
16
+ """
17
+ Parse BCP-47 composite language tags
18
+
19
+ Supports all the main subtags, and the 'u-sd' extension:
20
+
21
+ >>> from nltk.corpus import bcp47
22
+ >>> bcp47.name('oc-gascon-u-sd-fr64')
23
+ 'Occitan (post 1500): Gascon: Pyrénées-Atlantiques'
24
+
25
+ Can load a conversion table to Wikidata Q-codes:
26
+ >>> bcp47.load_wiki_q()
27
+ >>> bcp47.wiki_q['en-GI-spanglis']
28
+ 'Q79388'
29
+
30
+ """
31
+
32
+ def __init__(self, root, fileids):
33
+ """Read the BCP-47 database"""
34
+ super().__init__(root, fileids)
35
+ self.langcode = {}
36
+ with self.open("iana/language-subtag-registry.txt") as fp:
37
+ self.db = self.data_dict(fp.read().split("%%\n"))
38
+ with self.open("cldr/common-subdivisions-en.xml") as fp:
39
+ self.subdiv = self.subdiv_dict(
40
+ et.parse(fp).iterfind("localeDisplayNames/subdivisions/subdivision")
41
+ )
42
+ self.morphology()
43
+
44
+ def load_wiki_q(self):
45
+ """Load conversion table to Wikidata Q-codes (only if needed)"""
46
+ with self.open("cldr/tools-cldr-rdf-external-entityToCode.tsv") as fp:
47
+ self.wiki_q = self.wiki_dict(fp.read().strip().split("\n")[1:])
48
+
49
+ def wiki_dict(self, lines):
50
+ """Convert Wikidata list of Q-codes to a BCP-47 dictionary"""
51
+ return {
52
+ pair[1]: pair[0].split("/")[-1]
53
+ for pair in [line.strip().split("\t") for line in lines]
54
+ }
55
+
56
+ def subdiv_dict(self, subdivs):
57
+ """Convert the CLDR subdivisions list to a dictionary"""
58
+ return {sub.attrib["type"]: sub.text for sub in subdivs}
59
+
60
+ def morphology(self):
61
+ self.casing = {
62
+ "language": str.lower,
63
+ "extlang": str.lower,
64
+ "script": str.title,
65
+ "region": str.upper,
66
+ "variant": str.lower,
67
+ }
68
+ dig = "[0-9]"
69
+ low = "[a-z]"
70
+ up = "[A-Z]"
71
+ alnum = "[a-zA-Z0-9]"
72
+ self.format = {
73
+ "language": re.compile(f"{low*3}?"),
74
+ "extlang": re.compile(f"{low*3}"),
75
+ "script": re.compile(f"{up}{low*3}"),
76
+ "region": re.compile(f"({up*2})|({dig*3})"),
77
+ "variant": re.compile(f"{alnum*4}{(alnum+'?')*4}"),
78
+ "singleton": re.compile(f"{low}"),
79
+ }
80
+
81
+ def data_dict(self, records):
82
+ """Convert the BCP-47 language subtag registry to a dictionary"""
83
+ self.version = records[0].replace("File-Date:", "").strip()
84
+ dic = {}
85
+ dic["deprecated"] = {}
86
+ for label in [
87
+ "language",
88
+ "extlang",
89
+ "script",
90
+ "region",
91
+ "variant",
92
+ "redundant",
93
+ "grandfathered",
94
+ ]:
95
+ dic["deprecated"][label] = {}
96
+ for record in records[1:]:
97
+ fields = [field.split(": ") for field in record.strip().split("\n")]
98
+ typ = fields[0][1]
99
+ tag = fields[1][1]
100
+ if typ not in dic:
101
+ dic[typ] = {}
102
+ subfields = {}
103
+ for field in fields[2:]:
104
+ if len(field) == 2:
105
+ [key, val] = field
106
+ if key not in subfields:
107
+ subfields[key] = [val]
108
+ else: # multiple value
109
+ subfields[key].append(val)
110
+ else: # multiline field
111
+ subfields[key][-1] += " " + field[0].strip()
112
+ if (
113
+ "Deprecated" not in record
114
+ and typ == "language"
115
+ and key == "Description"
116
+ ):
117
+ self.langcode[subfields[key][-1]] = tag
118
+ for key in subfields:
119
+ if len(subfields[key]) == 1: # single value
120
+ subfields[key] = subfields[key][0]
121
+ if "Deprecated" in record:
122
+ dic["deprecated"][typ][tag] = subfields
123
+ else:
124
+ dic[typ][tag] = subfields
125
+ return dic
126
+
127
+ def val2str(self, val):
128
+ """Return only first value"""
129
+ if type(val) == list:
130
+ # val = "/".join(val) # Concatenate all values
131
+ val = val[0]
132
+ return val
133
+
134
+ def lang2str(self, lg_record):
135
+ """Concatenate subtag values"""
136
+ name = f"{lg_record['language']}"
137
+ for label in ["extlang", "script", "region", "variant", "extension"]:
138
+ if label in lg_record:
139
+ name += f": {lg_record[label]}"
140
+ return name
141
+
142
+ def parse_tag(self, tag):
143
+ """Convert a BCP-47 tag to a dictionary of labelled subtags"""
144
+ subtags = tag.split("-")
145
+ lang = {}
146
+ labels = ["language", "extlang", "script", "region", "variant", "variant"]
147
+ while subtags and labels:
148
+ subtag = subtags.pop(0)
149
+ found = False
150
+ while labels:
151
+ label = labels.pop(0)
152
+ subtag = self.casing[label](subtag)
153
+ if self.format[label].fullmatch(subtag):
154
+ if subtag in self.db[label]:
155
+ found = True
156
+ valstr = self.val2str(self.db[label][subtag]["Description"])
157
+ if label == "variant" and label in lang:
158
+ lang[label] += ": " + valstr
159
+ else:
160
+ lang[label] = valstr
161
+ break
162
+ elif subtag in self.db["deprecated"][label]:
163
+ found = True
164
+ note = f"The {subtag!r} {label} code is deprecated"
165
+ if "Preferred-Value" in self.db["deprecated"][label][subtag]:
166
+ prefer = self.db["deprecated"][label][subtag][
167
+ "Preferred-Value"
168
+ ]
169
+ note += f"', prefer '{self.val2str(prefer)}'"
170
+ lang[label] = self.val2str(
171
+ self.db["deprecated"][label][subtag]["Description"]
172
+ )
173
+ warn(note)
174
+ break
175
+ if not found:
176
+ if subtag == "u" and subtags[0] == "sd": # CLDR regional subdivisions
177
+ sd = subtags[1]
178
+ if sd in self.subdiv:
179
+ ext = self.subdiv[sd]
180
+ else:
181
+ ext = f"<Unknown subdivision: {ext}>"
182
+ else: # other extension subtags are not supported yet
183
+ ext = f"{subtag}{''.join(['-'+ext for ext in subtags])}".lower()
184
+ if not self.format["singleton"].fullmatch(subtag):
185
+ ext = f"<Invalid extension: {ext}>"
186
+ warn(ext)
187
+ lang["extension"] = ext
188
+ subtags = []
189
+ return lang
190
+
191
+ def name(self, tag):
192
+ """
193
+ Convert a BCP-47 tag to a colon-separated string of subtag names
194
+
195
+ >>> from nltk.corpus import bcp47
196
+ >>> bcp47.name('ca-Latn-ES-valencia')
197
+ 'Catalan: Latin: Spain: Valencian'
198
+
199
+ """
200
+ for label in ["redundant", "grandfathered"]:
201
+ val = None
202
+ if tag in self.db[label]:
203
+ val = f"{self.db[label][tag]['Description']}"
204
+ note = f"The {tag!r} code is {label}"
205
+ elif tag in self.db["deprecated"][label]:
206
+ val = f"{self.db['deprecated'][label][tag]['Description']}"
207
+ note = f"The {tag!r} code is {label} and deprecated"
208
+ if "Preferred-Value" in self.db["deprecated"][label][tag]:
209
+ prefer = self.db["deprecated"][label][tag]["Preferred-Value"]
210
+ note += f", prefer {self.val2str(prefer)!r}"
211
+ if val:
212
+ warn(note)
213
+ return val
214
+ try:
215
+ return self.lang2str(self.parse_tag(tag))
216
+ except:
217
+ warn(f"Tag {tag!r} was not recognized")
218
+ return None
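Note: the class docstring above already shows the intended entry points; here is a condensed sketch of tag naming, structured parsing, and the on-demand Wikidata table (assumes the bcp47 data package has been downloaded):

    import nltk

    nltk.download("bcp47")
    from nltk.corpus import bcp47

    # Compose subtag descriptions into a colon-separated name (examples from the docstrings).
    print(bcp47.name("ca-Latn-ES-valencia"))   # 'Catalan: Latin: Spain: Valencian'
    print(bcp47.name("oc-gascon-u-sd-fr64"))   # 'Occitan (post 1500): Gascon: Pyrénées-Atlantiques'

    # Structured view of the same tag, as a dict of labelled subtags.
    print(bcp47.parse_tag("ca-Latn-ES-valencia"))

    # Wikidata Q-codes are loaded only when requested.
    bcp47.load_wiki_q()
    print(bcp47.wiki_q["en-GI-spanglis"])      # 'Q79388'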
venv/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py ADDED
@@ -0,0 +1,265 @@
1
+ # Natural Language Toolkit: Plaintext Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """Corpus reader for the XML version of the British National Corpus."""
9
+
10
+ from nltk.corpus.reader.util import concat
11
+ from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader, XMLCorpusView
12
+
13
+
14
+ class BNCCorpusReader(XMLCorpusReader):
15
+ r"""Corpus reader for the XML version of the British National Corpus.
16
+
17
+ For access to the complete XML data structure, use the ``xml()``
18
+ method. For access to simple word lists and tagged word lists, use
19
+ ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
20
+
21
+ You can obtain the full version of the BNC corpus at
22
+ https://www.ota.ox.ac.uk/desc/2554
23
+
24
+ If you extracted the archive to a directory called `BNC`, then you can
25
+ instantiate the reader as::
26
+
27
+ BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml')
28
+
29
+ """
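+ 
+ # A minimal usage sketch (not part of the original module): the root and
+ # fileid pattern are the illustrative values from the docstring above and
+ # assume the BNC XML texts were unpacked to 'BNC/Texts/'.
+ #
+ #     bnc = BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml')
+ #     bnc.tagged_words(c5=True)[:5]   # [(word, detailed C5 tag), ...]
+ #     bnc.sents(stem=True)[:2]        # sentences of word stems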
30
+
31
+ def __init__(self, root, fileids, lazy=True):
32
+ XMLCorpusReader.__init__(self, root, fileids)
33
+ self._lazy = lazy
34
+
35
+ def words(self, fileids=None, strip_space=True, stem=False):
36
+ """
37
+ :return: the given file(s) as a list of words
38
+ and punctuation symbols.
39
+ :rtype: list(str)
40
+
41
+ :param strip_space: If true, then strip trailing spaces from
42
+ word tokens. Otherwise, leave the spaces on the tokens.
43
+ :param stem: If true, then use word stems instead of word strings.
44
+ """
45
+ return self._views(fileids, False, None, strip_space, stem)
46
+
47
+ def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False):
48
+ """
49
+ :return: the given file(s) as a list of tagged
50
+ words and punctuation symbols, encoded as tuples
51
+ ``(word,tag)``.
52
+ :rtype: list(tuple(str,str))
53
+
54
+ :param c5: If true, then the tags used will be the more detailed
55
+ c5 tags. Otherwise, the simplified tags will be used.
56
+ :param strip_space: If true, then strip trailing spaces from
57
+ word tokens. Otherwise, leave the spaces on the tokens.
58
+ :param stem: If true, then use word stems instead of word strings.
59
+ """
60
+ tag = "c5" if c5 else "pos"
61
+ return self._views(fileids, False, tag, strip_space, stem)
62
+
63
+ def sents(self, fileids=None, strip_space=True, stem=False):
64
+ """
65
+ :return: the given file(s) as a list of
66
+ sentences or utterances, each encoded as a list of word
67
+ strings.
68
+ :rtype: list(list(str))
69
+
70
+ :param strip_space: If true, then strip trailing spaces from
71
+ word tokens. Otherwise, leave the spaces on the tokens.
72
+ :param stem: If true, then use word stems instead of word strings.
73
+ """
74
+ return self._views(fileids, True, None, strip_space, stem)
75
+
76
+ def tagged_sents(self, fileids=None, c5=False, strip_space=True, stem=False):
77
+ """
78
+ :return: the given file(s) as a list of
79
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
80
+ :rtype: list(list(tuple(str,str)))
81
+
82
+ :param c5: If true, then the tags used will be the more detailed
83
+ c5 tags. Otherwise, the simplified tags will be used.
84
+ :param strip_space: If true, then strip trailing spaces from
85
+ word tokens. Otherwise, leave the spaces on the tokens.
86
+ :param stem: If true, then use word stems instead of word strings.
87
+ """
88
+ tag = "c5" if c5 else "pos"
89
+ return self._views(
90
+ fileids, sent=True, tag=tag, strip_space=strip_space, stem=stem
91
+ )
92
+
93
+ def _views(self, fileids=None, sent=False, tag=False, strip_space=True, stem=False):
94
+ """A helper function that instantiates BNCWordViews or the list of words/sentences."""
95
+ f = BNCWordView if self._lazy else self._words
96
+ return concat(
97
+ [
98
+ f(fileid, sent, tag, strip_space, stem)
99
+ for fileid in self.abspaths(fileids)
100
+ ]
101
+ )
102
+
103
+ def _words(self, fileid, bracket_sent, tag, strip_space, stem):
104
+ """
105
+ Helper used to implement the view methods -- returns a list of
106
+ words or a list of sentences, optionally tagged.
107
+
108
+ :param fileid: The name of the underlying file.
109
+ :param bracket_sent: If true, include sentence bracketing.
110
+ :param tag: The name of the tagset to use, or None for no tags.
111
+ :param strip_space: If true, strip spaces from word tokens.
112
+ :param stem: If true, then substitute stems for words.
113
+ """
114
+ result = []
115
+
116
+ xmldoc = ElementTree.parse(fileid).getroot()
117
+ for xmlsent in xmldoc.findall(".//s"):
118
+ sent = []
119
+ for xmlword in _all_xmlwords_in(xmlsent):
120
+ word = xmlword.text
121
+ if not word:
122
+ word = "" # fixes issue 337?
123
+ if strip_space or stem:
124
+ word = word.strip()
125
+ if stem:
126
+ word = xmlword.get("hw", word)
127
+ if tag == "c5":
128
+ word = (word, xmlword.get("c5"))
129
+ elif tag == "pos":
130
+ word = (word, xmlword.get("pos", xmlword.get("c5")))
131
+ sent.append(word)
132
+ if bracket_sent:
133
+ result.append(BNCSentence(xmlsent.attrib["n"], sent))
134
+ else:
135
+ result.extend(sent)
136
+
137
+ assert None not in result
138
+ return result
139
+
140
+
141
+ def _all_xmlwords_in(elt, result=None):
142
+ if result is None:
143
+ result = []
144
+ for child in elt:
145
+ if child.tag in ("c", "w"):
146
+ result.append(child)
147
+ else:
148
+ _all_xmlwords_in(child, result)
149
+ return result
150
+
151
+
152
+ class BNCSentence(list):
153
+ """
154
+ A list of words, augmented by an attribute ``num`` used to record
155
+ the sentence identifier (the ``n`` attribute from the XML).
156
+ """
157
+
158
+ def __init__(self, num, items):
159
+ self.num = num
160
+ list.__init__(self, items)
161
+
162
+
163
+ class BNCWordView(XMLCorpusView):
164
+ """
165
+ A stream backed corpus view specialized for use with the BNC corpus.
166
+ """
167
+
168
+ tags_to_ignore = {
169
+ "pb",
170
+ "gap",
171
+ "vocal",
172
+ "event",
173
+ "unclear",
174
+ "shift",
175
+ "pause",
176
+ "align",
177
+ }
178
+ """These tags are ignored. For their description refer to the
179
+ technical documentation, for example,
180
+ http://www.natcorp.ox.ac.uk/docs/URG/ref-vocal.html
181
+
182
+ """
183
+
184
+ def __init__(self, fileid, sent, tag, strip_space, stem):
185
+ """
186
+ :param fileid: The name of the underlying file.
187
+ :param sent: If true, include sentence bracketing.
188
+ :param tag: The name of the tagset to use, or None for no tags.
189
+ :param strip_space: If true, strip spaces from word tokens.
190
+ :param stem: If true, then substitute stems for words.
191
+ """
192
+ if sent:
193
+ tagspec = ".*/s"
194
+ else:
195
+ tagspec = ".*/s/(.*/)?(c|w)"
196
+ self._sent = sent
197
+ self._tag = tag
198
+ self._strip_space = strip_space
199
+ self._stem = stem
200
+
201
+ self.title = None #: Title of the document.
202
+ self.author = None #: Author of the document.
203
+ self.editor = None #: Editor
204
+ self.resps = None #: Statement of responsibility
205
+
206
+ XMLCorpusView.__init__(self, fileid, tagspec)
207
+
208
+ # Read in a tasty header.
209
+ self._open()
210
+ self.read_block(self._stream, ".*/teiHeader$", self.handle_header)
211
+ self.close()
212
+
213
+ # Reset tag context.
214
+ self._tag_context = {0: ()}
215
+
216
+ def handle_header(self, elt, context):
217
+ # Set up some metadata!
218
+ titles = elt.findall("titleStmt/title")
219
+ if titles:
220
+ self.title = "\n".join(title.text.strip() for title in titles)
221
+
222
+ authors = elt.findall("titleStmt/author")
223
+ if authors:
224
+ self.author = "\n".join(author.text.strip() for author in authors)
225
+
226
+ editors = elt.findall("titleStmt/editor")
227
+ if editors:
228
+ self.editor = "\n".join(editor.text.strip() for editor in editors)
229
+
230
+ resps = elt.findall("titleStmt/respStmt")
231
+ if resps:
232
+ self.resps = "\n\n".join(
233
+ "\n".join(resp_elt.text.strip() for resp_elt in resp) for resp in resps
234
+ )
235
+
236
+ def handle_elt(self, elt, context):
237
+ if self._sent:
238
+ return self.handle_sent(elt)
239
+ else:
240
+ return self.handle_word(elt)
241
+
242
+ def handle_word(self, elt):
243
+ word = elt.text
244
+ if not word:
245
+ word = "" # fixes issue 337?
246
+ if self._strip_space or self._stem:
247
+ word = word.strip()
248
+ if self._stem:
249
+ word = elt.get("hw", word)
250
+ if self._tag == "c5":
251
+ word = (word, elt.get("c5"))
252
+ elif self._tag == "pos":
253
+ word = (word, elt.get("pos", elt.get("c5")))
254
+ return word
255
+
256
+ def handle_sent(self, elt):
257
+ sent = []
258
+ for child in elt:
259
+ if child.tag in ("mw", "hi", "corr", "trunc"):
260
+ sent += [self.handle_word(w) for w in child]
261
+ elif child.tag in ("w", "c"):
262
+ sent.append(self.handle_word(child))
263
+ elif child.tag not in self.tags_to_ignore:
264
+ raise ValueError("Unexpected element %s" % child.tag)
265
+ return BNCSentence(elt.attrib["n"], sent)
venv/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py ADDED
@@ -0,0 +1,158 @@
1
+ #
2
+ # Copyright (C) 2001-2023 NLTK Project
3
+ # Author: Masato Hagiwara <[email protected]>
4
+ # URL: <https://www.nltk.org/>
5
+ # For license information, see LICENSE.TXT
6
+
7
+ import sys
8
+
9
+ from nltk.corpus.reader import util
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+
13
+
14
+ class ChasenCorpusReader(CorpusReader):
15
+ def __init__(self, root, fileids, encoding="utf8", sent_splitter=None):
16
+ self._sent_splitter = sent_splitter
17
+ CorpusReader.__init__(self, root, fileids, encoding)
18
+
19
+ def words(self, fileids=None):
20
+ return concat(
21
+ [
22
+ ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter)
23
+ for (fileid, enc) in self.abspaths(fileids, True)
24
+ ]
25
+ )
26
+
27
+ def tagged_words(self, fileids=None):
28
+ return concat(
29
+ [
30
+ ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter)
31
+ for (fileid, enc) in self.abspaths(fileids, True)
32
+ ]
33
+ )
34
+
35
+ def sents(self, fileids=None):
36
+ return concat(
37
+ [
38
+ ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter)
39
+ for (fileid, enc) in self.abspaths(fileids, True)
40
+ ]
41
+ )
42
+
43
+ def tagged_sents(self, fileids=None):
44
+ return concat(
45
+ [
46
+ ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def paras(self, fileids=None):
52
+ return concat(
53
+ [
54
+ ChasenCorpusView(fileid, enc, False, True, True, self._sent_splitter)
55
+ for (fileid, enc) in self.abspaths(fileids, True)
56
+ ]
57
+ )
58
+
59
+ def tagged_paras(self, fileids=None):
60
+ return concat(
61
+ [
62
+ ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter)
63
+ for (fileid, enc) in self.abspaths(fileids, True)
64
+ ]
65
+ )
66
+
67
+
68
+ class ChasenCorpusView(StreamBackedCorpusView):
69
+ """
70
+ A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``,
71
+ but this uses fixed word and sentence tokenizers.
72
+ """
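+ 
+ # Illustrative input shape (inferred from ``read_block`` below, not copied
+ # from any corpus file): each line holds a surface form followed by
+ # tab-separated morphological fields, and a bare "EOS" line closes the
+ # sentence, e.g.
+ #
+ #     <surface form>\t<morphological fields ...>
+ #     EOS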
73
+
74
+ def __init__(
75
+ self,
76
+ corpus_file,
77
+ encoding,
78
+ tagged,
79
+ group_by_sent,
80
+ group_by_para,
81
+ sent_splitter=None,
82
+ ):
83
+ self._tagged = tagged
84
+ self._group_by_sent = group_by_sent
85
+ self._group_by_para = group_by_para
86
+ self._sent_splitter = sent_splitter
87
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
88
+
89
+ def read_block(self, stream):
90
+ """Reads one paragraph at a time."""
91
+ block = []
92
+ for para_str in read_regexp_block(stream, r".", r"^EOS\n"):
93
+
94
+ para = []
95
+
96
+ sent = []
97
+ for line in para_str.splitlines():
98
+
99
+ _eos = line.strip() == "EOS"
100
+ _cells = line.split("\t")
101
+ w = (_cells[0], "\t".join(_cells[1:]))
102
+ if not _eos:
103
+ sent.append(w)
104
+
105
+ if _eos or (self._sent_splitter and self._sent_splitter(w)):
106
+ if not self._tagged:
107
+ sent = [w for (w, t) in sent]
108
+ if self._group_by_sent:
109
+ para.append(sent)
110
+ else:
111
+ para.extend(sent)
112
+ sent = []
113
+
114
+ if len(sent) > 0:
115
+ if not self._tagged:
116
+ sent = [w for (w, t) in sent]
117
+
118
+ if self._group_by_sent:
119
+ para.append(sent)
120
+ else:
121
+ para.extend(sent)
122
+
123
+ if self._group_by_para:
124
+ block.append(para)
125
+ else:
126
+ block.extend(para)
127
+
128
+ return block
129
+
130
+
131
+ def demo():
132
+
133
+ import nltk
134
+ from nltk.corpus.util import LazyCorpusLoader
135
+
136
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
137
+ print("/".join(jeita.words()[22100:22140]))
138
+
139
+ print(
140
+ "\nEOS\n".join(
141
+ "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent)
142
+ for sent in jeita.tagged_sents()[2170:2173]
143
+ )
144
+ )
145
+
146
+
147
+ def test():
148
+
149
+ from nltk.corpus.util import LazyCorpusLoader
150
+
151
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
152
+
153
+ assert isinstance(jeita.tagged_words()[0][1], str)
154
+
155
+
156
+ if __name__ == "__main__":
157
+ demo()
158
+ test()
venv/lib/python3.10/site-packages/nltk/corpus/reader/childes.py ADDED
@@ -0,0 +1,630 @@
1
+ # CHILDES XML Corpus Reader
2
+
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Tomonori Nagano <[email protected]>
5
+ # Alexis Dimitriadis <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Corpus reader for the XML version of the CHILDES corpus.
11
+ """
12
+
13
+ __docformat__ = "epytext en"
14
+
15
+ import re
16
+ from collections import defaultdict
17
+
18
+ from nltk.corpus.reader.util import concat
19
+ from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader
20
+ from nltk.util import LazyConcatenation, LazyMap, flatten
21
+
22
+ # to resolve the namespace issue
23
+ NS = "http://www.talkbank.org/ns/talkbank"
24
+
25
+
26
+ class CHILDESCorpusReader(XMLCorpusReader):
27
+ """
28
+ Corpus reader for the XML version of the CHILDES corpus.
29
+ The CHILDES corpus is available at ``https://childes.talkbank.org/``. The XML
30
+ version of CHILDES is located at ``https://childes.talkbank.org/data-xml/``.
31
+ Copy the needed parts of the CHILDES XML corpus into the NLTK data directory
32
+ (``nltk_data/corpora/CHILDES/``).
33
+
34
+ For access to the file text use the usual nltk functions,
35
+ ``words()``, ``sents()``, ``tagged_words()`` and ``tagged_sents()``.
36
+ """
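+ 
+ # A minimal usage sketch (paths are illustrative and assume the corpus was
+ # unpacked under nltk_data/corpora/childes/data-xml/ as described above):
+ #
+ #     from nltk.data import find
+ #     corpus_root = find('corpora/childes/data-xml/Eng-USA/')
+ #     childes = CHILDESCorpusReader(corpus_root, r'.*\.xml')
+ #     childes.words(childes.fileids()[0], speaker='CHI')[:10]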
37
+
38
+ def __init__(self, root, fileids, lazy=True):
39
+ XMLCorpusReader.__init__(self, root, fileids)
40
+ self._lazy = lazy
41
+
42
+ def words(
43
+ self,
44
+ fileids=None,
45
+ speaker="ALL",
46
+ stem=False,
47
+ relation=False,
48
+ strip_space=True,
49
+ replace=False,
50
+ ):
51
+ """
52
+ :return: the given file(s) as a list of words
53
+ :rtype: list(str)
54
+
55
+ :param speaker: If specified, select specific speaker(s) defined
56
+ in the corpus. Default is 'ALL' (all participants). Common choices
57
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
58
+ researchers)
59
+ :param stem: If true, then use word stems instead of word strings.
60
+ :param relation: If true, then return tuples of (stem, index,
61
+ dependent_index)
62
+ :param strip_space: If true, then strip trailing spaces from word
63
+ tokens. Otherwise, leave the spaces on the tokens.
64
+ :param replace: If true, then use the replaced (intended) word instead
65
+ of the original word (e.g., 'wat' will be replaced with 'watch')
66
+ """
67
+ sent = None
68
+ pos = False
69
+ if not self._lazy:
70
+ return [
71
+ self._get_words(
72
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
73
+ )
74
+ for fileid in self.abspaths(fileids)
75
+ ]
76
+
77
+ get_words = lambda fileid: self._get_words(
78
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
79
+ )
80
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
81
+
82
+ def tagged_words(
83
+ self,
84
+ fileids=None,
85
+ speaker="ALL",
86
+ stem=False,
87
+ relation=False,
88
+ strip_space=True,
89
+ replace=False,
90
+ ):
91
+ """
92
+ :return: the given file(s) as a list of tagged
93
+ words and punctuation symbols, encoded as tuples
94
+ ``(word,tag)``.
95
+ :rtype: list(tuple(str,str))
96
+
97
+ :param speaker: If specified, select specific speaker(s) defined
98
+ in the corpus. Default is 'ALL' (all participants). Common choices
99
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
100
+ researchers)
101
+ :param stem: If true, then use word stems instead of word strings.
102
+ :param relation: If true, then return tuples of (stem, index,
103
+ dependent_index)
104
+ :param strip_space: If true, then strip trailing spaces from word
105
+ tokens. Otherwise, leave the spaces on the tokens.
106
+ :param replace: If true, then use the replaced (intended) word instead
107
+ of the original word (e.g., 'wat' will be replaced with 'watch')
108
+ """
109
+ sent = None
110
+ pos = True
111
+ if not self._lazy:
112
+ return [
113
+ self._get_words(
114
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
115
+ )
116
+ for fileid in self.abspaths(fileids)
117
+ ]
118
+
119
+ get_words = lambda fileid: self._get_words(
120
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
121
+ )
122
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
123
+
124
+ def sents(
125
+ self,
126
+ fileids=None,
127
+ speaker="ALL",
128
+ stem=False,
129
+ relation=None,
130
+ strip_space=True,
131
+ replace=False,
132
+ ):
133
+ """
134
+ :return: the given file(s) as a list of sentences or utterances, each
135
+ encoded as a list of word strings.
136
+ :rtype: list(list(str))
137
+
138
+ :param speaker: If specified, select specific speaker(s) defined
139
+ in the corpus. Default is 'ALL' (all participants). Common choices
140
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
141
+ researchers)
142
+ :param stem: If true, then use word stems instead of word strings.
143
+ :param relation: If true, then return tuples of ``(str,pos,relation_list)``.
144
+ If there is manually-annotated relation info, it will return
145
+ tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)``
146
+ :param strip_space: If true, then strip trailing spaces from word
147
+ tokens. Otherwise, leave the spaces on the tokens.
148
+ :param replace: If true, then use the replaced (intended) word instead
149
+ of the original word (e.g., 'wat' will be replaced with 'watch')
150
+ """
151
+ sent = True
152
+ pos = False
153
+ if not self._lazy:
154
+ return [
155
+ self._get_words(
156
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
157
+ )
158
+ for fileid in self.abspaths(fileids)
159
+ ]
160
+
161
+ get_words = lambda fileid: self._get_words(
162
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
163
+ )
164
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
165
+
166
+ def tagged_sents(
167
+ self,
168
+ fileids=None,
169
+ speaker="ALL",
170
+ stem=False,
171
+ relation=None,
172
+ strip_space=True,
173
+ replace=False,
174
+ ):
175
+ """
176
+ :return: the given file(s) as a list of
177
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
178
+ :rtype: list(list(tuple(str,str)))
179
+
180
+ :param speaker: If specified, select specific speaker(s) defined
181
+ in the corpus. Default is 'ALL' (all participants). Common choices
182
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
183
+ researchers)
184
+ :param stem: If true, then use word stems instead of word strings.
185
+ :param relation: If true, then return tuples of ``(str,pos,relation_list)``.
186
+ If there is manually-annotated relation info, it will return
187
+ tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)``
188
+ :param strip_space: If true, then strip trailing spaces from word
189
+ tokens. Otherwise, leave the spaces on the tokens.
190
+ :param replace: If true, then use the replaced (intended) word instead
191
+ of the original word (e.g., 'wat' will be replaced with 'watch')
192
+ """
193
+ sent = True
194
+ pos = True
195
+ if not self._lazy:
196
+ return [
197
+ self._get_words(
198
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
199
+ )
200
+ for fileid in self.abspaths(fileids)
201
+ ]
202
+
203
+ get_words = lambda fileid: self._get_words(
204
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
205
+ )
206
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
207
+
208
+ def corpus(self, fileids=None):
209
+ """
210
+ :return: the given file(s) as a dict of ``(corpus_property_key, value)``
211
+ :rtype: list(dict)
212
+ """
213
+ if not self._lazy:
214
+ return [self._get_corpus(fileid) for fileid in self.abspaths(fileids)]
215
+ return LazyMap(self._get_corpus, self.abspaths(fileids))
216
+
217
+ def _get_corpus(self, fileid):
218
+ results = dict()
219
+ xmldoc = ElementTree.parse(fileid).getroot()
220
+ for key, value in xmldoc.items():
221
+ results[key] = value
222
+ return results
223
+
224
+ def participants(self, fileids=None):
225
+ """
226
+ :return: the given file(s) as a dict of
227
+ ``(participant_property_key, value)``
228
+ :rtype: list(dict)
229
+ """
230
+ if not self._lazy:
231
+ return [self._get_participants(fileid) for fileid in self.abspaths(fileids)]
232
+ return LazyMap(self._get_participants, self.abspaths(fileids))
233
+
234
+ def _get_participants(self, fileid):
235
+ # multidimensional dicts
236
+ def dictOfDicts():
237
+ return defaultdict(dictOfDicts)
238
+
239
+ xmldoc = ElementTree.parse(fileid).getroot()
240
+ # getting participants' data
241
+ pat = dictOfDicts()
242
+ for participant in xmldoc.findall(
243
+ f".//{{{NS}}}Participants/{{{NS}}}participant"
244
+ ):
245
+ for (key, value) in participant.items():
246
+ pat[participant.get("id")][key] = value
247
+ return pat
248
+
249
+ def age(self, fileids=None, speaker="CHI", month=False):
250
+ """
251
+ :return: the given file(s) as string or int
252
+ :rtype: list or int
253
+
254
+ :param month: If true, return months instead of year-month-date
255
+ """
256
+ if not self._lazy:
257
+ return [
258
+ self._get_age(fileid, speaker, month)
259
+ for fileid in self.abspaths(fileids)
260
+ ]
261
+ get_age = lambda fileid: self._get_age(fileid, speaker, month)
262
+ return LazyMap(get_age, self.abspaths(fileids))
263
+
264
+ def _get_age(self, fileid, speaker, month):
265
+ xmldoc = ElementTree.parse(fileid).getroot()
266
+ for pat in xmldoc.findall(f".//{{{NS}}}Participants/{{{NS}}}participant"):
267
+ try:
268
+ if pat.get("id") == speaker:
269
+ age = pat.get("age")
270
+ if month:
271
+ age = self.convert_age(age)
272
+ return age
273
+ # some files don't have age data
274
+ except (TypeError, AttributeError) as e:
275
+ return None
276
+
277
+ def convert_age(self, age_year):
278
+ "Calculate age in months from a string in CHILDES format"
279
+ m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_year)
280
+ age_month = int(m.group(1)) * 12 + int(m.group(2))
281
+ try:
282
+ if int(m.group(3)) > 15:
283
+ age_month += 1
284
+ # some corpora don't have age information?
285
+ except ValueError as e:
286
+ pass
287
+ return age_month
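+ # Worked example (input string invented for illustration):
+ # convert_age("P2Y6M14D") -> 2 * 12 + 6 = 30; the day field (14) is not
+ # greater than 15, so no extra month is added and 30 is returned.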
288
+
289
+ def MLU(self, fileids=None, speaker="CHI"):
290
+ """
291
+ :return: the given file(s) as a floating number
292
+ :rtype: list(float)
293
+ """
294
+ if not self._lazy:
295
+ return [
296
+ self._getMLU(fileid, speaker=speaker)
297
+ for fileid in self.abspaths(fileids)
298
+ ]
299
+ get_MLU = lambda fileid: self._getMLU(fileid, speaker=speaker)
300
+ return LazyMap(get_MLU, self.abspaths(fileids))
301
+
302
+ def _getMLU(self, fileid, speaker):
303
+ sents = self._get_words(
304
+ fileid,
305
+ speaker=speaker,
306
+ sent=True,
307
+ stem=True,
308
+ relation=False,
309
+ pos=True,
310
+ strip_space=True,
311
+ replace=True,
312
+ )
313
+ results = []
314
+ lastSent = []
315
+ numFillers = 0
316
+ sentDiscount = 0
317
+ for sent in sents:
318
+ posList = [pos for (word, pos) in sent]
319
+ # if any part of the sentence is intelligible
320
+ if any(pos == "unk" for pos in posList):
321
+ continue
322
+ # if the sentence is null
323
+ elif sent == []:
324
+ continue
325
+ # if the sentence is the same as the last sent
326
+ elif sent == lastSent:
327
+ continue
328
+ else:
329
+ results.append([word for (word, pos) in sent])
330
+ # count number of fillers
331
+ if len({"co", None}.intersection(posList)) > 0:
332
+ numFillers += posList.count("co")
333
+ numFillers += posList.count(None)
334
+ sentDiscount += 1
335
+ lastSent = sent
336
+ try:
337
+ thisWordList = flatten(results)
338
+ # count number of morphemes
339
+ # (e.g., 'read' = 1 morpheme but 'read-PAST' is 2 morphemes)
340
+ numWords = (
341
+ len(flatten([word.split("-") for word in thisWordList])) - numFillers
342
+ )
343
+ numSents = len(results) - sentDiscount
344
+ mlu = numWords / numSents
345
+ except ZeroDivisionError:
346
+ mlu = 0
347
+ # return {'mlu':mlu,'wordNum':numWords,'sentNum':numSents}
348
+ return mlu
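+ # Worked example for the MLU computation above (counts invented for
+ # illustration): two retained utterances contributing 5 and 7 morphemes,
+ # with no fillers and no discounted sentences, give mlu = (5 + 7) / 2 = 6.0.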
349
+
350
+ def _get_words(
351
+ self, fileid, speaker, sent, stem, relation, pos, strip_space, replace
352
+ ):
353
+ if (
354
+ isinstance(speaker, str) and speaker != "ALL"
355
+ ): # ensure we have a list of speakers
356
+ speaker = [speaker]
357
+ xmldoc = ElementTree.parse(fileid).getroot()
358
+ # processing each xml doc
359
+ results = []
360
+ for xmlsent in xmldoc.findall(".//{%s}u" % NS):
361
+ sents = []
362
+ # select speakers
363
+ if speaker == "ALL" or xmlsent.get("who") in speaker:
364
+ for xmlword in xmlsent.findall(".//{%s}w" % NS):
365
+ infl = None
366
+ suffixStem = None
367
+ suffixTag = None
368
+ # getting replaced words
369
+ if replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}replacement"):
370
+ xmlword = xmlsent.find(
371
+ f".//{{{NS}}}w/{{{NS}}}replacement/{{{NS}}}w"
372
+ )
373
+ elif replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk"):
374
+ xmlword = xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk")
375
+ # get text
376
+ if xmlword.text:
377
+ word = xmlword.text
378
+ else:
379
+ word = ""
380
+ # strip tailing space
381
+ if strip_space:
382
+ word = word.strip()
383
+ # stem
384
+ if relation or stem:
385
+ try:
386
+ xmlstem = xmlword.find(".//{%s}stem" % NS)
387
+ word = xmlstem.text
388
+ except AttributeError as e:
389
+ pass
390
+ # if there is an inflection
391
+ try:
392
+ xmlinfl = xmlword.find(
393
+ f".//{{{NS}}}mor/{{{NS}}}mw/{{{NS}}}mk"
394
+ )
395
+ word += "-" + xmlinfl.text
396
+ except:
397
+ pass
398
+ # if there is a suffix
399
+ try:
400
+ xmlsuffix = xmlword.find(
401
+ ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}stem"
402
+ % (NS, NS, NS, NS)
403
+ )
404
+ suffixStem = xmlsuffix.text
405
+ except AttributeError:
406
+ suffixStem = ""
407
+ if suffixStem:
408
+ word += "~" + suffixStem
409
+ # pos
410
+ if relation or pos:
411
+ try:
412
+ xmlpos = xmlword.findall(".//{%s}c" % NS)
413
+ xmlpos2 = xmlword.findall(".//{%s}s" % NS)
414
+ if xmlpos2 != []:
415
+ tag = xmlpos[0].text + ":" + xmlpos2[0].text
416
+ else:
417
+ tag = xmlpos[0].text
418
+ except (AttributeError, IndexError) as e:
419
+ tag = ""
420
+ try:
421
+ xmlsuffixpos = xmlword.findall(
422
+ ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}c"
423
+ % (NS, NS, NS, NS, NS)
424
+ )
425
+ xmlsuffixpos2 = xmlword.findall(
426
+ ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}s"
427
+ % (NS, NS, NS, NS, NS)
428
+ )
429
+ if xmlsuffixpos2:
430
+ suffixTag = (
431
+ xmlsuffixpos[0].text + ":" + xmlsuffixpos2[0].text
432
+ )
433
+ else:
434
+ suffixTag = xmlsuffixpos[0].text
435
+ except:
436
+ pass
437
+ if suffixTag:
438
+ tag += "~" + suffixTag
439
+ word = (word, tag)
440
+ # relational
441
+ # the gold standard is stored in
442
+ # <mor></mor><mor type="trn"><gra type="grt">
443
+ if relation == True:
444
+ for xmlstem_rel in xmlword.findall(
445
+ f".//{{{NS}}}mor/{{{NS}}}gra"
446
+ ):
447
+ if not xmlstem_rel.get("type") == "grt":
448
+ word = (
449
+ word[0],
450
+ word[1],
451
+ xmlstem_rel.get("index")
452
+ + "|"
453
+ + xmlstem_rel.get("head")
454
+ + "|"
455
+ + xmlstem_rel.get("relation"),
456
+ )
457
+ else:
458
+ word = (
459
+ word[0],
460
+ word[1],
461
+ word[2],
462
+ word[0],
463
+ word[1],
464
+ xmlstem_rel.get("index")
465
+ + "|"
466
+ + xmlstem_rel.get("head")
467
+ + "|"
468
+ + xmlstem_rel.get("relation"),
469
+ )
470
+ try:
471
+ for xmlpost_rel in xmlword.findall(
472
+ f".//{{{NS}}}mor/{{{NS}}}mor-post/{{{NS}}}gra"
473
+ ):
474
+ if not xmlpost_rel.get("type") == "grt":
475
+ suffixStem = (
476
+ suffixStem[0],
477
+ suffixStem[1],
478
+ xmlpost_rel.get("index")
479
+ + "|"
480
+ + xmlpost_rel.get("head")
481
+ + "|"
482
+ + xmlpost_rel.get("relation"),
483
+ )
484
+ else:
485
+ suffixStem = (
486
+ suffixStem[0],
487
+ suffixStem[1],
488
+ suffixStem[2],
489
+ suffixStem[0],
490
+ suffixStem[1],
491
+ xmlpost_rel.get("index")
492
+ + "|"
493
+ + xmlpost_rel.get("head")
494
+ + "|"
495
+ + xmlpost_rel.get("relation"),
496
+ )
497
+ except:
498
+ pass
499
+ sents.append(word)
500
+ if sent or relation:
501
+ results.append(sents)
502
+ else:
503
+ results.extend(sents)
504
+ return LazyMap(lambda x: x, results)
505
+
506
+ # Ready-to-use browser opener
507
+
508
+ """
509
+ The base URL for viewing files on the childes website. This
510
+ shouldn't need to be changed, unless CHILDES changes the configuration
511
+ of their server or unless the user sets up their own corpus webserver.
512
+ """
513
+ childes_url_base = r"https://childes.talkbank.org/browser/index.php?url="
514
+
515
+ def webview_file(self, fileid, urlbase=None):
516
+ """Map a corpus file to its web version on the CHILDES website,
517
+ and open it in a web browser.
518
+
519
+ The complete URL to be used is:
520
+ childes.childes_url_base + urlbase + fileid.replace('.xml', '.cha')
521
+
522
+ If no urlbase is passed, we try to calculate it. This
523
+ requires that the childes corpus was set up to mirror the
524
+ folder hierarchy under childes.psy.cmu.edu/data-xml/, e.g.:
525
+ nltk_data/corpora/childes/Eng-USA/Cornell/??? or
526
+ nltk_data/corpora/childes/Romance/Spanish/Aguirre/???
527
+
528
+ The function first looks (as a special case) if "Eng-USA" is
529
+ on the path consisting of <corpus root>+fileid; then if
530
+ "childes", possibly followed by "data-xml", appears. If neither
531
+ one is found, we use the unmodified fileid and hope for the best.
532
+ If this is not right, specify urlbase explicitly, e.g., if the
533
+ corpus root points to the Cornell folder, urlbase='Eng-USA/Cornell'.
534
+ """
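+ 
+ # Illustrative outcome (fileid invented for the example): with
+ # urlbase='Eng-USA/Cornell' and fileid='moore/01.xml', the path becomes
+ # 'Eng-USA/Cornell/moore/01.cha' and the page opened is
+ # childes_url_base + that path.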
535
+
536
+ import webbrowser
537
+
538
+ if urlbase:
539
+ path = urlbase + "/" + fileid
540
+ else:
541
+ full = self.root + "/" + fileid
542
+ full = re.sub(r"\\", "/", full)
543
+ if "/childes/" in full.lower():
544
+ # Discard /data-xml/ if present
545
+ path = re.findall(r"(?i)/childes(?:/data-xml)?/(.*)\.xml", full)[0]
546
+ elif "eng-usa" in full.lower():
547
+ path = "Eng-USA/" + re.findall(r"/(?i)Eng-USA/(.*)\.xml", full)[0]
548
+ else:
549
+ path = fileid
550
+
551
+ # Strip ".xml" and add ".cha", as necessary:
552
+ if path.endswith(".xml"):
553
+ path = path[:-4]
554
+
555
+ if not path.endswith(".cha"):
556
+ path = path + ".cha"
557
+
558
+ url = self.childes_url_base + path
559
+
560
+ webbrowser.open_new_tab(url)
561
+ print("Opening in browser:", url)
562
+ # Pausing is a good idea, but it's up to the user...
563
+ # raw_input("Hit Return to continue")
564
+
565
+
566
+ def demo(corpus_root=None):
567
+ """
568
+ The CHILDES corpus should be manually downloaded and saved
569
+ to ``[NLTK_Data_Dir]/corpora/childes/``
570
+ """
571
+ if not corpus_root:
572
+ from nltk.data import find
573
+
574
+ corpus_root = find("corpora/childes/data-xml/Eng-USA/")
575
+
576
+ try:
577
+ childes = CHILDESCorpusReader(corpus_root, ".*.xml")
578
+ # describe all corpus
579
+ for file in childes.fileids()[:5]:
580
+ corpus = ""
581
+ corpus_id = ""
582
+ for (key, value) in childes.corpus(file)[0].items():
583
+ if key == "Corpus":
584
+ corpus = value
585
+ if key == "Id":
586
+ corpus_id = value
587
+ print("Reading", corpus, corpus_id, " .....")
588
+ print("words:", childes.words(file)[:7], "...")
589
+ print(
590
+ "words with replaced words:",
591
+ childes.words(file, replace=True)[:7],
592
+ " ...",
593
+ )
594
+ print("words with pos tags:", childes.tagged_words(file)[:7], " ...")
595
+ print("words (only MOT):", childes.words(file, speaker="MOT")[:7], "...")
596
+ print("words (only CHI):", childes.words(file, speaker="CHI")[:7], "...")
597
+ print("stemmed words:", childes.words(file, stem=True)[:7], " ...")
598
+ print(
599
+ "words with relations and pos-tag:",
600
+ childes.words(file, relation=True)[:5],
601
+ " ...",
602
+ )
603
+ print("sentence:", childes.sents(file)[:2], " ...")
604
+ for (participant, values) in childes.participants(file)[0].items():
605
+ for (key, value) in values.items():
606
+ print("\tparticipant", participant, key, ":", value)
607
+ print("num of sent:", len(childes.sents(file)))
608
+ print("num of morphemes:", len(childes.words(file, stem=True)))
609
+ print("age:", childes.age(file))
610
+ print("age in month:", childes.age(file, month=True))
611
+ print("MLU:", childes.MLU(file))
612
+ print()
613
+
614
+ except LookupError as e:
615
+ print(
616
+ """The CHILDES corpus, or the parts you need, should be manually
617
+ downloaded from https://childes.talkbank.org/data-xml/ and saved at
618
+ [NLTK_Data_Dir]/corpora/childes/
619
+ Alternately, you can call the demo with the path to a portion of the CHILDES corpus, e.g.:
620
+ demo('/path/to/childes/data-xml/Eng-USA/')
621
+ """
622
+ )
623
+ # corpus_root_http = urllib2.urlopen('https://childes.talkbank.org/data-xml/Eng-USA/Bates.zip')
624
+ # corpus_root_http_bates = zipfile.ZipFile(cStringIO.StringIO(corpus_root_http.read()))
625
+ ##this fails
626
+ # childes = CHILDESCorpusReader(corpus_root_http_bates,corpus_root_http_bates.namelist())
627
+
628
+
629
+ if __name__ == "__main__":
630
+ demo()
venv/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py ADDED
@@ -0,0 +1,273 @@
1
+ # Natural Language Toolkit: Chunked Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A reader for corpora that contain chunked (and optionally tagged)
11
+ documents.
12
+ """
13
+
14
+ import codecs
15
+ import os.path
16
+
17
+ import nltk
18
+ from nltk.chunk import tagstr2tree
19
+ from nltk.corpus.reader.api import *
20
+ from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
21
+ from nltk.corpus.reader.util import *
22
+ from nltk.tokenize import *
23
+ from nltk.tree import Tree
24
+
25
+
26
+ class ChunkedCorpusReader(CorpusReader):
27
+ """
28
+ Reader for chunked (and optionally tagged) corpora. Paragraphs
29
+ are split using a block reader. They are then tokenized into
30
+ sentences using a sentence tokenizer. Finally, these sentences
31
+ are parsed into chunk trees using a string-to-chunktree conversion
32
+ function. Each of these steps can be performed using a default
33
+ function or a custom function. By default, paragraphs are split
34
+ on blank lines; sentences are listed one per line; and sentences
35
+ are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
36
+ """
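+ 
+ # A minimal usage sketch (file name and contents invented for illustration):
+ # given a file 'sample.chunk' with one sentence per line in the
+ # ``tagstr2tree`` notation, e.g.
+ #
+ #     [ The/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./.
+ #
+ # the reader could be built and queried as:
+ #
+ #     reader = ChunkedCorpusReader('.', ['sample.chunk'])
+ #     reader.chunked_sents()[0]    # a shallow Tree with NP chunks
+ #     reader.tagged_words()[:3]    # [('The', 'DT'), ('cat', 'NN'), ('sat', 'VBD')]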
37
+
38
+ def __init__(
39
+ self,
40
+ root,
41
+ fileids,
42
+ extension="",
43
+ str2chunktree=tagstr2tree,
44
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
45
+ para_block_reader=read_blankline_block,
46
+ encoding="utf8",
47
+ tagset=None,
48
+ ):
49
+ """
50
+ :param root: The root directory for this corpus.
51
+ :param fileids: A list or regexp specifying the fileids in this corpus.
52
+ """
53
+ CorpusReader.__init__(self, root, fileids, encoding)
54
+ self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader, tagset)
55
+ """Arguments for corpus views generated by this corpus: a tuple
56
+ (str2chunktree, sent_tokenizer, para_block_tokenizer)"""
57
+
58
+ def words(self, fileids=None):
59
+ """
60
+ :return: the given file(s) as a list of words
61
+ and punctuation symbols.
62
+ :rtype: list(str)
63
+ """
64
+ return concat(
65
+ [
66
+ ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
67
+ for (f, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+ def sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of
74
+ sentences or utterances, each encoded as a list of word
75
+ strings.
76
+ :rtype: list(list(str))
77
+ """
78
+ return concat(
79
+ [
80
+ ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
81
+ for (f, enc) in self.abspaths(fileids, True)
82
+ ]
83
+ )
84
+
85
+ def paras(self, fileids=None):
86
+ """
87
+ :return: the given file(s) as a list of
88
+ paragraphs, each encoded as a list of sentences, which are
89
+ in turn encoded as lists of word strings.
90
+ :rtype: list(list(list(str)))
91
+ """
92
+ return concat(
93
+ [
94
+ ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
95
+ for (f, enc) in self.abspaths(fileids, True)
96
+ ]
97
+ )
98
+
99
+ def tagged_words(self, fileids=None, tagset=None):
100
+ """
101
+ :return: the given file(s) as a list of tagged
102
+ words and punctuation symbols, encoded as tuples
103
+ ``(word,tag)``.
104
+ :rtype: list(tuple(str,str))
105
+ """
106
+ return concat(
107
+ [
108
+ ChunkedCorpusView(
109
+ f, enc, 1, 0, 0, 0, *self._cv_args, target_tagset=tagset
110
+ )
111
+ for (f, enc) in self.abspaths(fileids, True)
112
+ ]
113
+ )
114
+
115
+ def tagged_sents(self, fileids=None, tagset=None):
116
+ """
117
+ :return: the given file(s) as a list of
118
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
119
+
120
+ :rtype: list(list(tuple(str,str)))
121
+ """
122
+ return concat(
123
+ [
124
+ ChunkedCorpusView(
125
+ f, enc, 1, 1, 0, 0, *self._cv_args, target_tagset=tagset
126
+ )
127
+ for (f, enc) in self.abspaths(fileids, True)
128
+ ]
129
+ )
130
+
131
+ def tagged_paras(self, fileids=None, tagset=None):
132
+ """
133
+ :return: the given file(s) as a list of
134
+ paragraphs, each encoded as a list of sentences, which are
135
+ in turn encoded as lists of ``(word,tag)`` tuples.
136
+ :rtype: list(list(list(tuple(str,str))))
137
+ """
138
+ return concat(
139
+ [
140
+ ChunkedCorpusView(
141
+ f, enc, 1, 1, 1, 0, *self._cv_args, target_tagset=tagset
142
+ )
143
+ for (f, enc) in self.abspaths(fileids, True)
144
+ ]
145
+ )
146
+
147
+ def chunked_words(self, fileids=None, tagset=None):
148
+ """
149
+ :return: the given file(s) as a list of tagged
150
+ words and chunks. Words are encoded as ``(word, tag)``
151
+ tuples (if the corpus has tags) or word strings (if the
152
+ corpus has no tags). Chunks are encoded as depth-one
153
+ trees over ``(word,tag)`` tuples or word strings.
154
+ :rtype: list(tuple(str,str) and Tree)
155
+ """
156
+ return concat(
157
+ [
158
+ ChunkedCorpusView(
159
+ f, enc, 1, 0, 0, 1, *self._cv_args, target_tagset=tagset
160
+ )
161
+ for (f, enc) in self.abspaths(fileids, True)
162
+ ]
163
+ )
164
+
165
+ def chunked_sents(self, fileids=None, tagset=None):
166
+ """
167
+ :return: the given file(s) as a list of
168
+ sentences, each encoded as a shallow Tree. The leaves
169
+ of these trees are encoded as ``(word, tag)`` tuples (if
170
+ the corpus has tags) or word strings (if the corpus has no
171
+ tags).
172
+ :rtype: list(Tree)
173
+ """
174
+ return concat(
175
+ [
176
+ ChunkedCorpusView(
177
+ f, enc, 1, 1, 0, 1, *self._cv_args, target_tagset=tagset
178
+ )
179
+ for (f, enc) in self.abspaths(fileids, True)
180
+ ]
181
+ )
182
+
183
+ def chunked_paras(self, fileids=None, tagset=None):
184
+ """
185
+ :return: the given file(s) as a list of
186
+ paragraphs, each encoded as a list of sentences, which are
187
+ in turn encoded as a shallow Tree. The leaves of these
188
+ trees are encoded as ``(word, tag)`` tuples (if the corpus
189
+ has tags) or word strings (if the corpus has no tags).
190
+ :rtype: list(list(Tree))
191
+ """
192
+ return concat(
193
+ [
194
+ ChunkedCorpusView(
195
+ f, enc, 1, 1, 1, 1, *self._cv_args, target_tagset=tagset
196
+ )
197
+ for (f, enc) in self.abspaths(fileids, True)
198
+ ]
199
+ )
200
+
201
+ def _read_block(self, stream):
202
+ return [tagstr2tree(t) for t in read_blankline_block(stream)]
203
+
204
+
205
+ class ChunkedCorpusView(StreamBackedCorpusView):
206
+ def __init__(
207
+ self,
208
+ fileid,
209
+ encoding,
210
+ tagged,
211
+ group_by_sent,
212
+ group_by_para,
213
+ chunked,
214
+ str2chunktree,
215
+ sent_tokenizer,
216
+ para_block_reader,
217
+ source_tagset=None,
218
+ target_tagset=None,
219
+ ):
220
+ StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
221
+ self._tagged = tagged
222
+ self._group_by_sent = group_by_sent
223
+ self._group_by_para = group_by_para
224
+ self._chunked = chunked
225
+ self._str2chunktree = str2chunktree
226
+ self._sent_tokenizer = sent_tokenizer
227
+ self._para_block_reader = para_block_reader
228
+ self._source_tagset = source_tagset
229
+ self._target_tagset = target_tagset
230
+
231
+ def read_block(self, stream):
232
+ block = []
233
+ for para_str in self._para_block_reader(stream):
234
+ para = []
235
+ for sent_str in self._sent_tokenizer.tokenize(para_str):
236
+ sent = self._str2chunktree(
237
+ sent_str,
238
+ source_tagset=self._source_tagset,
239
+ target_tagset=self._target_tagset,
240
+ )
241
+
242
+ # If requested, throw away the tags.
243
+ if not self._tagged:
244
+ sent = self._untag(sent)
245
+
246
+ # If requested, throw away the chunks.
247
+ if not self._chunked:
248
+ sent = sent.leaves()
249
+
250
+ # Add the sentence to `para`.
251
+ if self._group_by_sent:
252
+ para.append(sent)
253
+ else:
254
+ para.extend(sent)
255
+
256
+ # Add the paragraph to `block`.
257
+ if self._group_by_para:
258
+ block.append(para)
259
+ else:
260
+ block.extend(para)
261
+
262
+ # Return the block
263
+ return block
264
+
265
+ def _untag(self, tree):
266
+ for i, child in enumerate(tree):
267
+ if isinstance(child, Tree):
268
+ self._untag(child)
269
+ elif isinstance(child, tuple):
270
+ tree[i] = child[0]
271
+ else:
272
+ raise ValueError("expected child to be Tree or tuple")
273
+ return tree
venv/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py ADDED
@@ -0,0 +1,88 @@
1
+ # Natural Language Toolkit: Carnegie Mellon Pronouncing Dictionary Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6]
10
+ ftp://ftp.cs.cmu.edu/project/speech/dict/
11
+ Copyright 1998 Carnegie Mellon University
12
+
13
+ File Format: Each line consists of an uppercased word, a counter
14
+ (for alternative pronunciations), and a transcription. Vowels are
15
+ marked for stress (1=primary, 2=secondary, 0=no stress). E.g.:
16
+ NATURAL 1 N AE1 CH ER0 AH0 L
17
+
18
+ The dictionary contains 127069 entries. Of these, 119400 words are assigned
19
+ a unique pronunciation, 6830 words have two pronunciations, and 839 words have
20
+ three or more pronunciations. Many of these are fast-speech variants.
21
+
22
+ Phonemes: There are 39 phonemes, as shown below:
23
+
24
+ Phoneme Example Translation Phoneme Example Translation
25
+ ------- ------- ----------- ------- ------- -----------
26
+ AA odd AA D AE at AE T
27
+ AH hut HH AH T AO ought AO T
28
+ AW cow K AW AY hide HH AY D
29
+ B be B IY CH cheese CH IY Z
30
+ D dee D IY DH thee DH IY
31
+ EH Ed EH D ER hurt HH ER T
32
+ EY ate EY T F fee F IY
33
+ G green G R IY N HH he HH IY
34
+ IH it IH T IY eat IY T
35
+ JH gee JH IY K key K IY
36
+ L lee L IY M me M IY
37
+ N knee N IY NG ping P IH NG
38
+ OW oat OW T OY toy T OY
39
+ P pee P IY R read R IY D
40
+ S sea S IY SH she SH IY
41
+ T tea T IY TH theta TH EY T AH
42
+ UH hood HH UH D UW two T UW
43
+ V vee V IY W we W IY
44
+ Y yield Y IY L D Z zee Z IY
45
+ ZH seizure S IY ZH ER
46
+ """
47
+
48
+ from nltk.corpus.reader.api import *
49
+ from nltk.corpus.reader.util import *
50
+ from nltk.util import Index
51
+
52
+
53
+ class CMUDictCorpusReader(CorpusReader):
54
+ def entries(self):
55
+ """
56
+ :return: the cmudict lexicon as a list of entries
57
+ containing (word, transcriptions) tuples.
58
+ """
59
+ return concat(
60
+ [
61
+ StreamBackedCorpusView(fileid, read_cmudict_block, encoding=enc)
62
+ for fileid, enc in self.abspaths(None, True)
63
+ ]
64
+ )
65
+
66
+ def words(self):
67
+ """
68
+ :return: a list of all words defined in the cmudict lexicon.
69
+ """
70
+ return [word.lower() for (word, _) in self.entries()]
71
+
72
+ def dict(self):
73
+ """
74
+ :return: the cmudict lexicon as a dictionary, whose keys are
75
+ lowercase words and whose values are lists of pronunciations.
76
+ """
77
+ return dict(Index(self.entries()))
78
+
79
+
80
+ def read_cmudict_block(stream):
81
+ entries = []
82
+ while len(entries) < 100: # Read 100 at a time.
83
+ line = stream.readline()
84
+ if line == "":
85
+ return entries # end of file.
86
+ pieces = line.split()
87
+ entries.append((pieces[0].lower(), pieces[2:]))
88
+ return entries
venv/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py ADDED
1
+ # Natural Language Toolkit: Comparative Sentence Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for the Comparative Sentence Dataset.
10
+
11
+ - Comparative Sentence Dataset information -
12
+
13
+ Annotated by: Nitin Jindal and Bing Liu, 2006.
14
+ Department of Computer Science
15
+ University of Illinois at Chicago
16
+
17
+ Contact: Nitin Jindal, [email protected]
18
+ Bing Liu, [email protected] (https://www.cs.uic.edu/~liub)
19
+
20
+ Distributed with permission.
21
+
22
+ Related papers:
23
+
24
+ - Nitin Jindal and Bing Liu. "Identifying Comparative Sentences in Text Documents".
25
+ Proceedings of the ACM SIGIR International Conference on Information Retrieval
26
+ (SIGIR-06), 2006.
27
+
28
+ - Nitin Jindal and Bing Liu. "Mining Comparative Sentences and Relations".
29
+ Proceedings of Twenty First National Conference on Artificial Intelligence
30
+ (AAAI-2006), 2006.
31
+
32
+ - Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences".
33
+ Proceedings of the 22nd International Conference on Computational Linguistics
34
+ (Coling-2008), Manchester, 18-22 August, 2008.
35
+ """
36
+ import re
37
+
38
+ from nltk.corpus.reader.api import *
39
+ from nltk.tokenize import *
40
+
41
+ # Regular expressions for dataset components
42
+ STARS = re.compile(r"^\*+$")
43
+ COMPARISON = re.compile(r"<cs-[1234]>")
44
+ CLOSE_COMPARISON = re.compile(r"</cs-[1234]>")
45
+ GRAD_COMPARISON = re.compile(r"<cs-[123]>")
46
+ NON_GRAD_COMPARISON = re.compile(r"<cs-4>")
47
+ ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)")
48
+ KEYWORD = re.compile(r"\(([^\(]*)\)$")
49
+
50
+
51
+ class Comparison:
52
+ """
53
+ A Comparison represents a comparative sentence and its constituents.
54
+ """
55
+
56
+ def __init__(
57
+ self,
58
+ text=None,
59
+ comp_type=None,
60
+ entity_1=None,
61
+ entity_2=None,
62
+ feature=None,
63
+ keyword=None,
64
+ ):
65
+ """
66
+ :param text: a string (optionally tokenized) containing a comparison.
67
+ :param comp_type: an integer defining the type of comparison expressed.
68
+ Values can be: 1 (Non-equal gradable), 2 (Equative), 3 (Superlative),
69
+ 4 (Non-gradable).
70
+ :param entity_1: the first entity considered in the comparison relation.
71
+ :param entity_2: the second entity considered in the comparison relation.
72
+ :param feature: the feature considered in the comparison relation.
73
+ :param keyword: the word or phrase which is used for that comparative relation.
74
+ """
75
+ self.text = text
76
+ self.comp_type = comp_type
77
+ self.entity_1 = entity_1
78
+ self.entity_2 = entity_2
79
+ self.feature = feature
80
+ self.keyword = keyword
81
+
82
+ def __repr__(self):
83
+ return (
84
+ 'Comparison(text="{}", comp_type={}, entity_1="{}", entity_2="{}", '
85
+ 'feature="{}", keyword="{}")'
86
+ ).format(
87
+ self.text,
88
+ self.comp_type,
89
+ self.entity_1,
90
+ self.entity_2,
91
+ self.feature,
92
+ self.keyword,
93
+ )
94
+
95
+
96
+ class ComparativeSentencesCorpusReader(CorpusReader):
97
+ """
98
+ Reader for the Comparative Sentence Dataset by Jindal and Liu (2006).
99
+
100
+ >>> from nltk.corpus import comparative_sentences
101
+ >>> comparison = comparative_sentences.comparisons()[0]
102
+ >>> comparison.text # doctest: +NORMALIZE_WHITESPACE
103
+ ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly',
104
+ 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve",
105
+ 'had', '.']
106
+ >>> comparison.entity_2
107
+ 'models'
108
+ >>> (comparison.feature, comparison.keyword)
109
+ ('rewind', 'more')
110
+ >>> len(comparative_sentences.comparisons())
111
+ 853
112
+ """
113
+
114
+ CorpusView = StreamBackedCorpusView
115
+
116
+ def __init__(
117
+ self,
118
+ root,
119
+ fileids,
120
+ word_tokenizer=WhitespaceTokenizer(),
121
+ sent_tokenizer=None,
122
+ encoding="utf8",
123
+ ):
124
+ """
125
+ :param root: The root directory for this corpus.
126
+ :param fileids: a list or regexp specifying the fileids in this corpus.
127
+ :param word_tokenizer: tokenizer for breaking sentences or paragraphs
128
+ into words. Default: `WhitespaceTokenizer`
129
+ :param sent_tokenizer: tokenizer for breaking paragraphs into sentences.
130
+ :param encoding: the encoding that should be used to read the corpus.
131
+ """
132
+
133
+ CorpusReader.__init__(self, root, fileids, encoding)
134
+ self._word_tokenizer = word_tokenizer
135
+ self._sent_tokenizer = sent_tokenizer
136
+ self._readme = "README.txt"
137
+
138
+ def comparisons(self, fileids=None):
139
+ """
140
+ Return all comparisons in the corpus.
141
+
142
+ :param fileids: a list or regexp specifying the ids of the files whose
143
+ comparisons have to be returned.
144
+ :return: the given file(s) as a list of Comparison objects.
145
+ :rtype: list(Comparison)
146
+ """
147
+ if fileids is None:
148
+ fileids = self._fileids
149
+ elif isinstance(fileids, str):
150
+ fileids = [fileids]
151
+ return concat(
152
+ [
153
+ self.CorpusView(path, self._read_comparison_block, encoding=enc)
154
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
155
+ ]
156
+ )
157
+
158
+ def keywords(self, fileids=None):
159
+ """
160
+ Return a set of all keywords used in the corpus.
161
+
162
+ :param fileids: a list or regexp specifying the ids of the files whose
163
+ keywords have to be returned.
164
+ :return: the set of keywords and comparative phrases used in the corpus.
165
+ :rtype: set(str)
166
+ """
167
+ all_keywords = concat(
168
+ [
169
+ self.CorpusView(path, self._read_keyword_block, encoding=enc)
170
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
171
+ ]
172
+ )
173
+
174
+ keywords_set = {keyword.lower() for keyword in all_keywords if keyword}
175
+ return keywords_set
176
+
177
+ def keywords_readme(self):
178
+ """
179
+ Return the list of words and constituents considered as clues of a
180
+ comparison (from listOfkeywords.txt).
181
+ """
182
+ keywords = []
183
+ with self.open("listOfkeywords.txt") as fp:
184
+ raw_text = fp.read()
185
+ for line in raw_text.split("\n"):
186
+ if not line or line.startswith("//"):
187
+ continue
188
+ keywords.append(line.strip())
189
+ return keywords
190
+
191
+ def sents(self, fileids=None):
192
+ """
193
+ Return all sentences in the corpus.
194
+
195
+ :param fileids: a list or regexp specifying the ids of the files whose
196
+ sentences have to be returned.
197
+ :return: all sentences of the corpus as lists of tokens (or as plain
198
+ strings, if no word tokenizer is specified).
199
+ :rtype: list(list(str)) or list(str)
200
+ """
201
+ return concat(
202
+ [
203
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
204
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
205
+ ]
206
+ )
207
+
208
+ def words(self, fileids=None):
209
+ """
210
+ Return all words and punctuation symbols in the corpus.
211
+
212
+ :param fileids: a list or regexp specifying the ids of the files whose
213
+ words have to be returned.
214
+ :return: the given file(s) as a list of words and punctuation symbols.
215
+ :rtype: list(str)
216
+ """
217
+ return concat(
218
+ [
219
+ self.CorpusView(path, self._read_word_block, encoding=enc)
220
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
221
+ ]
222
+ )
223
+
224
+ def _read_comparison_block(self, stream):
225
+ while True:
226
+ line = stream.readline()
227
+ if not line:
228
+ return [] # end of file.
229
+ comparison_tags = re.findall(COMPARISON, line)
230
+ if comparison_tags:
231
+ grad_comparisons = re.findall(GRAD_COMPARISON, line)
232
+ non_grad_comparisons = re.findall(NON_GRAD_COMPARISON, line)
233
+ # Advance to the next line (it contains the comparative sentence)
234
+ comparison_text = stream.readline().strip()
235
+ if self._word_tokenizer:
236
+ comparison_text = self._word_tokenizer.tokenize(comparison_text)
237
+ # Skip the next line (it contains closing comparison tags)
238
+ stream.readline()
239
+ # If gradable comparisons are found, create Comparison instances
240
+ # and populate their fields
241
+ comparison_bundle = []
242
+ if grad_comparisons:
243
+ # Each comparison tag has its own relations on a separate line
244
+ for comp in grad_comparisons:
245
+ comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
246
+ comparison = Comparison(
247
+ text=comparison_text, comp_type=comp_type
248
+ )
249
+ line = stream.readline()
250
+ entities_feats = ENTITIES_FEATS.findall(line)
251
+ if entities_feats:
252
+ for (code, entity_feat) in entities_feats:
253
+ if code == "1":
254
+ comparison.entity_1 = entity_feat.strip()
255
+ elif code == "2":
256
+ comparison.entity_2 = entity_feat.strip()
257
+ elif code == "3":
258
+ comparison.feature = entity_feat.strip()
259
+ keyword = KEYWORD.findall(line)
260
+ if keyword:
261
+ comparison.keyword = keyword[0]
262
+ comparison_bundle.append(comparison)
263
+ # If non-gradable comparisons are found, create a simple Comparison
264
+ # instance for each one
265
+ if non_grad_comparisons:
266
+ for comp in non_grad_comparisons:
267
+ # comp_type in this case should always be 4.
268
+ comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
269
+ comparison = Comparison(
270
+ text=comparison_text, comp_type=comp_type
271
+ )
272
+ comparison_bundle.append(comparison)
273
+ # Flatten the list of comparisons before returning them
274
+ # return concat([comparison_bundle])
275
+ return comparison_bundle
276
+
277
+ def _read_keyword_block(self, stream):
278
+ keywords = []
279
+ for comparison in self._read_comparison_block(stream):
280
+ keywords.append(comparison.keyword)
281
+ return keywords
282
+
283
+ def _read_sent_block(self, stream):
284
+ while True:
285
+ line = stream.readline()
286
+ if re.match(STARS, line):
287
+ while True:
288
+ line = stream.readline()
289
+ if re.match(STARS, line):
290
+ break
291
+ continue
292
+ if (
293
+ not re.findall(COMPARISON, line)
294
+ and not ENTITIES_FEATS.findall(line)
295
+ and not re.findall(CLOSE_COMPARISON, line)
296
+ ):
297
+ if self._sent_tokenizer:
298
+ return [
299
+ self._word_tokenizer.tokenize(sent)
300
+ for sent in self._sent_tokenizer.tokenize(line)
301
+ ]
302
+ else:
303
+ return [self._word_tokenizer.tokenize(line)]
304
+
305
+ def _read_word_block(self, stream):
306
+ words = []
307
+ for sent in self._read_sent_block(stream):
308
+ words.extend(sent)
309
+ return words
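A minimal usage sketch for the views defined above, assuming NLTK's bundled `comparative_sentences` loader (not shown in this diff) and that the corpus data has been fetched with nltk.download('comparative_sentences'); the printed values depend on the installed data.

from nltk.corpus import comparative_sentences

# keywords() lower-cases and de-duplicates the keyword of every comparison
print(sorted(comparative_sentences.keywords())[:10])

# sents() yields tokenized sentences (or raw strings if no word tokenizer);
# words() flattens them into a single token list
print(comparative_sentences.sents()[0])
print(comparative_sentences.words()[:20])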
venv/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py ADDED
@@ -0,0 +1,115 @@
1
+ # Natural Language Toolkit: Dependency Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Kepa Sarasola <[email protected]>
5
+ # Iker Manterola <[email protected]>
6
+ #
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+ from nltk.parse import DependencyGraph
13
+ from nltk.tokenize import *
14
+
15
+
16
+ class DependencyCorpusReader(SyntaxCorpusReader):
17
+ def __init__(
18
+ self,
19
+ root,
20
+ fileids,
21
+ encoding="utf8",
22
+ word_tokenizer=TabTokenizer(),
23
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
24
+ para_block_reader=read_blankline_block,
25
+ ):
26
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
27
+
28
+ #########################################################
29
+
30
+ def words(self, fileids=None):
31
+ return concat(
32
+ [
33
+ DependencyCorpusView(fileid, False, False, False, encoding=enc)
34
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
35
+ ]
36
+ )
37
+
38
+ def tagged_words(self, fileids=None):
39
+ return concat(
40
+ [
41
+ DependencyCorpusView(fileid, True, False, False, encoding=enc)
42
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
43
+ ]
44
+ )
45
+
46
+ def sents(self, fileids=None):
47
+ return concat(
48
+ [
49
+ DependencyCorpusView(fileid, False, True, False, encoding=enc)
50
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
51
+ ]
52
+ )
53
+
54
+ def tagged_sents(self, fileids=None):
55
+ return concat(
56
+ [
57
+ DependencyCorpusView(fileid, True, True, False, encoding=enc)
58
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
59
+ ]
60
+ )
61
+
62
+ def parsed_sents(self, fileids=None):
63
+ sents = concat(
64
+ [
65
+ DependencyCorpusView(fileid, False, True, True, encoding=enc)
66
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
67
+ ]
68
+ )
69
+ return [DependencyGraph(sent) for sent in sents]
70
+
71
+
72
+ class DependencyCorpusView(StreamBackedCorpusView):
73
+ _DOCSTART = "-DOCSTART- -DOCSTART- O\n" # marks the start of a document
74
+
75
+ def __init__(
76
+ self,
77
+ corpus_file,
78
+ tagged,
79
+ group_by_sent,
80
+ dependencies,
81
+ chunk_types=None,
82
+ encoding="utf8",
83
+ ):
84
+ self._tagged = tagged
85
+ self._dependencies = dependencies
86
+ self._group_by_sent = group_by_sent
87
+ self._chunk_types = chunk_types
88
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
89
+
90
+ def read_block(self, stream):
91
+ # Read the next sentence.
92
+ sent = read_blankline_block(stream)[0].strip()
93
+ # Strip off the docstart marker, if present.
94
+ if sent.startswith(self._DOCSTART):
95
+ sent = sent[len(self._DOCSTART) :].lstrip()
96
+
97
+ # extract word and tag from any of the formats
98
+ if not self._dependencies:
99
+ lines = [line.split("\t") for line in sent.split("\n")]
100
+ if len(lines[0]) == 3 or len(lines[0]) == 4:
101
+ sent = [(line[0], line[1]) for line in lines]
102
+ elif len(lines[0]) == 10:
103
+ sent = [(line[1], line[4]) for line in lines]
104
+ else:
105
+ raise ValueError("Unexpected number of fields in dependency tree file")
106
+
107
+ # discard tags if they weren't requested
108
+ if not self._tagged:
109
+ sent = [word for (word, tag) in sent]
110
+
111
+ # Return the result.
112
+ if self._group_by_sent:
113
+ return [sent]
114
+ else:
115
+ return list(sent)
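A short sketch of this reader in use via NLTK's `dependency_treebank` loader, which is built on DependencyCorpusReader; it assumes nltk.download('dependency_treebank') has been run.

from nltk.corpus import dependency_treebank

# Word and tagged-sentence views extracted from the tab-separated blocks
print(dependency_treebank.words()[:10])
print(dependency_treebank.tagged_sents()[0][:5])

# parsed_sents() wraps each sentence block in a DependencyGraph
graph = dependency_treebank.parsed_sents()[0]
print(graph.tree())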
venv/lib/python3.10/site-packages/nltk/corpus/reader/indian.py ADDED
@@ -0,0 +1,93 @@
1
+ # Natural Language Toolkit: Indian Language POS-Tagged Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Indian Language POS-Tagged Corpus
11
+ Collected by A Kumaran, Microsoft Research, India
12
+ Distributed with permission
13
+
14
+ Contents:
15
+ - Bangla: IIT Kharagpur
16
+ - Hindi: Microsoft Research India
17
+ - Marathi: IIT Bombay
18
+ - Telugu: IIIT Hyderabad
19
+ """
20
+
21
+ from nltk.corpus.reader.api import *
22
+ from nltk.corpus.reader.util import *
23
+ from nltk.tag import map_tag, str2tuple
24
+
25
+
26
+ class IndianCorpusReader(CorpusReader):
27
+ """
28
+ List of words, one per line. Blank lines are ignored.
29
+ """
30
+
31
+ def words(self, fileids=None):
32
+ return concat(
33
+ [
34
+ IndianCorpusView(fileid, enc, False, False)
35
+ for (fileid, enc) in self.abspaths(fileids, True)
36
+ ]
37
+ )
38
+
39
+ def tagged_words(self, fileids=None, tagset=None):
40
+ if tagset and tagset != self._tagset:
41
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
42
+ else:
43
+ tag_mapping_function = None
44
+ return concat(
45
+ [
46
+ IndianCorpusView(fileid, enc, True, False, tag_mapping_function)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def sents(self, fileids=None):
52
+ return concat(
53
+ [
54
+ IndianCorpusView(fileid, enc, False, True)
55
+ for (fileid, enc) in self.abspaths(fileids, True)
56
+ ]
57
+ )
58
+
59
+ def tagged_sents(self, fileids=None, tagset=None):
60
+ if tagset and tagset != self._tagset:
61
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
62
+ else:
63
+ tag_mapping_function = None
64
+ return concat(
65
+ [
66
+ IndianCorpusView(fileid, enc, True, True, tag_mapping_function)
67
+ for (fileid, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+
72
+ class IndianCorpusView(StreamBackedCorpusView):
73
+ def __init__(
74
+ self, corpus_file, encoding, tagged, group_by_sent, tag_mapping_function=None
75
+ ):
76
+ self._tagged = tagged
77
+ self._group_by_sent = group_by_sent
78
+ self._tag_mapping_function = tag_mapping_function
79
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
80
+
81
+ def read_block(self, stream):
82
+ line = stream.readline()
83
+ if line.startswith("<"):
84
+ return []
85
+ sent = [str2tuple(word, sep="_") for word in line.split()]
86
+ if self._tag_mapping_function:
87
+ sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent]
88
+ if not self._tagged:
89
+ sent = [w for (w, t) in sent]
90
+ if self._group_by_sent:
91
+ return [sent]
92
+ else:
93
+ return sent
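A minimal sketch using NLTK's `indian` loader, which wraps this reader; it assumes nltk.download('indian') and that 'hindi.pos' is among the available fileids.

from nltk.corpus import indian

# read_block() splits each "word_TAG" token with str2tuple(sep="_")
print(indian.tagged_words('hindi.pos')[:5])

# The untagged views simply drop the tag half of each pair
print(indian.sents('hindi.pos')[0])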
venv/lib/python3.10/site-packages/nltk/corpus/reader/knbc.py ADDED
@@ -0,0 +1,188 @@
1
+ #! /usr/bin/env python
2
+ # KNB Corpus reader
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Masato Hagiwara <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # For more information, see http://lilyx.net/pages/nltkjapanesecorpus.html
9
+
10
+ import re
11
+
12
+ from nltk.corpus.reader.api import CorpusReader, SyntaxCorpusReader
13
+ from nltk.corpus.reader.util import (
14
+ FileSystemPathPointer,
15
+ find_corpus_fileids,
16
+ read_blankline_block,
17
+ )
18
+ from nltk.parse import DependencyGraph
19
+
20
+ # default function to convert morphlist to str for tree representation
21
+ _morphs2str_default = lambda morphs: "/".join(m[0] for m in morphs if m[0] != "EOS")
22
+
23
+
24
+ class KNBCorpusReader(SyntaxCorpusReader):
25
+ """
26
+ This class implements:
27
+ - ``__init__``, which specifies the location of the corpus
28
+ and a method for detecting the sentence blocks in corpus files.
29
+ - ``_read_block``, which reads a block from the input stream.
30
+ - ``_word``, which takes a block and returns a list of list of words.
31
+ - ``_tag``, which takes a block and returns a list of list of tagged
32
+ words.
33
+ - ``_parse``, which takes a block and returns a list of parsed
34
+ sentences.
35
+
36
+ The structure of tagged words:
37
+ tagged_word = (word(str), tags(tuple))
38
+ tags = (surface, reading, lemma, pos1, posid1, pos2, posid2, pos3, posid3, others ...)
39
+
40
+ Usage example
41
+
42
+ >>> from nltk.corpus.util import LazyCorpusLoader
43
+ >>> knbc = LazyCorpusLoader(
44
+ ... 'knbc/corpus1',
45
+ ... KNBCorpusReader,
46
+ ... r'.*/KN.*',
47
+ ... encoding='euc-jp',
48
+ ... )
49
+
50
+ >>> len(knbc.sents()[0])
51
+ 9
52
+
53
+ """
54
+
55
+ def __init__(self, root, fileids, encoding="utf8", morphs2str=_morphs2str_default):
56
+ """
57
+ Initialize KNBCorpusReader
58
+ morphs2str is a function to convert morphlist to str for tree representation
59
+ for _parse()
60
+ """
61
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
62
+ self.morphs2str = morphs2str
63
+
64
+ def _read_block(self, stream):
65
+ # blocks are split by blankline (or EOF) - default
66
+ return read_blankline_block(stream)
67
+
68
+ def _word(self, t):
69
+ res = []
70
+ for line in t.splitlines():
71
+ # ignore the Bunsets headers
72
+ if not re.match(r"EOS|\*|\#|\+", line):
73
+ cells = line.strip().split(" ")
74
+ res.append(cells[0])
75
+
76
+ return res
77
+
78
+ # ignores tagset argument
79
+ def _tag(self, t, tagset=None):
80
+ res = []
81
+ for line in t.splitlines():
82
+ # ignore the Bunsets headers
83
+ if not re.match(r"EOS|\*|\#|\+", line):
84
+ cells = line.strip().split(" ")
85
+ # convert cells to morph tuples
86
+ res.append((cells[0], " ".join(cells[1:])))
87
+
88
+ return res
89
+
90
+ def _parse(self, t):
91
+ dg = DependencyGraph()
92
+ i = 0
93
+ for line in t.splitlines():
94
+ if line[0] in "*+":
95
+ # start of bunsetsu or tag
96
+
97
+ cells = line.strip().split(" ", 3)
98
+ m = re.match(r"([\-0-9]*)([ADIP])", cells[1])
99
+
100
+ assert m is not None
101
+
102
+ node = dg.nodes[i]
103
+ node.update({"address": i, "rel": m.group(2), "word": []})
104
+
105
+ dep_parent = int(m.group(1))
106
+
107
+ if dep_parent == -1:
108
+ dg.root = node
109
+ else:
110
+ dg.nodes[dep_parent]["deps"].append(i)
111
+
112
+ i += 1
113
+ elif line[0] != "#":
114
+ # normal morph
115
+ cells = line.strip().split(" ")
116
+ # convert cells to morph tuples
117
+ morph = cells[0], " ".join(cells[1:])
118
+ dg.nodes[i - 1]["word"].append(morph)
119
+
120
+ if self.morphs2str:
121
+ for node in dg.nodes.values():
122
+ node["word"] = self.morphs2str(node["word"])
123
+
124
+ return dg.tree()
125
+
126
+
127
+ ######################################################################
128
+ # Demo
129
+ ######################################################################
130
+
131
+
132
+ def demo():
133
+
134
+ import nltk
135
+ from nltk.corpus.util import LazyCorpusLoader
136
+
137
+ root = nltk.data.find("corpora/knbc/corpus1")
138
+ fileids = [
139
+ f
140
+ for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
141
+ if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)
142
+ ]
143
+
144
+ def _knbc_fileids_sort(x):
145
+ cells = x.split("-")
146
+ return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))
147
+
148
+ knbc = LazyCorpusLoader(
149
+ "knbc/corpus1",
150
+ KNBCorpusReader,
151
+ sorted(fileids, key=_knbc_fileids_sort),
152
+ encoding="euc-jp",
153
+ )
154
+
155
+ print(knbc.fileids()[:10])
156
+ print("".join(knbc.words()[:100]))
157
+
158
+ print("\n\n".join(str(tree) for tree in knbc.parsed_sents()[:2]))
159
+
160
+ knbc.morphs2str = lambda morphs: "/".join(
161
+ "{}({})".format(m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS"
162
+ ).encode("utf-8")
163
+
164
+ print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))
165
+
166
+ print(
167
+ "\n".join(
168
+ " ".join("{}/{}".format(w[0], w[1].split(" ")[2]) for w in sent)
169
+ for sent in knbc.tagged_sents()[0:2]
170
+ )
171
+ )
172
+
173
+
174
+ def test():
175
+
176
+ from nltk.corpus.util import LazyCorpusLoader
177
+
178
+ knbc = LazyCorpusLoader(
179
+ "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp"
180
+ )
181
+ assert isinstance(knbc.words()[0], str)
182
+ assert isinstance(knbc.sents()[0][0], str)
183
+ assert isinstance(knbc.tagged_words()[0], tuple)
184
+ assert isinstance(knbc.tagged_sents()[0][0], tuple)
185
+
186
+
187
+ if __name__ == "__main__":
188
+ demo()
venv/lib/python3.10/site-packages/nltk/corpus/reader/lin.py ADDED
@@ -0,0 +1,183 @@
1
+ # Natural Language Toolkit: Lin's Thesaurus
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dan Blanchard <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.txt
7
+
8
+ import re
9
+ from collections import defaultdict
10
+ from functools import reduce
11
+
12
+ from nltk.corpus.reader import CorpusReader
13
+
14
+
15
+ class LinThesaurusCorpusReader(CorpusReader):
16
+ """Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin."""
17
+
18
+ # Compiled regular expression for extracting the key from the first line of each
19
+ # thesaurus entry
20
+ _key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+')
21
+
22
+ @staticmethod
23
+ def __defaultdict_factory():
24
+ """Factory for creating defaultdict of defaultdict(dict)s"""
25
+ return defaultdict(dict)
26
+
27
+ def __init__(self, root, badscore=0.0):
28
+ """
29
+ Initialize the thesaurus.
30
+
31
+ :param root: root directory containing thesaurus LISP files
32
+ :type root: C{string}
33
+ :param badscore: the score to give to words which do not appear in each other's sets of synonyms
34
+ :type badscore: C{float}
35
+ """
36
+
37
+ super().__init__(root, r"sim[A-Z]\.lsp")
38
+ self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory)
39
+ self._badscore = badscore
40
+ for path, encoding, fileid in self.abspaths(
41
+ include_encoding=True, include_fileid=True
42
+ ):
43
+ with open(path) as lin_file:
44
+ first = True
45
+ for line in lin_file:
46
+ line = line.strip()
47
+ # Start of entry
48
+ if first:
49
+ key = LinThesaurusCorpusReader._key_re.sub(r"\1", line)
50
+ first = False
51
+ # End of entry
52
+ elif line == "))":
53
+ first = True
54
+ # Lines with pairs of ngrams and scores
55
+ else:
56
+ split_line = line.split("\t")
57
+ if len(split_line) == 2:
58
+ ngram, score = split_line
59
+ self._thesaurus[fileid][key][ngram.strip('"')] = float(
60
+ score
61
+ )
62
+
63
+ def similarity(self, ngram1, ngram2, fileid=None):
64
+ """
65
+ Returns the similarity score for two ngrams.
66
+
67
+ :param ngram1: first ngram to compare
68
+ :type ngram1: C{string}
69
+ :param ngram2: second ngram to compare
70
+ :type ngram2: C{string}
71
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
72
+ :type fileid: C{string}
73
+ :return: If fileid is specified, just the score for the two ngrams; otherwise,
74
+ list of tuples of fileids and scores.
75
+ """
76
+ # Entries don't contain themselves, so make sure similarity between item and itself is 1.0
77
+ if ngram1 == ngram2:
78
+ if fileid:
79
+ return 1.0
80
+ else:
81
+ return [(fid, 1.0) for fid in self._fileids]
82
+ else:
83
+ if fileid:
84
+ return (
85
+ self._thesaurus[fileid][ngram1][ngram2]
86
+ if ngram2 in self._thesaurus[fileid][ngram1]
87
+ else self._badscore
88
+ )
89
+ else:
90
+ return [
91
+ (
92
+ fid,
93
+ (
94
+ self._thesaurus[fid][ngram1][ngram2]
95
+ if ngram2 in self._thesaurus[fid][ngram1]
96
+ else self._badscore
97
+ ),
98
+ )
99
+ for fid in self._fileids
100
+ ]
101
+
102
+ def scored_synonyms(self, ngram, fileid=None):
103
+ """
104
+ Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram
105
+
106
+ :param ngram: ngram to lookup
107
+ :type ngram: C{string}
108
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
109
+ :type fileid: C{string}
110
+ :return: If fileid is specified, list of tuples of scores and synonyms; otherwise,
111
+ list of tuples of fileids and lists, where inner lists consist of tuples of
112
+ scores and synonyms.
113
+ """
114
+ if fileid:
115
+ return self._thesaurus[fileid][ngram].items()
116
+ else:
117
+ return [
118
+ (fileid, self._thesaurus[fileid][ngram].items())
119
+ for fileid in self._fileids
120
+ ]
121
+
122
+ def synonyms(self, ngram, fileid=None):
123
+ """
124
+ Returns a list of synonyms for the current ngram.
125
+
126
+ :param ngram: ngram to lookup
127
+ :type ngram: C{string}
128
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
129
+ :type fileid: C{string}
130
+ :return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and
131
+ lists, where inner lists contain synonyms.
132
+ """
133
+ if fileid:
134
+ return self._thesaurus[fileid][ngram].keys()
135
+ else:
136
+ return [
137
+ (fileid, self._thesaurus[fileid][ngram].keys())
138
+ for fileid in self._fileids
139
+ ]
140
+
141
+ def __contains__(self, ngram):
142
+ """
143
+ Determines whether or not the given ngram is in the thesaurus.
144
+
145
+ :param ngram: ngram to lookup
146
+ :type ngram: C{string}
147
+ :return: whether the given ngram is in the thesaurus.
148
+ """
149
+ return reduce(
150
+ lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]),
151
+ self._fileids,
152
+ False,
153
+ )
154
+
155
+
156
+ ######################################################################
157
+ # Demo
158
+ ######################################################################
159
+
160
+
161
+ def demo():
162
+ from nltk.corpus import lin_thesaurus as thes
163
+
164
+ word1 = "business"
165
+ word2 = "enterprise"
166
+ print("Getting synonyms for " + word1)
167
+ print(thes.synonyms(word1))
168
+
169
+ print("Getting scored synonyms for " + word1)
170
+ print(thes.scored_synonyms(word1))
171
+
172
+ print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
173
+ print(thes.synonyms(word1, fileid="simN.lsp"))
174
+
175
+ print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
176
+ print(thes.synonyms(word1, fileid="simN.lsp"))
177
+
178
+ print(f"Similarity score for {word1} and {word2}:")
179
+ print(thes.similarity(word1, word2))
180
+
181
+
182
+ if __name__ == "__main__":
183
+ demo()
venv/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py ADDED
@@ -0,0 +1,90 @@
1
+ # Natural Language Toolkit: NPS Chat Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+ import textwrap
10
+
11
+ from nltk.corpus.reader.api import *
12
+ from nltk.corpus.reader.util import *
13
+ from nltk.corpus.reader.xmldocs import *
14
+ from nltk.internals import ElementWrapper
15
+ from nltk.tag import map_tag
16
+ from nltk.util import LazyConcatenation
17
+
18
+
19
+ class NPSChatCorpusReader(XMLCorpusReader):
20
+ def __init__(self, root, fileids, wrap_etree=False, tagset=None):
21
+ XMLCorpusReader.__init__(self, root, fileids, wrap_etree)
22
+ self._tagset = tagset
23
+
24
+ def xml_posts(self, fileids=None):
25
+ if self._wrap_etree:
26
+ return concat(
27
+ [
28
+ XMLCorpusView(fileid, "Session/Posts/Post", self._wrap_elt)
29
+ for fileid in self.abspaths(fileids)
30
+ ]
31
+ )
32
+ else:
33
+ return concat(
34
+ [
35
+ XMLCorpusView(fileid, "Session/Posts/Post")
36
+ for fileid in self.abspaths(fileids)
37
+ ]
38
+ )
39
+
40
+ def posts(self, fileids=None):
41
+ return concat(
42
+ [
43
+ XMLCorpusView(
44
+ fileid, "Session/Posts/Post/terminals", self._elt_to_words
45
+ )
46
+ for fileid in self.abspaths(fileids)
47
+ ]
48
+ )
49
+
50
+ def tagged_posts(self, fileids=None, tagset=None):
51
+ def reader(elt, handler):
52
+ return self._elt_to_tagged_words(elt, handler, tagset)
53
+
54
+ return concat(
55
+ [
56
+ XMLCorpusView(fileid, "Session/Posts/Post/terminals", reader)
57
+ for fileid in self.abspaths(fileids)
58
+ ]
59
+ )
60
+
61
+ def words(self, fileids=None):
62
+ return LazyConcatenation(self.posts(fileids))
63
+
64
+ def tagged_words(self, fileids=None, tagset=None):
65
+ return LazyConcatenation(self.tagged_posts(fileids, tagset))
66
+
67
+ def _wrap_elt(self, elt, handler):
68
+ return ElementWrapper(elt)
69
+
70
+ def _elt_to_words(self, elt, handler):
71
+ return [self._simplify_username(t.attrib["word"]) for t in elt.findall("t")]
72
+
73
+ def _elt_to_tagged_words(self, elt, handler, tagset=None):
74
+ tagged_post = [
75
+ (self._simplify_username(t.attrib["word"]), t.attrib["pos"])
76
+ for t in elt.findall("t")
77
+ ]
78
+ if tagset and tagset != self._tagset:
79
+ tagged_post = [
80
+ (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_post
81
+ ]
82
+ return tagged_post
83
+
84
+ @staticmethod
85
+ def _simplify_username(word):
86
+ if "User" in word:
87
+ word = "U" + word.split("User", 1)[1]
88
+ elif isinstance(word, bytes):
89
+ word = word.decode("ascii")
90
+ return word
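A small sketch of the post-level views above through NLTK's `nps_chat` loader, assuming nltk.download('nps_chat'); the 'class' attribute read at the end is the dialogue-act label carried by each Post element.

from nltk.corpus import nps_chat

# posts() returns token lists with usernames normalised by _simplify_username()
print(nps_chat.posts()[0])

# tagged_posts() pairs each token with the 'pos' attribute of its <t> element
print(nps_chat.tagged_posts()[0])

# xml_posts() keeps the raw Post elements, e.g. for their dialogue-act class
print(nps_chat.xml_posts()[0].get("class"))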
venv/lib/python3.10/site-packages/nltk/corpus/reader/rte.py ADDED
@@ -0,0 +1,146 @@
1
+ # Natural Language Toolkit: RTE Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.
10
+
11
+ The files were taken from the RTE1, RTE2 and RTE3 datasets and were
12
+ regularized.
13
+
14
+ Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the
15
+ gold standard annotated files.
16
+
17
+ Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following
18
+ example is taken from RTE3::
19
+
20
+ <pair id="1" entailment="YES" task="IE" length="short" >
21
+
22
+ <t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
23
+ Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known
24
+ company Baikalfinansgroup which was later bought by the Russian
25
+ state-owned oil company Rosneft .</t>
26
+
27
+ <h>Baikalfinansgroup was sold to Rosneft.</h>
28
+ </pair>
29
+
30
+ In order to provide globally unique IDs for each pair, a new attribute
31
+ ``challenge`` has been added to the root element ``entailment-corpus`` of each
32
+ file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the
33
+ challenge number and 'n' is the pair ID.
34
+ """
35
+ from nltk.corpus.reader.api import *
36
+ from nltk.corpus.reader.util import *
37
+ from nltk.corpus.reader.xmldocs import *
38
+
39
+
40
+ def norm(value_string):
41
+ """
42
+ Normalize the string value in an RTE pair's ``value`` or ``entailment``
43
+ attribute as an integer (1, 0).
44
+
45
+ :param value_string: the label used to classify a text/hypothesis pair
46
+ :type value_string: str
47
+ :rtype: int
48
+ """
49
+
50
+ valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0}
51
+ return valdict[value_string.upper()]
52
+
53
+
54
+ class RTEPair:
55
+ """
56
+ Container for RTE text-hypothesis pairs.
57
+
58
+ The entailment relation is signalled by the ``value`` attribute in RTE1, and by
59
+ ``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment``
60
+ attribute of this class.
61
+ """
62
+
63
+ def __init__(
64
+ self,
65
+ pair,
66
+ challenge=None,
67
+ id=None,
68
+ text=None,
69
+ hyp=None,
70
+ value=None,
71
+ task=None,
72
+ length=None,
73
+ ):
74
+ """
75
+ :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
76
+ :param id: identifier for the pair
77
+ :param text: the text component of the pair
78
+ :param hyp: the hypothesis component of the pair
79
+ :param value: classification label for the pair
80
+ :param task: attribute for the particular NLP task that the data was drawn from
81
+ :param length: attribute for the length of the text of the pair
82
+ """
83
+ self.challenge = challenge
84
+ self.id = pair.attrib["id"]
85
+ self.gid = f"{self.challenge}-{self.id}"
86
+ self.text = pair[0].text
87
+ self.hyp = pair[1].text
88
+
89
+ if "value" in pair.attrib:
90
+ self.value = norm(pair.attrib["value"])
91
+ elif "entailment" in pair.attrib:
92
+ self.value = norm(pair.attrib["entailment"])
93
+ else:
94
+ self.value = value
95
+ if "task" in pair.attrib:
96
+ self.task = pair.attrib["task"]
97
+ else:
98
+ self.task = task
99
+ if "length" in pair.attrib:
100
+ self.length = pair.attrib["length"]
101
+ else:
102
+ self.length = length
103
+
104
+ def __repr__(self):
105
+ if self.challenge:
106
+ return f"<RTEPair: gid={self.challenge}-{self.id}>"
107
+ else:
108
+ return "<RTEPair: id=%s>" % self.id
109
+
110
+
111
+ class RTECorpusReader(XMLCorpusReader):
112
+ """
113
+ Corpus reader for corpora in RTE challenges.
114
+
115
+ This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected
116
+ structure of input documents.
117
+ """
118
+
119
+ def _read_etree(self, doc):
120
+ """
121
+ Map the XML input into an RTEPair.
122
+
123
+ This uses the ``getiterator()`` method from the ElementTree package to
124
+ find all the ``<pair>`` elements.
125
+
126
+ :param doc: a parsed XML document
127
+ :rtype: list(RTEPair)
128
+ """
129
+ try:
130
+ challenge = doc.attrib["challenge"]
131
+ except KeyError:
132
+ challenge = None
133
+ pairiter = doc.iter("pair")
134
+ return [RTEPair(pair, challenge=challenge) for pair in pairiter]
135
+
136
+ def pairs(self, fileids):
137
+ """
138
+ Build a list of RTEPairs from a RTE corpus.
139
+
140
+ :param fileids: a list of RTE corpus fileids
141
+ :type: list
142
+ :rtype: list(RTEPair)
143
+ """
144
+ if isinstance(fileids, str):
145
+ fileids = [fileids]
146
+ return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
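A minimal sketch of reading entailment pairs with the classes above, via NLTK's `rte` loader; it assumes nltk.download('rte') and that 'rte3_dev.xml' is one of the available fileids.

from nltk.corpus import rte

pairs = rte.pairs("rte3_dev.xml")
first = pairs[0]

# gid combines the file-level challenge number with the per-pair id
print(first.gid, first.value, first.task)
print(first.text)
print(first.hyp)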
venv/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py ADDED
@@ -0,0 +1,296 @@
1
+ # Natural Language Toolkit: SemCor Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Nathan Schneider <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the SemCor Corpus.
10
+ """
11
+
12
+ __docformat__ = "epytext en"
13
+
14
+ from nltk.corpus.reader.api import *
15
+ from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView
16
+ from nltk.tree import Tree
17
+
18
+
19
+ class SemcorCorpusReader(XMLCorpusReader):
20
+ """
21
+ Corpus reader for the SemCor Corpus.
22
+ For access to the complete XML data structure, use the ``xml()``
23
+ method. For access to simple word lists and tagged word lists, use
24
+ ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
25
+ """
26
+
27
+ def __init__(self, root, fileids, wordnet, lazy=True):
28
+ XMLCorpusReader.__init__(self, root, fileids)
29
+ self._lazy = lazy
30
+ self._wordnet = wordnet
31
+
32
+ def words(self, fileids=None):
33
+ """
34
+ :return: the given file(s) as a list of words and punctuation symbols.
35
+ :rtype: list(str)
36
+ """
37
+ return self._items(fileids, "word", False, False, False)
38
+
39
+ def chunks(self, fileids=None):
40
+ """
41
+ :return: the given file(s) as a list of chunks,
42
+ each of which is a list of words and punctuation symbols
43
+ that form a unit.
44
+ :rtype: list(list(str))
45
+ """
46
+ return self._items(fileids, "chunk", False, False, False)
47
+
48
+ def tagged_chunks(self, fileids=None, tag=("pos" or "sem" or "both")):
49
+ """
50
+ :return: the given file(s) as a list of tagged chunks, represented
51
+ in tree form.
52
+ :rtype: list(Tree)
53
+
54
+ :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
55
+ to indicate the kind of tags to include. Semantic tags consist of
56
+ WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
57
+ without a specific entry in WordNet. (Named entities of type 'other'
58
+ have no lemma. Other chunks not in WordNet have no semantic tag.
59
+ Punctuation tokens have `None` for their part of speech tag.)
60
+ """
61
+ return self._items(fileids, "chunk", False, tag != "sem", tag != "pos")
62
+
63
+ def sents(self, fileids=None):
64
+ """
65
+ :return: the given file(s) as a list of sentences, each encoded
66
+ as a list of word strings.
67
+ :rtype: list(list(str))
68
+ """
69
+ return self._items(fileids, "word", True, False, False)
70
+
71
+ def chunk_sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of sentences, each encoded
74
+ as a list of chunks.
75
+ :rtype: list(list(list(str)))
76
+ """
77
+ return self._items(fileids, "chunk", True, False, False)
78
+
79
+ def tagged_sents(self, fileids=None, tag=("pos" or "sem" or "both")):
80
+ """
81
+ :return: the given file(s) as a list of sentences. Each sentence
82
+ is represented as a list of tagged chunks (in tree form).
83
+ :rtype: list(list(Tree))
84
+
85
+ :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
86
+ to indicate the kind of tags to include. Semantic tags consist of
87
+ WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
88
+ without a specific entry in WordNet. (Named entities of type 'other'
89
+ have no lemma. Other chunks not in WordNet have no semantic tag.
90
+ Punctuation tokens have `None` for their part of speech tag.)
91
+ """
92
+ return self._items(fileids, "chunk", True, tag != "sem", tag != "pos")
93
+
94
+ def _items(self, fileids, unit, bracket_sent, pos_tag, sem_tag):
95
+ if unit == "word" and not bracket_sent:
96
+ # the result of the SemcorWordView may be a multiword unit, so the
97
+ # LazyConcatenation will make sure the sentence is flattened
98
+ _ = lambda *args: LazyConcatenation(
99
+ (SemcorWordView if self._lazy else self._words)(*args)
100
+ )
101
+ else:
102
+ _ = SemcorWordView if self._lazy else self._words
103
+ return concat(
104
+ [
105
+ _(fileid, unit, bracket_sent, pos_tag, sem_tag, self._wordnet)
106
+ for fileid in self.abspaths(fileids)
107
+ ]
108
+ )
109
+
110
+ def _words(self, fileid, unit, bracket_sent, pos_tag, sem_tag):
111
+ """
112
+ Helper used to implement the view methods -- returns a list of
113
+ tokens, (segmented) words, chunks, or sentences. The tokens
114
+ and chunks may optionally be tagged (with POS and sense
115
+ information).
116
+
117
+ :param fileid: The name of the underlying file.
118
+ :param unit: One of `'token'`, `'word'`, or `'chunk'`.
119
+ :param bracket_sent: If true, include sentence bracketing.
120
+ :param pos_tag: Whether to include part-of-speech tags.
121
+ :param sem_tag: Whether to include semantic tags, namely WordNet lemma
122
+ and OOV named entity status.
123
+ """
124
+ assert unit in ("token", "word", "chunk")
125
+ result = []
126
+
127
+ xmldoc = ElementTree.parse(fileid).getroot()
128
+ for xmlsent in xmldoc.findall(".//s"):
129
+ sent = []
130
+ for xmlword in _all_xmlwords_in(xmlsent):
131
+ itm = SemcorCorpusReader._word(
132
+ xmlword, unit, pos_tag, sem_tag, self._wordnet
133
+ )
134
+ if unit == "word":
135
+ sent.extend(itm)
136
+ else:
137
+ sent.append(itm)
138
+
139
+ if bracket_sent:
140
+ result.append(SemcorSentence(xmlsent.attrib["snum"], sent))
141
+ else:
142
+ result.extend(sent)
143
+
144
+ assert None not in result
145
+ return result
146
+
147
+ @staticmethod
148
+ def _word(xmlword, unit, pos_tag, sem_tag, wordnet):
149
+ tkn = xmlword.text
150
+ if not tkn:
151
+ tkn = "" # fixes issue 337?
152
+
153
+ lemma = xmlword.get("lemma", tkn) # lemma or NE class
154
+ lexsn = xmlword.get("lexsn") # lex_sense (locator for the lemma's sense)
155
+ if lexsn is not None:
156
+ sense_key = lemma + "%" + lexsn
157
+ wnpos = ("n", "v", "a", "r", "s")[
158
+ int(lexsn.split(":")[0]) - 1
159
+ ] # see http://wordnet.princeton.edu/man/senseidx.5WN.html
160
+ else:
161
+ sense_key = wnpos = None
162
+ redef = xmlword.get(
163
+ "rdf", tkn
164
+ ) # redefinition--this indicates the lookup string
165
+ # does not exactly match the enclosed string, e.g. due to typographical adjustments
166
+ # or discontinuity of a multiword expression. If a redefinition has occurred,
167
+ # the "rdf" attribute holds its inflected form and "lemma" holds its lemma.
168
+ # For NEs, "rdf", "lemma", and "pn" all hold the same value (the NE class).
169
+ sensenum = xmlword.get("wnsn") # WordNet sense number
170
+ isOOVEntity = "pn" in xmlword.keys() # a "personal name" (NE) not in WordNet
171
+ pos = xmlword.get(
172
+ "pos"
173
+ ) # part of speech for the whole chunk (None for punctuation)
174
+
175
+ if unit == "token":
176
+ if not pos_tag and not sem_tag:
177
+ itm = tkn
178
+ else:
179
+ itm = (
180
+ (tkn,)
181
+ + ((pos,) if pos_tag else ())
182
+ + ((lemma, wnpos, sensenum, isOOVEntity) if sem_tag else ())
183
+ )
184
+ return itm
185
+ else:
186
+ ww = tkn.split("_") # TODO: case where punctuation intervenes in MWE
187
+ if unit == "word":
188
+ return ww
189
+ else:
190
+ if sensenum is not None:
191
+ try:
192
+ sense = wordnet.lemma_from_key(sense_key) # Lemma object
193
+ except Exception:
194
+ # cannot retrieve the wordnet.Lemma object. possible reasons:
195
+ # (a) the wordnet corpus is not downloaded;
196
+ # (b) a nonexistent sense is annotated: e.g., such.s.00 triggers:
197
+ # nltk.corpus.reader.wordnet.WordNetError: No synset found for key u'such%5:00:01:specified:00'
198
+ # solution: just use the lemma name as a string
199
+ try:
200
+ sense = "%s.%s.%02d" % (
201
+ lemma,
202
+ wnpos,
203
+ int(sensenum),
204
+ ) # e.g.: reach.v.02
205
+ except ValueError:
206
+ sense = (
207
+ lemma + "." + wnpos + "." + sensenum
208
+ ) # e.g. the sense number may be "2;1"
209
+
210
+ bottom = [Tree(pos, ww)] if pos_tag else ww
211
+
212
+ if sem_tag and isOOVEntity:
213
+ if sensenum is not None:
214
+ return Tree(sense, [Tree("NE", bottom)])
215
+ else: # 'other' NE
216
+ return Tree("NE", bottom)
217
+ elif sem_tag and sensenum is not None:
218
+ return Tree(sense, bottom)
219
+ elif pos_tag:
220
+ return bottom[0]
221
+ else:
222
+ return bottom # chunk as a list
223
+
224
+
225
+ def _all_xmlwords_in(elt, result=None):
226
+ if result is None:
227
+ result = []
228
+ for child in elt:
229
+ if child.tag in ("wf", "punc"):
230
+ result.append(child)
231
+ else:
232
+ _all_xmlwords_in(child, result)
233
+ return result
234
+
235
+
236
+ class SemcorSentence(list):
237
+ """
238
+ A list of words, augmented by an attribute ``num`` used to record
239
+ the sentence identifier (the ``n`` attribute from the XML).
240
+ """
241
+
242
+ def __init__(self, num, items):
243
+ self.num = num
244
+ list.__init__(self, items)
245
+
246
+
247
+ class SemcorWordView(XMLCorpusView):
248
+ """
249
+ A stream backed corpus view specialized for use with the SemCor corpus.
250
+ """
251
+
252
+ def __init__(self, fileid, unit, bracket_sent, pos_tag, sem_tag, wordnet):
253
+ """
254
+ :param fileid: The name of the underlying file.
255
+ :param unit: One of `'token'`, `'word'`, or `'chunk'`.
256
+ :param bracket_sent: If true, include sentence bracketing.
257
+ :param pos_tag: Whether to include part-of-speech tags.
258
+ :param sem_tag: Whether to include semantic tags, namely WordNet lemma
259
+ and OOV named entity status.
260
+ """
261
+ if bracket_sent:
262
+ tagspec = ".*/s"
263
+ else:
264
+ tagspec = ".*/s/(punc|wf)"
265
+
266
+ self._unit = unit
267
+ self._sent = bracket_sent
268
+ self._pos_tag = pos_tag
269
+ self._sem_tag = sem_tag
270
+ self._wordnet = wordnet
271
+
272
+ XMLCorpusView.__init__(self, fileid, tagspec)
273
+
274
+ def handle_elt(self, elt, context):
275
+ if self._sent:
276
+ return self.handle_sent(elt)
277
+ else:
278
+ return self.handle_word(elt)
279
+
280
+ def handle_word(self, elt):
281
+ return SemcorCorpusReader._word(
282
+ elt, self._unit, self._pos_tag, self._sem_tag, self._wordnet
283
+ )
284
+
285
+ def handle_sent(self, elt):
286
+ sent = []
287
+ for child in elt:
288
+ if child.tag in ("wf", "punc"):
289
+ itm = self.handle_word(child)
290
+ if self._unit == "word":
291
+ sent.extend(itm)
292
+ else:
293
+ sent.append(itm)
294
+ else:
295
+ raise ValueError("Unexpected element %s" % child.tag)
296
+ return SemcorSentence(elt.attrib["snum"], sent)
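A brief sketch of the chunk- and sentence-level views defined above, using NLTK's `semcor` loader; it assumes nltk.download('semcor') and nltk.download('wordnet'), since the semantic tags are resolved against WordNet.

from nltk.corpus import semcor

# Flat token and chunk views
print(semcor.words()[:10])
print(semcor.chunks()[:5])

# Tagged chunks come back (mostly) as Trees labelled with WordNet Lemmas
# ('sem'), POS tags ('pos'), or both nested ('both')
for tree in semcor.tagged_chunks(tag="both")[:3]:
    print(tree)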
venv/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py ADDED
@@ -0,0 +1,136 @@
1
+ # Natural Language Toolkit: SentiWordNet
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Christopher Potts <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ An NLTK interface for SentiWordNet
10
+
11
+ SentiWordNet is a lexical resource for opinion mining.
12
+ SentiWordNet assigns to each synset of WordNet three
13
+ sentiment scores: positivity, negativity, and objectivity.
14
+
15
+ For details about SentiWordNet see:
16
+ http://sentiwordnet.isti.cnr.it/
17
+
18
+ >>> from nltk.corpus import sentiwordnet as swn
19
+ >>> print(swn.senti_synset('breakdown.n.03'))
20
+ <breakdown.n.03: PosScore=0.0 NegScore=0.25>
21
+ >>> list(swn.senti_synsets('slow'))
22
+ [SentiSynset('decelerate.v.01'), SentiSynset('slow.v.02'),\
23
+ SentiSynset('slow.v.03'), SentiSynset('slow.a.01'),\
24
+ SentiSynset('slow.a.02'), SentiSynset('dense.s.04'),\
25
+ SentiSynset('slow.a.04'), SentiSynset('boring.s.01'),\
26
+ SentiSynset('dull.s.08'), SentiSynset('slowly.r.01'),\
27
+ SentiSynset('behind.r.03')]
28
+ >>> happy = swn.senti_synsets('happy', 'a')
29
+ >>> happy0 = list(happy)[0]
30
+ >>> happy0.pos_score()
31
+ 0.875
32
+ >>> happy0.neg_score()
33
+ 0.0
34
+ >>> happy0.obj_score()
35
+ 0.125
36
+ """
37
+
38
+ import re
39
+
40
+ from nltk.corpus.reader import CorpusReader
41
+
42
+
43
+ class SentiWordNetCorpusReader(CorpusReader):
44
+ def __init__(self, root, fileids, encoding="utf-8"):
45
+ """
46
+ Construct a new SentiWordNet Corpus Reader, using data from
47
+ the specified file.
48
+ """
49
+ super().__init__(root, fileids, encoding=encoding)
50
+ if len(self._fileids) != 1:
51
+ raise ValueError("Exactly one file must be specified")
52
+ self._db = {}
53
+ self._parse_src_file()
54
+
55
+ def _parse_src_file(self):
56
+ lines = self.open(self._fileids[0]).read().splitlines()
57
+ lines = filter((lambda x: not re.search(r"^\s*#", x)), lines)
58
+ for i, line in enumerate(lines):
59
+ fields = [field.strip() for field in re.split(r"\t+", line)]
60
+ try:
61
+ pos, offset, pos_score, neg_score, synset_terms, gloss = fields
62
+ except BaseException as e:
63
+ raise ValueError(f"Line {i} formatted incorrectly: {line}\n") from e
64
+ if pos and offset:
65
+ offset = int(offset)
66
+ self._db[(pos, offset)] = (float(pos_score), float(neg_score))
67
+
68
+ def senti_synset(self, *vals):
69
+ from nltk.corpus import wordnet as wn
70
+
71
+ if tuple(vals) in self._db:
72
+ pos_score, neg_score = self._db[tuple(vals)]
73
+ pos, offset = vals
74
+ if pos == "s":
75
+ pos = "a"
76
+ synset = wn.synset_from_pos_and_offset(pos, offset)
77
+ return SentiSynset(pos_score, neg_score, synset)
78
+ else:
79
+ synset = wn.synset(vals[0])
80
+ pos = synset.pos()
81
+ if pos == "s":
82
+ pos = "a"
83
+ offset = synset.offset()
84
+ if (pos, offset) in self._db:
85
+ pos_score, neg_score = self._db[(pos, offset)]
86
+ return SentiSynset(pos_score, neg_score, synset)
87
+ else:
88
+ return None
89
+
90
+ def senti_synsets(self, string, pos=None):
91
+ from nltk.corpus import wordnet as wn
92
+
93
+ sentis = []
94
+ synset_list = wn.synsets(string, pos)
95
+ for synset in synset_list:
96
+ sentis.append(self.senti_synset(synset.name()))
97
+ sentis = filter(lambda x: x, sentis)
98
+ return sentis
99
+
100
+ def all_senti_synsets(self):
101
+ from nltk.corpus import wordnet as wn
102
+
103
+ for key, fields in self._db.items():
104
+ pos, offset = key
105
+ pos_score, neg_score = fields
106
+ synset = wn.synset_from_pos_and_offset(pos, offset)
107
+ yield SentiSynset(pos_score, neg_score, synset)
108
+
109
+
110
+ class SentiSynset:
111
+ def __init__(self, pos_score, neg_score, synset):
112
+ self._pos_score = pos_score
113
+ self._neg_score = neg_score
114
+ self._obj_score = 1.0 - (self._pos_score + self._neg_score)
115
+ self.synset = synset
116
+
117
+ def pos_score(self):
118
+ return self._pos_score
119
+
120
+ def neg_score(self):
121
+ return self._neg_score
122
+
123
+ def obj_score(self):
124
+ return self._obj_score
125
+
126
+ def __str__(self):
127
+ """Prints just the Pos/Neg scores for now."""
128
+ s = "<"
129
+ s += self.synset.name() + ": "
130
+ s += "PosScore=%s " % self._pos_score
131
+ s += "NegScore=%s" % self._neg_score
132
+ s += ">"
133
+ return s
134
+
135
+ def __repr__(self):
136
+ return "Senti" + repr(self.synset)
venv/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py ADDED
@@ -0,0 +1,75 @@
1
+ # Natural Language Toolkit: Sinica Treebank Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Sinica Treebank Corpus Sample
10
+
11
+ http://rocling.iis.sinica.edu.tw/CKIP/engversion/treebank.htm
12
+
13
+ 10,000 parsed sentences, drawn from the Academia Sinica Balanced
14
+ Corpus of Modern Chinese. Parse tree notation is based on
15
+ Information-based Case Grammar. Tagset documentation is available
16
+ at https://www.sinica.edu.tw/SinicaCorpus/modern_e_wordtype.html
17
+
18
+ Language and Knowledge Processing Group, Institute of Information
19
+ Science, Academia Sinica
20
+
21
+ The data is distributed with the Natural Language Toolkit under the terms of
22
+ the Creative Commons Attribution-NonCommercial-ShareAlike License
23
+ [https://creativecommons.org/licenses/by-nc-sa/2.5/].
24
+
25
+ References:
26
+
27
+ Feng-Yi Chen, Pi-Fang Tsai, Keh-Jiann Chen, and Chu-Ren Huang (1999)
28
+ The Construction of Sinica Treebank. Computational Linguistics and
29
+ Chinese Language Processing, 4, pp 87-104.
30
+
31
+ Huang Chu-Ren, Keh-Jiann Chen, Feng-Yi Chen, Keh-Jiann Chen, Zhao-Ming
32
+ Gao, and Kuang-Yu Chen. 2000. Sinica Treebank: Design Criteria,
33
+ Annotation Guidelines, and On-line Interface. Proceedings of 2nd
34
+ Chinese Language Processing Workshop, Association for Computational
35
+ Linguistics.
36
+
37
+ Chen Keh-Jiann and Yu-Ming Hsieh (2004) Chinese Treebanks and Grammar
38
+ Extraction, Proceedings of IJCNLP-04, pp560-565.
39
+ """
40
+
41
+ from nltk.corpus.reader.api import *
42
+ from nltk.corpus.reader.util import *
43
+ from nltk.tag import map_tag
44
+ from nltk.tree import sinica_parse
45
+
46
+ IDENTIFIER = re.compile(r"^#\S+\s")
47
+ APPENDIX = re.compile(r"(?<=\))#.*$")
48
+ TAGWORD = re.compile(r":([^:()|]+):([^:()|]+)")
49
+ WORD = re.compile(r":[^:()|]+:([^:()|]+)")
50
+
51
+
52
+ class SinicaTreebankCorpusReader(SyntaxCorpusReader):
53
+ """
54
+ Reader for the sinica treebank.
55
+ """
56
+
57
+ def _read_block(self, stream):
58
+ sent = stream.readline()
59
+ sent = IDENTIFIER.sub("", sent)
60
+ sent = APPENDIX.sub("", sent)
61
+ return [sent]
62
+
63
+ def _parse(self, sent):
64
+ return sinica_parse(sent)
65
+
66
+ def _tag(self, sent, tagset=None):
67
+ tagged_sent = [(w, t) for (t, w) in TAGWORD.findall(sent)]
68
+ if tagset and tagset != self._tagset:
69
+ tagged_sent = [
70
+ (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_sent
71
+ ]
72
+ return tagged_sent
73
+
74
+ def _word(self, sent):
75
+ return WORD.findall(sent)
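A short sketch of the standard SyntaxCorpusReader views that _read_block(), _word(), _tag() and _parse() above support, assuming nltk.download('sinica_treebank').

from nltk.corpus import sinica_treebank

# words()/tagged_words() are driven by _word() and _tag() above;
# parsed_sents() goes through sinica_parse() via _parse()
print(sinica_treebank.words()[:10])
print(sinica_treebank.tagged_words()[:5])
print(sinica_treebank.parsed_sents()[0])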
venv/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py ADDED
@@ -0,0 +1,125 @@
1
+ # Natural Language Toolkit: Switchboard Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ import re
8
+
9
+ from nltk.corpus.reader.api import *
10
+ from nltk.corpus.reader.util import *
11
+ from nltk.tag import map_tag, str2tuple
12
+
13
+
14
+ class SwitchboardTurn(list):
15
+ """
16
+ A specialized list object used to encode switchboard utterances.
17
+ The elements of the list are the words in the utterance; and two
18
+ attributes, ``speaker`` and ``id``, are provided to retrieve the
19
+ speaker identifier and utterance id. Note that utterance ids
20
+ are only unique within a given discourse.
21
+ """
22
+
23
+ def __init__(self, words, speaker, id):
24
+ list.__init__(self, words)
25
+ self.speaker = speaker
26
+ self.id = int(id)
27
+
28
+ def __repr__(self):
29
+ if len(self) == 0:
30
+ text = ""
31
+ elif isinstance(self[0], tuple):
32
+ text = " ".join("%s/%s" % w for w in self)
33
+ else:
34
+ text = " ".join(self)
35
+ return f"<{self.speaker}.{self.id}: {text!r}>"
36
+
37
+
38
+ class SwitchboardCorpusReader(CorpusReader):
39
+ _FILES = ["tagged"]
40
+ # Use the "tagged" file even for non-tagged data methods, since
41
+ # it's tokenized.
42
+
43
+ def __init__(self, root, tagset=None):
44
+ CorpusReader.__init__(self, root, self._FILES)
45
+ self._tagset = tagset
46
+
47
+ def words(self):
48
+ return StreamBackedCorpusView(self.abspath("tagged"), self._words_block_reader)
49
+
50
+ def tagged_words(self, tagset=None):
51
+ def tagged_words_block_reader(stream):
52
+ return self._tagged_words_block_reader(stream, tagset)
53
+
54
+ return StreamBackedCorpusView(self.abspath("tagged"), tagged_words_block_reader)
55
+
56
+ def turns(self):
57
+ return StreamBackedCorpusView(self.abspath("tagged"), self._turns_block_reader)
58
+
59
+ def tagged_turns(self, tagset=None):
60
+ def tagged_turns_block_reader(stream):
61
+ return self._tagged_turns_block_reader(stream, tagset)
62
+
63
+ return StreamBackedCorpusView(self.abspath("tagged"), tagged_turns_block_reader)
64
+
65
+ def discourses(self):
66
+ return StreamBackedCorpusView(
67
+ self.abspath("tagged"), self._discourses_block_reader
68
+ )
69
+
70
+ def tagged_discourses(self, tagset=False):
71
+ def tagged_discourses_block_reader(stream):
72
+ return self._tagged_discourses_block_reader(stream, tagset)
73
+
74
+ return StreamBackedCorpusView(
75
+ self.abspath("tagged"), tagged_discourses_block_reader
76
+ )
77
+
78
+ def _discourses_block_reader(self, stream):
79
+ # returns at most 1 discourse. (The other methods depend on this.)
80
+ return [
81
+ [
82
+ self._parse_utterance(u, include_tag=False)
83
+ for b in read_blankline_block(stream)
84
+ for u in b.split("\n")
85
+ if u.strip()
86
+ ]
87
+ ]
88
+
89
+ def _tagged_discourses_block_reader(self, stream, tagset=None):
90
+ # returns at most 1 discourse. (The other methods depend on this.)
91
+ return [
92
+ [
93
+ self._parse_utterance(u, include_tag=True, tagset=tagset)
94
+ for b in read_blankline_block(stream)
95
+ for u in b.split("\n")
96
+ if u.strip()
97
+ ]
98
+ ]
99
+
100
+ def _turns_block_reader(self, stream):
101
+ return self._discourses_block_reader(stream)[0]
102
+
103
+ def _tagged_turns_block_reader(self, stream, tagset=None):
104
+ return self._tagged_discourses_block_reader(stream, tagset)[0]
105
+
106
+ def _words_block_reader(self, stream):
107
+ return sum(self._discourses_block_reader(stream)[0], [])
108
+
109
+ def _tagged_words_block_reader(self, stream, tagset=None):
110
+ return sum(self._tagged_discourses_block_reader(stream, tagset)[0], [])
111
+
112
+ _UTTERANCE_RE = re.compile(r"(\w+)\.(\d+)\:\s*(.*)")
113
+ _SEP = "/"
114
+
115
+ def _parse_utterance(self, utterance, include_tag, tagset=None):
116
+ m = self._UTTERANCE_RE.match(utterance)
117
+ if m is None:
118
+ raise ValueError("Bad utterance %r" % utterance)
119
+ speaker, id, text = m.groups()
120
+ words = [str2tuple(s, self._SEP) for s in text.split()]
121
+ if not include_tag:
122
+ words = [w for (w, t) in words]
123
+ elif tagset and tagset != self._tagset:
124
+ words = [(w, map_tag(self._tagset, tagset, t)) for (w, t) in words]
125
+ return SwitchboardTurn(words, speaker, id)
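A minimal sketch of the turn-level views above through NLTK's `switchboard` loader, assuming nltk.download('switchboard').

from nltk.corpus import switchboard

# Each turn is a SwitchboardTurn carrying .speaker and .id alongside its words
turn = switchboard.turns()[0]
print(turn.speaker, turn.id, list(turn))

# tagged_turns() keeps the word/tag pairs produced by _parse_utterance()
print(switchboard.tagged_turns()[0][:5])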
venv/lib/python3.10/site-packages/nltk/corpus/reader/timit.py ADDED
@@ -0,0 +1,510 @@
1
+ # Natural Language Toolkit: TIMIT Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2007 NLTK Project
4
+ # Author: Haejoong Lee <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jacob Perkins <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ # [xx] this docstring is out-of-date:
11
+ """
12
+ Read tokens, phonemes and audio data from the NLTK TIMIT Corpus.
13
+
14
+ This corpus contains a selected portion of the TIMIT corpus.
15
+
16
+ - 16 speakers from 8 dialect regions
17
+ - 1 male and 1 female from each dialect region
18
+ - total 130 sentences (10 sentences per speaker. Note that some
19
+ sentences are shared among other speakers, especially sa1 and sa2
20
+ are spoken by all speakers.)
21
+ - total 160 recordings of sentences (10 recordings per speaker)
22
+ - audio format: NIST Sphere, single channel, 16kHz sampling,
23
+ 16 bit sample, PCM encoding
24
+
25
+
26
+ Module contents
27
+ ===============
28
+
29
+ The timit corpus reader provides 4 functions and 4 data items.
30
+
31
+ - utterances
32
+
33
+ List of utterances in the corpus. There are 160 utterances in total,
34
+ each of which corresponds to a unique utterance of a speaker.
35
+ Here's an example of an utterance identifier in the list::
36
+
37
+ dr1-fvmh0/sx206
38
+ - _---- _---
39
+ | | | | |
40
+ | | | | |
41
+ | | | | `--- sentence number
42
+ | | | `----- sentence type (a:all, i:shared, x:exclusive)
43
+ | | `--------- speaker ID
44
+ | `------------ sex (m:male, f:female)
45
+ `-------------- dialect region (1..8)
46
+
47
+ - speakers
48
+
49
+ List of speaker IDs. An example of speaker ID::
50
+
51
+ dr1-fvmh0
52
+
53
+ Note that if you split an item ID on '/' and take the first element of
54
+ the result, you will get a speaker ID.
55
+
56
+ >>> itemid = 'dr1-fvmh0/sx206'
57
+ >>> spkrid , sentid = itemid.split('/')
58
+ >>> spkrid
59
+ 'dr1-fvmh0'
60
+
61
+ The second element of the result is a sentence ID.
62
+
63
+ - dictionary()
64
+
65
+ Phonetic dictionary of words contained in this corpus. This is a Python
66
+ dictionary from words to phoneme lists.
67
+
68
+ - spkrinfo()
69
+
70
+ Speaker information table. It's a Python dictionary from speaker IDs to
71
+ records of 10 fields. Speaker IDs are the same as the ones in timit.speakers.
72
+ Each record is a dictionary from field names to values, and the fields are
73
+ as follows::
74
+
75
+ id speaker ID as defined in the original TIMIT speaker info table
76
+ sex speaker gender (M:male, F:female)
77
+ dr speaker dialect region (1:new england, 2:northern,
78
+ 3:north midland, 4:south midland, 5:southern, 6:new york city,
79
+ 7:western, 8:army brat (moved around))
80
+ use corpus type (TRN:training, TST:test)
81
+ in this sample corpus only TRN is available
82
+ recdate recording date
83
+ birthdate speaker birth date
84
+ ht speaker height
85
+ race speaker race (WHT:white, BLK:black, AMR:american indian,
86
+ SPN:spanish-american, ORN:oriental,???:unknown)
87
+ edu speaker education level (HS:high school, AS:associate degree,
88
+ BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA),
89
+ PHD:doctorate degree (PhD,JD,MD), ??:unknown)
90
+ comments comments by the recorder
91
+
92
+ The 4 functions are as follows.
93
+
94
+ - tokenized(sentences=items, offset=False)
95
+
96
+ Given a list of items, returns an iterator of a list of word lists,
97
+ each of which corresponds to an item (sentence). If offset is set to True,
98
+ each element of the word list is a tuple of word(string), start offset and
99
+ end offset, where offset is represented as a number of 16kHz samples.
100
+
101
+ - phonetic(sentences=items, offset=False)
102
+
103
+ Given a list of items, returns an iterator of a list of phoneme lists,
104
+ each of which corresponds to an item (sentence). If offset is set to True,
105
+ each element of the phoneme list is a tuple of phoneme (string), start offset
106
+ and end offset, where offset is represented as a number of 16kHz samples.
107
+
108
+ - audiodata(item, start=0, end=None)
109
+
110
+ Given an item, returns a chunk of audio samples formatted into a string.
111
+ If start and end are omitted, all samples of the recording are returned.
+ If only end is omitted, samples from the start offset to the end of the
+ recording are returned.
114
+
115
+ - play(data)
116
+
117
+ Play the given audio samples. The audio samples can be obtained from the
118
+ timit.audiodata function.
119
+
120
+ """
121
+ import sys
122
+ import time
123
+
124
+ from nltk.corpus.reader.api import *
125
+ from nltk.internals import import_from_stdlib
126
+ from nltk.tree import Tree
127
+
128
+
129
+ class TimitCorpusReader(CorpusReader):
130
+ """
131
+ Reader for the TIMIT corpus (or any other corpus with the same
132
+ file layout and use of file formats). The corpus root directory
133
+ should contain the following files:
134
+
135
+ - timitdic.txt: dictionary of standard transcriptions
136
+ - spkrinfo.txt: table of speaker information
137
+
138
+ In addition, the root directory should contain one subdirectory
139
+ for each speaker, containing three files for each utterance:
140
+
141
+ - <utterance-id>.txt: text content of utterances
142
+ - <utterance-id>.wrd: tokenized text content of utterances
143
+ - <utterance-id>.phn: phonetic transcription of utterances
144
+ - <utterance-id>.wav: utterance sound file
145
+ """
146
+
147
+ _FILE_RE = r"(\w+-\w+/\w+\.(phn|txt|wav|wrd))|" + r"timitdic\.txt|spkrinfo\.txt"
148
+ """A regexp matching fileids that are used by this corpus reader."""
149
+ _UTTERANCE_RE = r"\w+-\w+/\w+\.txt"
150
+
151
+ def __init__(self, root, encoding="utf8"):
152
+ """
153
+ Construct a new TIMIT corpus reader in the given directory.
154
+ :param root: The root directory for this corpus.
155
+ """
156
+ # Ensure that wave files don't get treated as unicode data:
157
+ if isinstance(encoding, str):
158
+ encoding = [(r".*\.wav", None), (".*", encoding)]
159
+
160
+ CorpusReader.__init__(
161
+ self, root, find_corpus_fileids(root, self._FILE_RE), encoding=encoding
162
+ )
163
+
164
+ self._utterances = [
165
+ name[:-4] for name in find_corpus_fileids(root, self._UTTERANCE_RE)
166
+ ]
167
+ """A list of the utterance identifiers for all utterances in
168
+ this corpus."""
169
+
170
+ self._speakerinfo = None
171
+ self._root = root
172
+ self.speakers = sorted({u.split("/")[0] for u in self._utterances})
173
+
174
+ def fileids(self, filetype=None):
175
+ """
176
+ Return a list of file identifiers for the files that make up
177
+ this corpus.
178
+
179
+ :param filetype: If specified, then ``filetype`` indicates that
180
+ only the files that have the given type should be
181
+ returned. Accepted values are: ``txt``, ``wrd``, ``phn``,
182
+ ``wav``, or ``metadata``,
183
+ """
184
+ if filetype is None:
185
+ return CorpusReader.fileids(self)
186
+ elif filetype in ("txt", "wrd", "phn", "wav"):
187
+ return [f"{u}.{filetype}" for u in self._utterances]
188
+ elif filetype == "metadata":
189
+ return ["timitdic.txt", "spkrinfo.txt"]
190
+ else:
191
+ raise ValueError("Bad value for filetype: %r" % filetype)
192
+
193
+ def utteranceids(
194
+ self, dialect=None, sex=None, spkrid=None, sent_type=None, sentid=None
195
+ ):
196
+ """
197
+ :return: A list of the utterance identifiers for all
198
+ utterances in this corpus, or for the given speaker, dialect
199
+ region, gender, sentence type, or sentence number, if
200
+ specified.
201
+ """
202
+ if isinstance(dialect, str):
203
+ dialect = [dialect]
204
+ if isinstance(sex, str):
205
+ sex = [sex]
206
+ if isinstance(spkrid, str):
207
+ spkrid = [spkrid]
208
+ if isinstance(sent_type, str):
209
+ sent_type = [sent_type]
210
+ if isinstance(sentid, str):
211
+ sentid = [sentid]
212
+
213
+ utterances = self._utterances[:]
214
+ if dialect is not None:
215
+ utterances = [u for u in utterances if u[2] in dialect]
216
+ if sex is not None:
217
+ utterances = [u for u in utterances if u[4] in sex]
218
+ if spkrid is not None:
219
+ utterances = [u for u in utterances if u[:9] in spkrid]
220
+ if sent_type is not None:
221
+ utterances = [u for u in utterances if u[11] in sent_type]
222
+ if sentid is not None:
223
+ utterances = [u for u in utterances if u[10:] in sentid]
224
+ return utterances
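+ # Hedged example of the filters above (`reader` stands for any
+ # TimitCorpusReader instance): reader.utteranceids(dialect='1', sex='m')
+ # keeps only IDs whose dialect digit is '1' and whose sex character is 'm'.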
225
+
226
+ def transcription_dict(self):
227
+ """
228
+ :return: A dictionary giving the 'standard' transcription for
229
+ each word.
230
+ """
231
+ _transcriptions = {}
232
+ with self.open("timitdic.txt") as fp:
233
+ for line in fp:
234
+ if not line.strip() or line[0] == ";":
235
+ continue
236
+ m = re.match(r"\s*(\S+)\s+/(.*)/\s*$", line)
237
+ if not m:
238
+ raise ValueError("Bad line: %r" % line)
239
+ _transcriptions[m.group(1)] = m.group(2).split()
240
+ return _transcriptions
241
+
242
+ def spkrid(self, utterance):
243
+ return utterance.split("/")[0]
244
+
245
+ def sentid(self, utterance):
246
+ return utterance.split("/")[1]
247
+
248
+ def utterance(self, spkrid, sentid):
249
+ return f"{spkrid}/{sentid}"
250
+
251
+ def spkrutteranceids(self, speaker):
252
+ """
253
+ :return: A list of all utterances associated with a given
254
+ speaker.
255
+ """
256
+ return [
257
+ utterance
258
+ for utterance in self._utterances
259
+ if utterance.startswith(speaker + "/")
260
+ ]
261
+
262
+ def spkrinfo(self, speaker):
263
+ """
264
+ :return: A ``SpeakerInfo`` record for the given speaker ID (an utterance
+ ID may also be passed, in which case its speaker is used).
265
+ """
266
+ if speaker in self._utterances:
267
+ speaker = self.spkrid(speaker)
268
+
269
+ if self._speakerinfo is None:
270
+ self._speakerinfo = {}
271
+ with self.open("spkrinfo.txt") as fp:
272
+ for line in fp:
273
+ if not line.strip() or line[0] == ";":
274
+ continue
275
+ rec = line.strip().split(None, 9)
276
+ key = f"dr{rec[2]}-{rec[1].lower()}{rec[0].lower()}"
277
+ self._speakerinfo[key] = SpeakerInfo(*rec)
278
+
279
+ return self._speakerinfo[speaker]
280
+
281
+ def phones(self, utterances=None):
282
+ results = []
283
+ for fileid in self._utterance_fileids(utterances, ".phn"):
284
+ with self.open(fileid) as fp:
285
+ for line in fp:
286
+ if line.strip():
287
+ results.append(line.split()[-1])
288
+ return results
289
+
290
+ def phone_times(self, utterances=None):
291
+ """
292
+ offset is represented as a number of 16kHz samples!
293
+ """
294
+ results = []
295
+ for fileid in self._utterance_fileids(utterances, ".phn"):
296
+ with self.open(fileid) as fp:
297
+ for line in fp:
298
+ if line.strip():
299
+ results.append(
300
+ (
301
+ line.split()[2],
302
+ int(line.split()[0]),
303
+ int(line.split()[1]),
304
+ )
305
+ )
306
+ return results
307
+
308
+ def words(self, utterances=None):
309
+ results = []
310
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
311
+ with self.open(fileid) as fp:
312
+ for line in fp:
313
+ if line.strip():
314
+ results.append(line.split()[-1])
315
+ return results
316
+
317
+ def word_times(self, utterances=None):
318
+ results = []
319
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
320
+ with self.open(fileid) as fp:
321
+ for line in fp:
322
+ if line.strip():
323
+ results.append(
324
+ (
325
+ line.split()[2],
326
+ int(line.split()[0]),
327
+ int(line.split()[1]),
328
+ )
329
+ )
330
+ return results
331
+
332
+ def sents(self, utterances=None):
333
+ results = []
334
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
335
+ with self.open(fileid) as fp:
336
+ results.append([line.split()[-1] for line in fp if line.strip()])
337
+ return results
338
+
339
+ def sent_times(self, utterances=None):
340
+ # TODO: Check this
341
+ return [
342
+ (
343
+ line.split(None, 2)[-1].strip(),
344
+ int(line.split()[0]),
345
+ int(line.split()[1]),
346
+ )
347
+ for fileid in self._utterance_fileids(utterances, ".txt")
348
+ for line in self.open(fileid)
349
+ if line.strip()
350
+ ]
351
+
352
+ def phone_trees(self, utterances=None):
353
+ if utterances is None:
354
+ utterances = self._utterances
355
+ if isinstance(utterances, str):
356
+ utterances = [utterances]
357
+
358
+ trees = []
359
+ for utterance in utterances:
360
+ word_times = self.word_times(utterance)
361
+ phone_times = self.phone_times(utterance)
362
+ sent_times = self.sent_times(utterance)
363
+
364
+ while sent_times:
365
+ (sent, sent_start, sent_end) = sent_times.pop(0)
366
+ trees.append(Tree("S", []))
367
+ while (
368
+ word_times and phone_times and phone_times[0][2] <= word_times[0][1]
369
+ ):
370
+ trees[-1].append(phone_times.pop(0)[0])
371
+ while word_times and word_times[0][2] <= sent_end:
372
+ (word, word_start, word_end) = word_times.pop(0)
373
+ trees[-1].append(Tree(word, []))
374
+ while phone_times and phone_times[0][2] <= word_end:
375
+ trees[-1][-1].append(phone_times.pop(0)[0])
376
+ while phone_times and phone_times[0][2] <= sent_end:
377
+ trees[-1].append(phone_times.pop(0)[0])
378
+ return trees
379
+
380
+ # [xx] NOTE: This is currently broken -- we're assuming that the
381
+ # fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE
382
+ # fileids.
383
+ def wav(self, utterance, start=0, end=None):
384
+ # nltk.chunk conflicts with the stdlib module 'chunk'
385
+ wave = import_from_stdlib("wave")
386
+
387
+ w = wave.open(self.open(utterance + ".wav"), "rb")
388
+
389
+ if end is None:
390
+ end = w.getnframes()
391
+
392
+ # Skip past frames before start, then read the frames we want
393
+ w.readframes(start)
394
+ frames = w.readframes(end - start)
395
+
396
+ # Open a new temporary file -- the wave module requires
397
+ # an actual file, and won't work w/ stringio. :(
398
+ tf = tempfile.TemporaryFile()
399
+ out = wave.open(tf, "w")
400
+
401
+ # Write the parameters & data to the new file.
402
+ out.setparams(w.getparams())
403
+ out.writeframes(frames)
404
+ out.close()
405
+
406
+ # Read the data back from the file, and return it. The
407
+ # file will automatically be deleted when we return.
408
+ tf.seek(0)
409
+ return tf.read()
410
+
411
+ def audiodata(self, utterance, start=0, end=None):
412
+ assert end is None or end > start
413
+ headersize = 44
414
+ with self.open(utterance + ".wav") as fp:
415
+ if end is None:
416
+ data = fp.read()
417
+ else:
418
+ data = fp.read(headersize + end * 2)
419
+ return data[headersize + start * 2 :]
420
+
421
+ def _utterance_fileids(self, utterances, extension):
422
+ if utterances is None:
423
+ utterances = self._utterances
424
+ if isinstance(utterances, str):
425
+ utterances = [utterances]
426
+ return [f"{u}{extension}" for u in utterances]
427
+
428
+ def play(self, utterance, start=0, end=None):
429
+ """
430
+ Play the given audio sample.
431
+
432
+ :param utterance: The utterance id of the sample to play
433
+ """
434
+ # Method 1: os audio dev.
435
+ try:
436
+ import ossaudiodev
437
+
438
+ try:
439
+ dsp = ossaudiodev.open("w")
440
+ dsp.setfmt(ossaudiodev.AFMT_S16_LE)
441
+ dsp.channels(1)
442
+ dsp.speed(16000)
443
+ dsp.write(self.audiodata(utterance, start, end))
444
+ dsp.close()
445
+ except OSError as e:
446
+ print(
447
+ (
448
+ "can't acquire the audio device; please "
449
+ "activate your audio device."
450
+ ),
451
+ file=sys.stderr,
452
+ )
453
+ print("system error message:", str(e), file=sys.stderr)
454
+ return
455
+ except ImportError:
456
+ pass
457
+
458
+ # Method 2: pygame
459
+ try:
460
+ # Use io.BytesIO so this also works under Python 3
+ import io
+
+ import pygame.mixer
+
+ pygame.mixer.init(16000)
+ f = io.BytesIO(self.wav(utterance, start, end))
466
+ pygame.mixer.Sound(f).play()
467
+ while pygame.mixer.get_busy():
468
+ time.sleep(0.01)
469
+ return
470
+ except ImportError:
471
+ pass
472
+
473
+ # Method 3: complain. :)
474
+ print(
475
+ ("you must install pygame or ossaudiodev " "for audio playback."),
476
+ file=sys.stderr,
477
+ )
478
+
479
+
480
+ class SpeakerInfo:
481
+ def __init__(
482
+ self, id, sex, dr, use, recdate, birthdate, ht, race, edu, comments=None
483
+ ):
484
+ self.id = id
485
+ self.sex = sex
486
+ self.dr = dr
487
+ self.use = use
488
+ self.recdate = recdate
489
+ self.birthdate = birthdate
490
+ self.ht = ht
491
+ self.race = race
492
+ self.edu = edu
493
+ self.comments = comments
494
+
495
+ def __repr__(self):
496
+ attribs = "id sex dr use recdate birthdate ht race edu comments"
497
+ args = [f"{attr}={getattr(self, attr)!r}" for attr in attribs.split()]
498
+ return "SpeakerInfo(%s)" % (", ".join(args))
499
+
500
+
501
+ def read_timit_block(stream):
502
+ """
503
+ Block reader for timit tagged sentences, which are preceded by a sentence
504
+ number that will be ignored.
505
+ """
506
+ line = stream.readline()
507
+ if not line:
508
+ return []
509
+ n, sent = line.split(" ", 1)
510
+ return [sent]
venv/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py ADDED
@@ -0,0 +1,76 @@
1
+ # Natural Language Toolkit: Toolbox Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Greg Aumann <[email protected]>
5
+ # Stuart Robinson <[email protected]>
6
+ # Steven Bird <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ Module for reading, writing and manipulating
12
+ Toolbox databases and settings fileids.
13
+ """
14
+
15
+ from nltk.corpus.reader.api import *
16
+ from nltk.corpus.reader.util import *
17
+ from nltk.toolbox import ToolboxData
18
+
19
+
20
+ class ToolboxCorpusReader(CorpusReader):
21
+ def xml(self, fileids, key=None):
22
+ return concat(
23
+ [
24
+ ToolboxData(path, enc).parse(key=key)
25
+ for (path, enc) in self.abspaths(fileids, True)
26
+ ]
27
+ )
28
+
29
+ def fields(
30
+ self,
31
+ fileids,
32
+ strip=True,
33
+ unwrap=True,
34
+ encoding="utf8",
35
+ errors="strict",
36
+ unicode_fields=None,
37
+ ):
38
+ return concat(
39
+ [
40
+ list(
41
+ ToolboxData(fileid, enc).fields(
42
+ strip, unwrap, encoding, errors, unicode_fields
43
+ )
44
+ )
45
+ for (fileid, enc) in self.abspaths(fileids, include_encoding=True)
46
+ ]
47
+ )
48
+
49
+ # should probably be done lazily:
50
+ def entries(self, fileids, **kwargs):
51
+ if "key" in kwargs:
52
+ key = kwargs["key"]
53
+ del kwargs["key"]
54
+ else:
55
+ key = "lx" # the default key in MDF
56
+ entries = []
57
+ for marker, contents in self.fields(fileids, **kwargs):
58
+ if marker == key:
59
+ entries.append((contents, []))
60
+ else:
61
+ try:
62
+ entries[-1][-1].append((marker, contents))
63
+ except IndexError:
64
+ pass
65
+ return entries
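+ # Hedged example: with NLTK's sample Rotokas lexicon, entries are grouped
+ # under the MDF \lx marker (output abridged; field order may differ):
+ #   >>> from nltk.corpus import toolbox
+ #   >>> toolbox.entries('rotokas.dic')[0]   # doctest: +SKIP
+ #   ('kaa', [('ps', 'V'), ('pt', 'A'), ('ge', 'gag'), ...])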
66
+
67
+ def words(self, fileids, key="lx"):
68
+ return [contents for marker, contents in self.fields(fileids) if marker == key]
69
+
70
+
71
+ def demo():
72
+ pass
73
+
74
+
75
+ if __name__ == "__main__":
76
+ demo()
venv/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py ADDED
@@ -0,0 +1,136 @@
1
+ # Natural Language Toolkit: Twitter Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A reader for corpora that consist of Tweets. It is assumed that the Tweets
10
+ have been serialised into line-delimited JSON.
11
+ """
12
+
13
+ import json
14
+ import os
15
+
16
+ from nltk.corpus.reader.api import CorpusReader
17
+ from nltk.corpus.reader.util import StreamBackedCorpusView, ZipFilePathPointer, concat
18
+ from nltk.tokenize import TweetTokenizer
19
+
20
+
21
+ class TwitterCorpusReader(CorpusReader):
22
+ r"""
23
+ Reader for corpora that consist of Tweets represented as a list of line-delimited JSON.
24
+
25
+ Individual Tweets can be tokenized using the default tokenizer, or by a
26
+ custom tokenizer specified as a parameter to the constructor.
27
+
28
+ Construct a new Tweet corpus reader for a set of documents
29
+ located at the given root directory.
30
+
31
+ If you made your own tweet collection in a directory called
32
+ `twitter-files`, then you can initialise the reader as::
33
+
34
+ from nltk.corpus import TwitterCorpusReader
35
+ reader = TwitterCorpusReader(root='/path/to/twitter-files', fileids='.*\.json')
36
+
37
+ However, the recommended approach is to set the relevant directory as the
38
+ value of the environmental variable `TWITTER`, and then invoke the reader
39
+ as follows::
40
+
41
+ root = os.environ['TWITTER']
42
+ reader = TwitterCorpusReader(root, '.*\.json')
43
+
44
+ If you want to work directly with the raw Tweets, the `json` library can
45
+ be used::
46
+
47
+ import json
48
+ for tweet in reader.docs():
49
+ print(json.dumps(tweet, indent=1, sort_keys=True))
50
+
51
+ """
52
+
53
+ CorpusView = StreamBackedCorpusView
54
+ """
55
+ The corpus view class used by this reader.
56
+ """
57
+
58
+ def __init__(
59
+ self, root, fileids=None, word_tokenizer=TweetTokenizer(), encoding="utf8"
60
+ ):
61
+ """
62
+ :param root: The root directory for this corpus.
63
+ :param fileids: A list or regexp specifying the fileids in this corpus.
64
+ :param word_tokenizer: Tokenizer for breaking the text of Tweets into
65
+ smaller units, including but not limited to words.
66
+ """
67
+ CorpusReader.__init__(self, root, fileids, encoding)
68
+
69
+ for path in self.abspaths(self._fileids):
70
+ if isinstance(path, ZipFilePathPointer):
71
+ pass
72
+ elif os.path.getsize(path) == 0:
73
+ raise ValueError(f"File {path} is empty")
74
+ """Check that all user-created corpus files are non-empty."""
75
+
76
+ self._word_tokenizer = word_tokenizer
77
+
78
+ def docs(self, fileids=None):
79
+ """
80
+ Returns the full Tweet objects, as specified by `Twitter
81
+ documentation on Tweets
82
+ <https://dev.twitter.com/docs/platform-objects/tweets>`_
83
+
84
+ :return: the given file(s) as a list of dictionaries deserialised
85
+ from JSON.
86
+ :rtype: list(dict)
87
+ """
88
+ return concat(
89
+ [
90
+ self.CorpusView(path, self._read_tweets, encoding=enc)
91
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
92
+ ]
93
+ )
94
+
95
+ def strings(self, fileids=None):
96
+ """
97
+ Returns only the text content of Tweets in the file(s)
98
+
99
+ :return: the given file(s) as a list of Tweets.
100
+ :rtype: list(str)
101
+ """
102
+ fulltweets = self.docs(fileids)
103
+ tweets = []
104
+ for jsono in fulltweets:
105
+ try:
106
+ text = jsono["text"]
107
+ if isinstance(text, bytes):
108
+ text = text.decode(self.encoding)
109
+ tweets.append(text)
110
+ except KeyError:
111
+ pass
112
+ return tweets
113
+
114
+ def tokenized(self, fileids=None):
115
+ """
116
+ :return: the given file(s) as a list of the text content of Tweets,
+ each tokenized into a list of words, screen names, hashtags, URLs and punctuation symbols.
118
+
119
+ :rtype: list(list(str))
120
+ """
121
+ tweets = self.strings(fileids)
122
+ tokenizer = self._word_tokenizer
123
+ return [tokenizer.tokenize(t) for t in tweets]
124
+
125
+ def _read_tweets(self, stream):
126
+ """
127
+ Assumes that each line in ``stream`` is a JSON-serialised object.
128
+ """
129
+ tweets = []
130
+ for i in range(10):
131
+ line = stream.readline()
132
+ if not line:
133
+ return tweets
134
+ tweet = json.loads(line)
135
+ tweets.append(tweet)
136
+ return tweets
venv/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py ADDED
@@ -0,0 +1,2489 @@
1
+ # Natural Language Toolkit: WordNet
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bethard <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Edward Loper <[email protected]>
7
+ # Nitin Madnani <[email protected]>
8
+ # Nasruddin A’aidil Shari
9
+ # Sim Wei Ying Geraldine
10
+ # Soe Lynn
11
+ # Francis Bond <[email protected]>
12
+ # Eric Kafe <[email protected]>
13
+
14
+ # URL: <https://www.nltk.org/>
15
+ # For license information, see LICENSE.TXT
16
+
17
+ """
18
+ An NLTK interface for WordNet
19
+
20
+ WordNet is a lexical database of English.
21
+ Using synsets, it helps find conceptual relationships between words,
+ such as hypernyms, hyponyms, synonyms, and antonyms.
23
+
24
+ For details about WordNet see:
25
+ https://wordnet.princeton.edu/
26
+
27
+ This module also allows you to find lemmas in languages
28
+ other than English from the Open Multilingual Wordnet
29
+ https://omwn.org/
30
+
31
+ """
32
+
33
+ import math
34
+ import os
35
+ import re
36
+ import warnings
37
+ from collections import defaultdict, deque
38
+ from functools import total_ordering
39
+ from itertools import chain, islice
40
+ from operator import itemgetter
41
+
42
+ from nltk.corpus.reader import CorpusReader
43
+ from nltk.internals import deprecated
44
+ from nltk.probability import FreqDist
45
+ from nltk.util import binary_search_file as _binary_search_file
46
+
47
+ ######################################################################
48
+ # Table of Contents
49
+ ######################################################################
50
+ # - Constants
51
+ # - Data Classes
52
+ # - WordNetError
53
+ # - Lemma
54
+ # - Synset
55
+ # - WordNet Corpus Reader
56
+ # - WordNet Information Content Corpus Reader
57
+ # - Similarity Metrics
58
+ # - Demo
59
+
60
+ ######################################################################
61
+ # Constants
62
+ ######################################################################
63
+
64
+ #: Positive infinity (for similarity functions)
65
+ _INF = 1e300
66
+
67
+ # { Part-of-speech constants
68
+ ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v"
69
+ # }
70
+
71
+ POS_LIST = [NOUN, VERB, ADJ, ADV]
72
+
73
+ # A table of strings that are used to express verb frames.
74
+ VERB_FRAME_STRINGS = (
75
+ None,
76
+ "Something %s",
77
+ "Somebody %s",
78
+ "It is %sing",
79
+ "Something is %sing PP",
80
+ "Something %s something Adjective/Noun",
81
+ "Something %s Adjective/Noun",
82
+ "Somebody %s Adjective",
83
+ "Somebody %s something",
84
+ "Somebody %s somebody",
85
+ "Something %s somebody",
86
+ "Something %s something",
87
+ "Something %s to somebody",
88
+ "Somebody %s on something",
89
+ "Somebody %s somebody something",
90
+ "Somebody %s something to somebody",
91
+ "Somebody %s something from somebody",
92
+ "Somebody %s somebody with something",
93
+ "Somebody %s somebody of something",
94
+ "Somebody %s something on somebody",
95
+ "Somebody %s somebody PP",
96
+ "Somebody %s something PP",
97
+ "Somebody %s PP",
98
+ "Somebody's (body part) %s",
99
+ "Somebody %s somebody to INFINITIVE",
100
+ "Somebody %s somebody INFINITIVE",
101
+ "Somebody %s that CLAUSE",
102
+ "Somebody %s to somebody",
103
+ "Somebody %s to INFINITIVE",
104
+ "Somebody %s whether INFINITIVE",
105
+ "Somebody %s somebody into V-ing something",
106
+ "Somebody %s something with something",
107
+ "Somebody %s INFINITIVE",
108
+ "Somebody %s VERB-ing",
109
+ "It %s that CLAUSE",
110
+ "Something %s INFINITIVE",
111
+ # OEWN additions:
112
+ "Somebody %s at something",
113
+ "Somebody %s for something",
114
+ "Somebody %s on somebody",
115
+ "Somebody %s out of somebody",
116
+ )
117
+
118
+ SENSENUM_RE = re.compile(r"\.[\d]+\.")
119
+
120
+
121
+ ######################################################################
122
+ # Data Classes
123
+ ######################################################################
124
+
125
+
126
+ class WordNetError(Exception):
127
+ """An exception class for wordnet-related errors."""
128
+
129
+
130
+ @total_ordering
131
+ class _WordNetObject:
132
+ """A common base class for lemmas and synsets."""
133
+
134
+ def hypernyms(self):
135
+ return self._related("@")
136
+
137
+ def _hypernyms(self):
138
+ return self._related("@")
139
+
140
+ def instance_hypernyms(self):
141
+ return self._related("@i")
142
+
143
+ def _instance_hypernyms(self):
144
+ return self._related("@i")
145
+
146
+ def hyponyms(self):
147
+ return self._related("~")
148
+
149
+ def instance_hyponyms(self):
150
+ return self._related("~i")
151
+
152
+ def member_holonyms(self):
153
+ return self._related("#m")
154
+
155
+ def substance_holonyms(self):
156
+ return self._related("#s")
157
+
158
+ def part_holonyms(self):
159
+ return self._related("#p")
160
+
161
+ def member_meronyms(self):
162
+ return self._related("%m")
163
+
164
+ def substance_meronyms(self):
165
+ return self._related("%s")
166
+
167
+ def part_meronyms(self):
168
+ return self._related("%p")
169
+
170
+ def topic_domains(self):
171
+ return self._related(";c")
172
+
173
+ def in_topic_domains(self):
174
+ return self._related("-c")
175
+
176
+ def region_domains(self):
177
+ return self._related(";r")
178
+
179
+ def in_region_domains(self):
180
+ return self._related("-r")
181
+
182
+ def usage_domains(self):
183
+ return self._related(";u")
184
+
185
+ def in_usage_domains(self):
186
+ return self._related("-u")
187
+
188
+ def attributes(self):
189
+ return self._related("=")
190
+
191
+ def entailments(self):
192
+ return self._related("*")
193
+
194
+ def causes(self):
195
+ return self._related(">")
196
+
197
+ def also_sees(self):
198
+ return self._related("^")
199
+
200
+ def verb_groups(self):
201
+ return self._related("$")
202
+
203
+ def similar_tos(self):
204
+ return self._related("&")
205
+
206
+ def __hash__(self):
207
+ return hash(self._name)
208
+
209
+ def __eq__(self, other):
210
+ return self._name == other._name
211
+
212
+ def __ne__(self, other):
213
+ return self._name != other._name
214
+
215
+ def __lt__(self, other):
216
+ return self._name < other._name
217
+
218
+
219
+ class Lemma(_WordNetObject):
220
+ """
221
+ The lexical entry for a single morphological form of a
222
+ sense-disambiguated word.
223
+
224
+ Create a Lemma from a "<word>.<pos>.<number>.<lemma>" string where:
225
+ <word> is the morphological stem identifying the synset
226
+ <pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
227
+ <number> is the sense number, counting from 0.
228
+ <lemma> is the morphological form of interest
229
+
230
+ Note that <word> and <lemma> can be different, e.g. the Synset
231
+ 'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and
232
+ 'salt.n.03.salinity'.
233
+
234
+ Lemma attributes, accessible via methods with the same name:
235
+
236
+ - name: The canonical name of this lemma.
237
+ - synset: The synset that this lemma belongs to.
238
+ - syntactic_marker: For adjectives, the WordNet string identifying the
239
+ syntactic position relative modified noun. See:
240
+ https://wordnet.princeton.edu/documentation/wninput5wn
241
+ For all other parts of speech, this attribute is None.
242
+ - count: The frequency of this lemma in wordnet.
243
+
244
+ Lemma methods:
245
+
246
+ Lemmas have the following methods for retrieving related Lemmas. They
247
+ correspond to the names for the pointer symbols defined here:
248
+ https://wordnet.princeton.edu/documentation/wninput5wn
249
+ These methods all return lists of Lemmas:
250
+
251
+ - antonyms
252
+ - hypernyms, instance_hypernyms
253
+ - hyponyms, instance_hyponyms
254
+ - member_holonyms, substance_holonyms, part_holonyms
255
+ - member_meronyms, substance_meronyms, part_meronyms
256
+ - topic_domains, region_domains, usage_domains
257
+ - attributes
258
+ - derivationally_related_forms
259
+ - entailments
260
+ - causes
261
+ - also_sees
262
+ - verb_groups
263
+ - similar_tos
264
+ - pertainyms
265
+ """
266
+
267
+ __slots__ = [
268
+ "_wordnet_corpus_reader",
269
+ "_name",
270
+ "_syntactic_marker",
271
+ "_synset",
272
+ "_frame_strings",
273
+ "_frame_ids",
274
+ "_lexname_index",
275
+ "_lex_id",
276
+ "_lang",
277
+ "_key",
278
+ ]
279
+
280
+ def __init__(
281
+ self,
282
+ wordnet_corpus_reader,
283
+ synset,
284
+ name,
285
+ lexname_index,
286
+ lex_id,
287
+ syntactic_marker,
288
+ ):
289
+ self._wordnet_corpus_reader = wordnet_corpus_reader
290
+ self._name = name
291
+ self._syntactic_marker = syntactic_marker
292
+ self._synset = synset
293
+ self._frame_strings = []
294
+ self._frame_ids = []
295
+ self._lexname_index = lexname_index
296
+ self._lex_id = lex_id
297
+ self._lang = "eng"
298
+
299
+ self._key = None # gets set later.
300
+
301
+ def name(self):
302
+ return self._name
303
+
304
+ def syntactic_marker(self):
305
+ return self._syntactic_marker
306
+
307
+ def synset(self):
308
+ return self._synset
309
+
310
+ def frame_strings(self):
311
+ return self._frame_strings
312
+
313
+ def frame_ids(self):
314
+ return self._frame_ids
315
+
316
+ def lang(self):
317
+ return self._lang
318
+
319
+ def key(self):
320
+ return self._key
321
+
322
+ def __repr__(self):
323
+ tup = type(self).__name__, self._synset._name, self._name
324
+ return "%s('%s.%s')" % tup
325
+
326
+ def _related(self, relation_symbol):
327
+ get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset
328
+ if (self._name, relation_symbol) not in self._synset._lemma_pointers:
329
+ return []
330
+ return [
331
+ get_synset(pos, offset)._lemmas[lemma_index]
332
+ for pos, offset, lemma_index in self._synset._lemma_pointers[
333
+ self._name, relation_symbol
334
+ ]
335
+ ]
336
+
337
+ def count(self):
338
+ """Return the frequency count for this Lemma"""
339
+ return self._wordnet_corpus_reader.lemma_count(self)
340
+
341
+ def antonyms(self):
342
+ return self._related("!")
343
+
344
+ def derivationally_related_forms(self):
345
+ return self._related("+")
346
+
347
+ def pertainyms(self):
348
+ return self._related("\\")
349
+
350
+
351
+ class Synset(_WordNetObject):
352
+ """Create a Synset from a "<lemma>.<pos>.<number>" string where:
353
+ <lemma> is the word's morphological stem
354
+ <pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
355
+ <number> is the sense number, counting from 0.
356
+
357
+ Synset attributes, accessible via methods with the same name:
358
+
359
+ - name: The canonical name of this synset, formed using the first lemma
360
+ of this synset. Note that this may be different from the name
361
+ passed to the constructor if that string used a different lemma to
362
+ identify the synset.
363
+ - pos: The synset's part of speech, matching one of the module level
364
+ attributes ADJ, ADJ_SAT, ADV, NOUN or VERB.
365
+ - lemmas: A list of the Lemma objects for this synset.
366
+ - definition: The definition for this synset.
367
+ - examples: A list of example strings for this synset.
368
+ - offset: The offset in the WordNet dict file of this synset.
369
+ - lexname: The name of the lexicographer file containing this synset.
370
+
371
+ Synset methods:
372
+
373
+ Synsets have the following methods for retrieving related Synsets.
374
+ They correspond to the names for the pointer symbols defined here:
375
+ https://wordnet.princeton.edu/documentation/wninput5wn
376
+ These methods all return lists of Synsets.
377
+
378
+ - hypernyms, instance_hypernyms
379
+ - hyponyms, instance_hyponyms
380
+ - member_holonyms, substance_holonyms, part_holonyms
381
+ - member_meronyms, substance_meronyms, part_meronyms
382
+ - attributes
383
+ - entailments
384
+ - causes
385
+ - also_sees
386
+ - verb_groups
387
+ - similar_tos
388
+
389
+ Additionally, Synsets support the following methods specific to the
390
+ hypernym relation:
391
+
392
+ - root_hypernyms
393
+ - common_hypernyms
394
+ - lowest_common_hypernyms
395
+
396
+ Note that Synsets do not support the following relations because
397
+ these are defined by WordNet as lexical relations:
398
+
399
+ - antonyms
400
+ - derivationally_related_forms
401
+ - pertainyms
402
+ """
403
+
404
+ __slots__ = [
405
+ "_pos",
406
+ "_offset",
407
+ "_name",
408
+ "_frame_ids",
409
+ "_lemmas",
410
+ "_lemma_names",
411
+ "_definition",
412
+ "_examples",
413
+ "_lexname",
414
+ "_pointers",
415
+ "_lemma_pointers",
416
+ "_max_depth",
417
+ "_min_depth",
418
+ ]
419
+
420
+ def __init__(self, wordnet_corpus_reader):
421
+ self._wordnet_corpus_reader = wordnet_corpus_reader
422
+ # All of these attributes get initialized by
423
+ # WordNetCorpusReader._synset_from_pos_and_line()
424
+
425
+ self._pos = None
426
+ self._offset = None
427
+ self._name = None
428
+ self._frame_ids = []
429
+ self._lemmas = []
430
+ self._lemma_names = []
431
+ self._definition = None
432
+ self._examples = []
433
+ self._lexname = None # lexicographer name
434
+ self._all_hypernyms = None
435
+
436
+ self._pointers = defaultdict(set)
437
+ self._lemma_pointers = defaultdict(list)
438
+
439
+ def pos(self):
440
+ return self._pos
441
+
442
+ def offset(self):
443
+ return self._offset
444
+
445
+ def name(self):
446
+ return self._name
447
+
448
+ def frame_ids(self):
449
+ return self._frame_ids
450
+
451
+ def _doc(self, doc_type, default, lang="eng"):
452
+ """Helper method for Synset.definition and Synset.examples"""
453
+ corpus = self._wordnet_corpus_reader
454
+ if lang not in corpus.langs():
455
+ return None
456
+ elif lang == "eng":
457
+ return default
458
+ else:
459
+ corpus._load_lang_data(lang)
460
+ of = corpus.ss2of(self)
461
+ i = corpus.lg_attrs.index(doc_type)
462
+ if of in corpus._lang_data[lang][i]:
463
+ return corpus._lang_data[lang][i][of]
464
+ else:
465
+ return None
466
+
467
+ def definition(self, lang="eng"):
468
+ """Return definition in specified language"""
469
+ return self._doc("def", self._definition, lang=lang)
470
+
471
+ def examples(self, lang="eng"):
472
+ """Return examples in specified language"""
473
+ return self._doc("exe", self._examples, lang=lang)
474
+
475
+ def lexname(self):
476
+ return self._lexname
477
+
478
+ def _needs_root(self):
479
+ if self._pos == NOUN and self._wordnet_corpus_reader.get_version() != "1.6":
480
+ return False
481
+ else:
482
+ return True
483
+
484
+ def lemma_names(self, lang="eng"):
485
+ """Return all the lemma_names associated with the synset"""
486
+ if lang == "eng":
487
+ return self._lemma_names
488
+ else:
489
+ reader = self._wordnet_corpus_reader
490
+ reader._load_lang_data(lang)
491
+ i = reader.ss2of(self)
492
+ if i in reader._lang_data[lang][0]:
493
+ return reader._lang_data[lang][0][i]
494
+ else:
495
+ return []
496
+
497
+ def lemmas(self, lang="eng"):
498
+ """Return all the lemma objects associated with the synset"""
499
+ if lang == "eng":
500
+ return self._lemmas
501
+ elif self._name:
502
+ self._wordnet_corpus_reader._load_lang_data(lang)
503
+ lemmark = []
504
+ lemmy = self.lemma_names(lang)
505
+ for lem in lemmy:
506
+ temp = Lemma(
507
+ self._wordnet_corpus_reader,
508
+ self,
509
+ lem,
510
+ self._wordnet_corpus_reader._lexnames.index(self.lexname()),
511
+ 0,
512
+ None,
513
+ )
514
+ temp._lang = lang
515
+ lemmark.append(temp)
516
+ return lemmark
517
+
518
+ def root_hypernyms(self):
519
+ """Get the topmost hypernyms of this synset in WordNet."""
520
+
521
+ result = []
522
+ seen = set()
523
+ todo = [self]
524
+ while todo:
525
+ next_synset = todo.pop()
526
+ if next_synset not in seen:
527
+ seen.add(next_synset)
528
+ next_hypernyms = (
529
+ next_synset.hypernyms() + next_synset.instance_hypernyms()
530
+ )
531
+ if not next_hypernyms:
532
+ result.append(next_synset)
533
+ else:
534
+ todo.extend(next_hypernyms)
535
+ return result
536
+
537
+ # Simpler implementation which makes incorrect assumption that
538
+ # hypernym hierarchy is acyclic:
539
+ #
540
+ # if not self.hypernyms():
541
+ # return [self]
542
+ # else:
543
+ # return list(set(root for h in self.hypernyms()
544
+ # for root in h.root_hypernyms()))
545
+ def max_depth(self):
546
+ """
547
+ :return: The length of the longest hypernym path from this
548
+ synset to the root.
549
+ """
550
+
551
+ if "_max_depth" not in self.__dict__:
552
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
553
+ if not hypernyms:
554
+ self._max_depth = 0
555
+ else:
556
+ self._max_depth = 1 + max(h.max_depth() for h in hypernyms)
557
+ return self._max_depth
558
+
559
+ def min_depth(self):
560
+ """
561
+ :return: The length of the shortest hypernym path from this
562
+ synset to the root.
563
+ """
564
+
565
+ if "_min_depth" not in self.__dict__:
566
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
567
+ if not hypernyms:
568
+ self._min_depth = 0
569
+ else:
570
+ self._min_depth = 1 + min(h.min_depth() for h in hypernyms)
571
+ return self._min_depth
572
+
573
+ def closure(self, rel, depth=-1):
574
+ """
575
+ Return the transitive closure of source under the rel
576
+ relationship, breadth-first, discarding cycles:
577
+
578
+ >>> from nltk.corpus import wordnet as wn
579
+ >>> computer = wn.synset('computer.n.01')
580
+ >>> topic = lambda s:s.topic_domains()
581
+ >>> print(list(computer.closure(topic)))
582
+ [Synset('computer_science.n.01')]
583
+
584
+ UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2
585
+
586
+
587
+ Include redundant paths (but only once), avoiding duplicate searches
588
+ (from 'animal.n.01' to 'entity.n.01'):
589
+
590
+ >>> dog = wn.synset('dog.n.01')
591
+ >>> hyp = lambda s:s.hypernyms()
592
+ >>> print(list(dog.closure(hyp)))
593
+ [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\
594
+ Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\
595
+ Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\
596
+ Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\
597
+ Synset('physical_entity.n.01'), Synset('entity.n.01')]
598
+
599
+ UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7
600
+ """
601
+
602
+ from nltk.util import acyclic_breadth_first
603
+
604
+ for synset in acyclic_breadth_first(self, rel, depth):
605
+ if synset != self:
606
+ yield synset
607
+
608
+ from nltk.util import acyclic_depth_first as acyclic_tree
609
+ from nltk.util import unweighted_minimum_spanning_tree as mst
610
+
611
+ # Also add this shortcut?
612
+ # from nltk.util import unweighted_minimum_spanning_digraph as umsd
613
+
614
+ def tree(self, rel, depth=-1, cut_mark=None):
615
+ """
616
+ Return the full relation tree, including self,
617
+ discarding cycles:
618
+
619
+ >>> from nltk.corpus import wordnet as wn
620
+ >>> from pprint import pprint
621
+ >>> computer = wn.synset('computer.n.01')
622
+ >>> topic = lambda s:s.topic_domains()
623
+ >>> pprint(computer.tree(topic))
624
+ [Synset('computer.n.01'), [Synset('computer_science.n.01')]]
625
+
626
+ UserWarning: Discarded redundant search for Synset('computer.n.01') at depth -3
627
+
628
+
629
+ But keep duplicate branches (from 'animal.n.01' to 'entity.n.01'):
630
+
631
+ >>> dog = wn.synset('dog.n.01')
632
+ >>> hyp = lambda s:s.hypernyms()
633
+ >>> pprint(dog.tree(hyp))
634
+ [Synset('dog.n.01'),
635
+ [Synset('canine.n.02'),
636
+ [Synset('carnivore.n.01'),
637
+ [Synset('placental.n.01'),
638
+ [Synset('mammal.n.01'),
639
+ [Synset('vertebrate.n.01'),
640
+ [Synset('chordate.n.01'),
641
+ [Synset('animal.n.01'),
642
+ [Synset('organism.n.01'),
643
+ [Synset('living_thing.n.01'),
644
+ [Synset('whole.n.02'),
645
+ [Synset('object.n.01'),
646
+ [Synset('physical_entity.n.01'),
647
+ [Synset('entity.n.01')]]]]]]]]]]]]],
648
+ [Synset('domestic_animal.n.01'),
649
+ [Synset('animal.n.01'),
650
+ [Synset('organism.n.01'),
651
+ [Synset('living_thing.n.01'),
652
+ [Synset('whole.n.02'),
653
+ [Synset('object.n.01'),
654
+ [Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]]
655
+ """
656
+
657
+ from nltk.util import acyclic_branches_depth_first
658
+
659
+ return acyclic_branches_depth_first(self, rel, depth, cut_mark)
660
+
661
+ def hypernym_paths(self):
662
+ """
663
+ Get the path(s) from this synset to the root, where each path is a
664
+ list of the synset nodes traversed on the way to the root.
665
+
666
+ :return: A list of lists, where each list gives the node sequence
667
+ connecting the initial ``Synset`` node and a root node.
668
+ """
669
+ paths = []
670
+
671
+ hypernyms = self.hypernyms() + self.instance_hypernyms()
672
+ if len(hypernyms) == 0:
673
+ paths = [[self]]
674
+
675
+ for hypernym in hypernyms:
676
+ for ancestor_list in hypernym.hypernym_paths():
677
+ ancestor_list.append(self)
678
+ paths.append(ancestor_list)
679
+ return paths
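+ # Hedged example: 'dog.n.01' reaches the root along two hypernym branches
+ # (via carnivore.n.01 and via domestic_animal.n.01), so
+ #   >>> len(wn.synset('dog.n.01').hypernym_paths())
+ #   2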
680
+
681
+ def common_hypernyms(self, other):
682
+ """
683
+ Find all synsets that are hypernyms of this synset and the
684
+ other synset.
685
+
686
+ :type other: Synset
687
+ :param other: other input synset.
688
+ :return: The synsets that are hypernyms of both synsets.
689
+ """
690
+ if not self._all_hypernyms:
691
+ self._all_hypernyms = {
692
+ self_synset
693
+ for self_synsets in self._iter_hypernym_lists()
694
+ for self_synset in self_synsets
695
+ }
696
+ if not other._all_hypernyms:
697
+ other._all_hypernyms = {
698
+ other_synset
699
+ for other_synsets in other._iter_hypernym_lists()
700
+ for other_synset in other_synsets
701
+ }
702
+ return list(self._all_hypernyms.intersection(other._all_hypernyms))
703
+
704
+ def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False):
705
+ """
706
+ Get a list of lowest synset(s) that both synsets have as a hypernym.
707
+ When `use_min_depth == False` this means that the synset which appears
+ as a hypernym of both `self` and `other` and lies lowest in the taxonomy
+ (i.e. has the greatest maximum depth) is returned; if there are multiple
+ such synsets at the same depth, they are all returned.
711
+
712
+ However, if `use_min_depth == True` then the synset(s) which has/have
713
+ the lowest minimum depth and appear(s) in both paths is/are returned.
714
+
715
+ By setting the use_min_depth flag to True, the behavior of NLTK2 can be
716
+ preserved. This was changed in NLTK3 to give more accurate results in a
717
+ small set of cases, generally with synsets concerning people. (eg:
718
+ 'chef.n.01', 'fireman.n.01', etc.)
719
+
720
+ This method is an implementation of Ted Pedersen's "Lowest Common
721
+ Subsumer" method from the Perl Wordnet module. It can return either
722
+ "self" or "other" if they are a hypernym of the other.
723
+
724
+ :type other: Synset
725
+ :param other: other input synset
726
+ :type simulate_root: bool
727
+ :param simulate_root: The various verb taxonomies do not
728
+ share a single root which disallows this metric from working for
729
+ synsets that are not connected. This flag (False by default)
730
+ creates a fake root that connects all the taxonomies. Set it
731
+ to True to enable this behavior. For the noun taxonomy,
732
+ there is usually a default root except for WordNet version 1.6.
733
+ If you are using wordnet 1.6, a fake root will need to be added
734
+ for nouns as well.
735
+ :type use_min_depth: bool
736
+ :param use_min_depth: This setting mimics older (v2) behavior of NLTK
737
+ wordnet If True, will use the min_depth function to calculate the
738
+ lowest common hypernyms. This is known to give strange results for
739
+ some synset pairs (eg: 'chef.n.01', 'fireman.n.01') but is retained
740
+ for backwards compatibility
741
+ :return: The synsets that are the lowest common hypernyms of both
742
+ synsets
743
+ """
744
+ synsets = self.common_hypernyms(other)
745
+ if simulate_root:
746
+ fake_synset = Synset(None)
747
+ fake_synset._name = "*ROOT*"
748
+ fake_synset.hypernyms = lambda: []
749
+ fake_synset.instance_hypernyms = lambda: []
750
+ synsets.append(fake_synset)
751
+
752
+ try:
753
+ if use_min_depth:
754
+ max_depth = max(s.min_depth() for s in synsets)
755
+ unsorted_lch = [s for s in synsets if s.min_depth() == max_depth]
756
+ else:
757
+ max_depth = max(s.max_depth() for s in synsets)
758
+ unsorted_lch = [s for s in synsets if s.max_depth() == max_depth]
759
+ return sorted(unsorted_lch)
760
+ except ValueError:
761
+ return []
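+ # Hedged example:
+ #   >>> wn.synset('dog.n.01').lowest_common_hypernyms(wn.synset('cat.n.01'))
+ #   [Synset('carnivore.n.01')]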
762
+
763
+ def hypernym_distances(self, distance=0, simulate_root=False):
764
+ """
765
+ Get the path(s) from this synset to the root, counting the distance
766
+ of each node from the initial node on the way. A set of
767
+ (synset, distance) tuples is returned.
768
+
769
+ :type distance: int
770
+ :param distance: the distance (number of edges) from this hypernym to
771
+ the original hypernym ``Synset`` on which this method was called.
772
+ :return: A set of ``(Synset, int)`` tuples where each ``Synset`` is
773
+ a hypernym of the first ``Synset``.
774
+ """
775
+ distances = {(self, distance)}
776
+ for hypernym in self._hypernyms() + self._instance_hypernyms():
777
+ distances |= hypernym.hypernym_distances(distance + 1, simulate_root=False)
778
+ if simulate_root:
779
+ fake_synset = Synset(None)
780
+ fake_synset._name = "*ROOT*"
781
+ fake_synset_distance = max(distances, key=itemgetter(1))[1]
782
+ distances.add((fake_synset, fake_synset_distance + 1))
783
+ return distances
784
+
785
+ def _shortest_hypernym_paths(self, simulate_root):
786
+ if self._name == "*ROOT*":
787
+ return {self: 0}
788
+
789
+ queue = deque([(self, 0)])
790
+ path = {}
791
+
792
+ while queue:
793
+ s, depth = queue.popleft()
794
+ if s in path:
795
+ continue
796
+ path[s] = depth
797
+
798
+ depth += 1
799
+ queue.extend((hyp, depth) for hyp in s._hypernyms())
800
+ queue.extend((hyp, depth) for hyp in s._instance_hypernyms())
801
+
802
+ if simulate_root:
803
+ fake_synset = Synset(None)
804
+ fake_synset._name = "*ROOT*"
805
+ path[fake_synset] = max(path.values()) + 1
806
+
807
+ return path
808
+
809
+ def shortest_path_distance(self, other, simulate_root=False):
810
+ """
811
+ Returns the distance of the shortest path linking the two synsets (if
812
+ one exists). For each synset, all the ancestor nodes and their
813
+ distances are recorded and compared. The ancestor node common to both
814
+ synsets that can be reached with the minimum number of traversals is
815
+ used. If no ancestor nodes are common, None is returned. If a node is
816
+ compared with itself 0 is returned.
817
+
818
+ :type other: Synset
819
+ :param other: The Synset to which the shortest path will be found.
820
+ :return: The number of edges in the shortest path connecting the two
821
+ nodes, or None if no path exists.
822
+ """
823
+
824
+ if self == other:
825
+ return 0
826
+
827
+ dist_dict1 = self._shortest_hypernym_paths(simulate_root)
828
+ dist_dict2 = other._shortest_hypernym_paths(simulate_root)
829
+
830
+ # For each ancestor synset common to both subject synsets, find the
831
+ # connecting path length. Return the shortest of these.
832
+
833
+ inf = float("inf")
834
+ path_distance = inf
835
+ for synset, d1 in dist_dict1.items():
836
+ d2 = dist_dict2.get(synset, inf)
837
+ path_distance = min(path_distance, d1 + d2)
838
+
839
+ return None if math.isinf(path_distance) else path_distance
840
+
841
+ # interface to similarity methods
842
+ def path_similarity(self, other, verbose=False, simulate_root=True):
843
+ """
844
+ Path Distance Similarity:
845
+ Return a score denoting how similar two word senses are, based on the
846
+ shortest path that connects the senses in the is-a (hypernym/hyponym)
847
+ taxonomy. The score is in the range 0 to 1, except in those cases where
848
+ a path cannot be found (will only be true for verbs as there are many
849
+ distinct verb taxonomies), in which case None is returned. A score of
850
+ 1 represents identity i.e. comparing a sense with itself will return 1.
851
+
852
+ :type other: Synset
853
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
854
+ :type simulate_root: bool
855
+ :param simulate_root: The various verb taxonomies do not
856
+ share a single root which disallows this metric from working for
857
+ synsets that are not connected. This flag (True by default)
858
+ creates a fake root that connects all the taxonomies. Set it
859
+ to false to disable this behavior. For the noun taxonomy,
860
+ there is usually a default root except for WordNet version 1.6.
861
+ If you are using wordnet 1.6, a fake root will be added for nouns
862
+ as well.
863
+ :return: A score denoting the similarity of the two ``Synset`` objects,
864
+ normally between 0 and 1. None is returned if no connecting path
865
+ could be found. 1 is returned if a ``Synset`` is compared with
866
+ itself.
867
+ """
868
+
869
+ distance = self.shortest_path_distance(
870
+ other,
871
+ simulate_root=simulate_root and (self._needs_root() or other._needs_root()),
872
+ )
873
+ if distance is None or distance < 0:
874
+ return None
875
+ return 1.0 / (distance + 1)
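+ # Hedged worked example: in WordNet 3.0 the shortest dog/cat path has
+ # length 4, so the score is 1 / (4 + 1) = 0.2:
+ #   >>> wn.synset('dog.n.01').path_similarity(wn.synset('cat.n.01'))
+ #   0.2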
876
+
877
+ def lch_similarity(self, other, verbose=False, simulate_root=True):
878
+ """
879
+ Leacock Chodorow Similarity:
880
+ Return a score denoting how similar two word senses are, based on the
881
+ shortest path that connects the senses (as above) and the maximum depth
882
+ of the taxonomy in which the senses occur. The relationship is given as
883
+ -log(p/2d) where p is the shortest path length and d is the taxonomy
884
+ depth.
885
+
886
+ :type other: Synset
887
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
888
+ :type simulate_root: bool
889
+ :param simulate_root: The various verb taxonomies do not
890
+ share a single root which disallows this metric from working for
891
+ synsets that are not connected. This flag (True by default)
892
+ creates a fake root that connects all the taxonomies. Set it
893
+ to false to disable this behavior. For the noun taxonomy,
894
+ there is usually a default root except for WordNet version 1.6.
895
+ If you are using wordnet 1.6, a fake root will be added for nouns
896
+ as well.
897
+ :return: A score denoting the similarity of the two ``Synset`` objects,
898
+ normally greater than 0. None is returned if no connecting path
899
+ could be found. If a ``Synset`` is compared with itself, the
900
+ maximum score is returned, which varies depending on the taxonomy
901
+ depth.
902
+ """
903
+
904
+ if self._pos != other._pos:
905
+ raise WordNetError(
906
+ "Computing the lch similarity requires "
907
+ "%s and %s to have the same part of speech." % (self, other)
908
+ )
909
+
910
+ need_root = self._needs_root()
911
+
912
+ if self._pos not in self._wordnet_corpus_reader._max_depth:
913
+ self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root)
914
+
915
+ depth = self._wordnet_corpus_reader._max_depth[self._pos]
916
+
917
+ distance = self.shortest_path_distance(
918
+ other, simulate_root=simulate_root and need_root
919
+ )
920
+
921
+ if distance is None or distance < 0 or depth == 0:
922
+ return None
923
+ return -math.log((distance + 1) / (2.0 * depth))
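+ # Hedged worked example (WordNet 3.0 noun taxonomy, depth 19): with a
+ # shortest dog/cat path of length 4, the score is -log(5 / 38) ~= 2.028:
+ #   >>> wn.synset('dog.n.01').lch_similarity(wn.synset('cat.n.01'))  # doctest: +ELLIPSIS
+ #   2.028...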
924
+
925
+ def wup_similarity(self, other, verbose=False, simulate_root=True):
926
+ """
927
+ Wu-Palmer Similarity:
928
+ Return a score denoting how similar two word senses are, based on the
929
+ depth of the two senses in the taxonomy and that of their Least Common
930
+ Subsumer (most specific ancestor node). Previously, the scores computed
931
+ by this implementation did _not_ always agree with those given by
932
+ Pedersen's Perl implementation of WordNet Similarity. However, with
933
+ the addition of the simulate_root flag (see below), the scores for
+ verbs now almost always agree, though not always for nouns.
935
+
936
+ The LCS does not necessarily feature in the shortest path connecting
937
+ the two senses, as it is by definition the common ancestor deepest in
938
+ the taxonomy, not closest to the two senses. Typically, however, it
939
+ will so feature. Where multiple candidates for the LCS exist, that
940
+ whose shortest path to the root node is the longest will be selected.
941
+ Where the LCS has multiple paths to the root, the longer path is used
942
+ for the purposes of the calculation.
943
+
944
+ :type other: Synset
945
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
946
+ :type simulate_root: bool
947
+ :param simulate_root: The various verb taxonomies do not
948
+ share a single root, which prevents this metric from working for
949
+ synsets that are not connected. This flag (True by default)
950
+ creates a fake root that connects all the taxonomies. Set it
951
+ to false to disable this behavior. For the noun taxonomy,
952
+ there is usually a default root except for WordNet version 1.6.
953
+ If you are using WordNet 1.6, a fake root will be added for nouns
954
+ as well.
955
+ :return: A float score denoting the similarity of the two ``Synset``
956
+ objects, normally greater than zero. If no connecting path between
957
+ the two senses can be found, None is returned.
958
+
959
+ """
960
+ need_root = self._needs_root() or other._needs_root()
961
+
962
+ # Note that to preserve behavior from NLTK2 we set use_min_depth=True
963
+ # It is possible that more accurate results could be obtained by
964
+ # removing this setting and it should be tested later on
965
+ subsumers = self.lowest_common_hypernyms(
966
+ other, simulate_root=simulate_root and need_root, use_min_depth=True
967
+ )
968
+
969
+ # If no LCS was found return None
970
+ if len(subsumers) == 0:
971
+ return None
972
+
973
+ subsumer = self if self in subsumers else subsumers[0]
974
+
975
+ # Get the longest path from the LCS to the root,
976
+ # including a correction:
977
+ # - add one because the calculations include both the start and end
978
+ # nodes
979
+ depth = subsumer.max_depth() + 1
980
+
981
+ # Note: No need for an additional add-one correction for non-nouns
982
+ # to account for an imaginary root node because that is now
983
+ # automatically handled by simulate_root
984
+ # if subsumer._pos != NOUN:
985
+ # depth += 1
986
+
987
+ # Get the shortest path from the LCS to each of the synsets it is
988
+ # subsuming. Add this to the LCS path length to get the path
989
+ # length from each synset to the root.
990
+ len1 = self.shortest_path_distance(
991
+ subsumer, simulate_root=simulate_root and need_root
992
+ )
993
+ len2 = other.shortest_path_distance(
994
+ subsumer, simulate_root=simulate_root and need_root
995
+ )
996
+ if len1 is None or len2 is None:
997
+ return None
998
+ len1 += depth
999
+ len2 += depth
1000
+ return (2.0 * depth) / (len1 + len2)
1001
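# Sketch of the Wu-Palmer score 2 * depth(LCS) / (len1 + len2), assuming
# WordNet 3.0; the values shown are indicative only.
from nltk.corpus import wordnet as wn

dog, cat = wn.synset("dog.n.01"), wn.synset("cat.n.01")
print(dog.lowest_common_hypernyms(cat))  # [Synset('carnivore.n.01')]
print(dog.wup_similarity(cat))           # roughly 0.86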
+
1002
+ def res_similarity(self, other, ic, verbose=False):
1003
+ """
1004
+ Resnik Similarity:
1005
+ Return a score denoting how similar two word senses are, based on the
1006
+ Information Content (IC) of the Least Common Subsumer (most specific
1007
+ ancestor node).
1008
+
1009
+ :type other: Synset
1010
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1011
+ :type ic: dict
1012
+ :param ic: an information content object (as returned by
1013
+ ``nltk.corpus.wordnet_ic.ic()``).
1014
+ :return: A float score denoting the similarity of the two ``Synset``
1015
+ objects. Synsets whose LCS is the root node of the taxonomy will
1016
+ have a score of 0 (e.g. N['dog'][0] and N['table'][0]).
1017
+ """
1018
+
1019
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1020
+ return lcs_ic
1021
+
1022
+ def jcn_similarity(self, other, ic, verbose=False):
1023
+ """
1024
+ Jiang-Conrath Similarity:
1025
+ Return a score denoting how similar two word senses are, based on the
1026
+ Information Content (IC) of the Least Common Subsumer (most specific
1027
+ ancestor node) and that of the two input Synsets. The relationship is
1028
+ given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)).
1029
+
1030
+ :type other: Synset
1031
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1032
+ :type ic: dict
1033
+ :param ic: an information content object (as returned by
1034
+ ``nltk.corpus.wordnet_ic.ic()``).
1035
+ :return: A float score denoting the similarity of the two ``Synset``
1036
+ objects.
1037
+ """
1038
+
1039
+ if self == other:
1040
+ return _INF
1041
+
1042
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1043
+
1044
+ # If either of the input synsets are the root synset, or have a
1045
+ # frequency of 0 (sparse data problem), return 0.
1046
+ if ic1 == 0 or ic2 == 0:
1047
+ return 0
1048
+
1049
+ ic_difference = ic1 + ic2 - 2 * lcs_ic
1050
+
1051
+ if ic_difference == 0:
1052
+ return _INF
1053
+
1054
+ return 1 / ic_difference
1055
+
1056
+ def lin_similarity(self, other, ic, verbose=False):
1057
+ """
1058
+ Lin Similarity:
1059
+ Return a score denoting how similar two word senses are, based on the
1060
+ Information Content (IC) of the Least Common Subsumer (most specific
1061
+ ancestor node) and that of the two input Synsets. The relationship is
1062
+ given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)).
1063
+
1064
+ :type other: Synset
1065
+ :param other: The ``Synset`` that this ``Synset`` is being compared to.
1066
+ :type ic: dict
1067
+ :param ic: an information content object (as returned by
1068
+ ``nltk.corpus.wordnet_ic.ic()``).
1069
+ :return: A float score denoting the similarity of the two ``Synset``
1070
+ objects, in the range 0 to 1.
1071
+ """
1072
+
1073
+ ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
1074
+ return (2.0 * lcs_ic) / (ic1 + ic2)
1075
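# Sketch covering the three information-content measures above (Resnik,
# Jiang-Conrath, Lin), assuming the wordnet_ic corpus has been downloaded
# (nltk.download("wordnet_ic")); the numbers depend on the chosen IC file.
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic

brown_ic = wordnet_ic.ic("ic-brown.dat")
dog, cat = wn.synset("dog.n.01"), wn.synset("cat.n.01")
print(dog.res_similarity(cat, brown_ic))  # IC of the most informative common subsumer
print(dog.jcn_similarity(cat, brown_ic))  # 1 / (IC(s1) + IC(s2) - 2 * IC(lcs))
print(dog.lin_similarity(cat, brown_ic))  # 2 * IC(lcs) / (IC(s1) + IC(s2)), within [0, 1]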
+
1076
+ def _iter_hypernym_lists(self):
1077
+ """
1078
+ :return: An iterator over ``Synset`` objects that are either proper
1079
+ hypernyms or instance hypernyms of the synset.
1080
+ """
1081
+ todo = [self]
1082
+ seen = set()
1083
+ while todo:
1084
+ for synset in todo:
1085
+ seen.add(synset)
1086
+ yield todo
1087
+ todo = [
1088
+ hypernym
1089
+ for synset in todo
1090
+ for hypernym in (synset.hypernyms() + synset.instance_hypernyms())
1091
+ if hypernym not in seen
1092
+ ]
1093
+
1094
+ def __repr__(self):
1095
+ return f"{type(self).__name__}('{self._name}')"
1096
+
1097
+ def _related(self, relation_symbol, sort=True):
1098
+ get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset
1099
+ if relation_symbol not in self._pointers:
1100
+ return []
1101
+ pointer_tuples = self._pointers[relation_symbol]
1102
+ r = [get_synset(pos, offset) for pos, offset in pointer_tuples]
1103
+ if sort:
1104
+ r.sort()
1105
+ return r
1106
+
1107
+
1108
+ ######################################################################
1109
+ # WordNet Corpus Reader
1110
+ ######################################################################
1111
+
1112
+
1113
+ class WordNetCorpusReader(CorpusReader):
1114
+ """
1115
+ A corpus reader used to access wordnet or its variants.
1116
+ """
1117
+
1118
+ _ENCODING = "utf8"
1119
+
1120
+ # { Part-of-speech constants
1121
+ ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v"
1122
+ # }
1123
+
1124
+ # { Filename constants
1125
+ _FILEMAP = {ADJ: "adj", ADV: "adv", NOUN: "noun", VERB: "verb"}
1126
+ # }
1127
+
1128
+ # { Part of speech constants
1129
+ _pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5}
1130
+ _pos_names = dict(tup[::-1] for tup in _pos_numbers.items())
1131
+ # }
1132
+
1133
+ #: A list of file identifiers for all the fileids used by this
1134
+ #: corpus reader.
1135
+ _FILES = (
1136
+ "cntlist.rev",
1137
+ "lexnames",
1138
+ "index.sense",
1139
+ "index.adj",
1140
+ "index.adv",
1141
+ "index.noun",
1142
+ "index.verb",
1143
+ "data.adj",
1144
+ "data.adv",
1145
+ "data.noun",
1146
+ "data.verb",
1147
+ "adj.exc",
1148
+ "adv.exc",
1149
+ "noun.exc",
1150
+ "verb.exc",
1151
+ )
1152
+
1153
+ def __init__(self, root, omw_reader):
1154
+ """
1155
+ Construct a new wordnet corpus reader, with the given root
1156
+ directory.
1157
+ """
1158
+
1159
+ super().__init__(root, self._FILES, encoding=self._ENCODING)
1160
+
1161
+ # An index that provides the file offset
1162
+ # Map from lemma -> pos -> synset_index -> offset
1163
+ self._lemma_pos_offset_map = defaultdict(dict)
1164
+
1165
+ # A cache so we don't have to reconstruct synsets
1166
+ # Map from pos -> offset -> synset
1167
+ self._synset_offset_cache = defaultdict(dict)
1168
+
1169
+ # A lookup for the maximum depth of each part of speech. Useful for
1170
+ # the lch similarity metric.
1171
+ self._max_depth = defaultdict(dict)
1172
+
1173
+ # Corpus reader containing omw data.
1174
+ self._omw_reader = omw_reader
1175
+
1176
+ # Corpus reader containing extended_omw data.
1177
+ self._exomw_reader = None
1178
+
1179
+ self.provenances = defaultdict(str)
1180
+ self.provenances["eng"] = ""
1181
+
1182
+ if self._omw_reader is None:
1183
+ warnings.warn(
1184
+ "The multilingual functions are not available with this Wordnet version"
1185
+ )
1186
+
1187
+ self.omw_langs = set()
1188
+
1189
+ # A cache to store the wordnet data of multiple languages
1190
+ self._lang_data = defaultdict(list)
1191
+
1192
+ self._data_file_map = {}
1193
+ self._exception_map = {}
1194
+ self._lexnames = []
1195
+ self._key_count_file = None
1196
+ self._key_synset_file = None
1197
+
1198
+ # Load the lexnames
1199
+ with self.open("lexnames") as fp:
1200
+ for i, line in enumerate(fp):
1201
+ index, lexname, _ = line.split()
1202
+ assert int(index) == i
1203
+ self._lexnames.append(lexname)
1204
+
1205
+ # Load the indices for lemmas and synset offsets
1206
+ self._load_lemma_pos_offset_map()
1207
+
1208
+ # load the exception file data into memory
1209
+ self._load_exception_map()
1210
+
1211
+ self.nomap = []
1212
+ self.splits = {}
1213
+
1214
+ # map from WordNet 3.0 for OMW data
1215
+ self.map30 = self.map_wn30()
1216
+
1217
+ # Language data attributes
1218
+ self.lg_attrs = ["lemma", "none", "def", "exe"]
1219
+
1220
+ def index_sense(self, version=None):
1221
+ """Read sense key to synset id mapping from index.sense file in corpus directory"""
1222
+ fn = "index.sense"
1223
+ if version:
1224
+ from nltk.corpus import CorpusReader, LazyCorpusLoader
1225
+
1226
+ ixreader = LazyCorpusLoader(version, CorpusReader, r".*/" + fn)
1227
+ else:
1228
+ ixreader = self
1229
+ with ixreader.open(fn) as fp:
1230
+ sensekey_map = {}
1231
+ for line in fp:
1232
+ fields = line.strip().split()
1233
+ sensekey = fields[0]
1234
+ pos = self._pos_names[int(sensekey.split("%")[1].split(":")[0])]
1235
+ sensekey_map[sensekey] = f"{fields[1]}-{pos}"
1236
+ return sensekey_map
1237
+
1238
+ def map_to_many(self):
1239
+ sensekey_map1 = self.index_sense("wordnet")
1240
+ sensekey_map2 = self.index_sense()
1241
+ synset_to_many = {}
1242
+ for synsetid in set(sensekey_map1.values()):
1243
+ synset_to_many[synsetid] = []
1244
+ for sensekey in set(sensekey_map1.keys()).intersection(
1245
+ set(sensekey_map2.keys())
1246
+ ):
1247
+ source = sensekey_map1[sensekey]
1248
+ target = sensekey_map2[sensekey]
1249
+ synset_to_many[source].append(target)
1250
+ return synset_to_many
1251
+
1252
+ def map_to_one(self):
1253
+ synset_to_many = self.map_to_many()
1254
+ synset_to_one = {}
1255
+ for source in synset_to_many:
1256
+ candidates_bag = synset_to_many[source]
1257
+ if candidates_bag:
1258
+ candidates_set = set(candidates_bag)
1259
+ if len(candidates_set) == 1:
1260
+ target = candidates_bag[0]
1261
+ else:
1262
+ counts = []
1263
+ for candidate in candidates_set:
1264
+ counts.append((candidates_bag.count(candidate), candidate))
1265
+ self.splits[source] = counts
1266
+ target = max(counts)[1]
1267
+ synset_to_one[source] = target
1268
+ if source[-1] == "s":
1269
+ # Add a mapping from "a" to target for applications like omw,
1270
+ # where only Lithuanian and Slovak use the "s" ss_type.
1271
+ synset_to_one[f"{source[:-1]}a"] = target
1272
+ else:
1273
+ self.nomap.append(source)
1274
+ return synset_to_one
1275
+
1276
+ def map_wn30(self):
1277
+ """Mapping from Wordnet 3.0 to currently loaded Wordnet version"""
1278
+ if self.get_version() == "3.0":
1279
+ return None
1280
+ else:
1281
+ return self.map_to_one()
1282
+
1283
+ # Open Multilingual WordNet functions, contributed by
1284
+ # Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn
1285
+
1286
+ def of2ss(self, of):
1287
+ """take an id and return the synsets"""
1288
+ return self.synset_from_pos_and_offset(of[-1], int(of[:8]))
1289
+
1290
+ def ss2of(self, ss):
1291
+ """return the ID of the synset"""
1292
+ if ss:
1293
+ return f"{ss.offset():08d}-{ss.pos()}"
1294
+
1295
+ def _load_lang_data(self, lang):
1296
+ """load the wordnet data of the requested language from the file to
1297
+ the cache, _lang_data"""
1298
+
1299
+ if lang in self._lang_data:
1300
+ return
1301
+
1302
+ if self._omw_reader and not self.omw_langs:
1303
+ self.add_omw()
1304
+
1305
+ if lang not in self.langs():
1306
+ raise WordNetError("Language is not supported.")
1307
+
1308
+ if self._exomw_reader and lang not in self.omw_langs:
1309
+ reader = self._exomw_reader
1310
+ else:
1311
+ reader = self._omw_reader
1312
+
1313
+ prov = self.provenances[lang]
1314
+ if prov in ["cldr", "wikt"]:
1315
+ prov2 = prov
1316
+ else:
1317
+ prov2 = "data"
1318
+
1319
+ with reader.open(f"{prov}/wn-{prov2}-{lang.split('_')[0]}.tab") as fp:
1320
+ self.custom_lemmas(fp, lang)
1321
+ self.disable_custom_lemmas(lang)
1322
+
1323
+ def add_provs(self, reader):
1324
+ """Add languages from Multilingual Wordnet to the provenance dictionary"""
1325
+ fileids = reader.fileids()
1326
+ for fileid in fileids:
1327
+ prov, langfile = os.path.split(fileid)
1328
+ file_name, file_extension = os.path.splitext(langfile)
1329
+ if file_extension == ".tab":
1330
+ lang = file_name.split("-")[-1]
1331
+ if lang in self.provenances or prov in ["cldr", "wikt"]:
1332
+ # We already have another resource for this lang,
1333
+ # so we need to further specify the lang id:
1334
+ lang = f"{lang}_{prov}"
1335
+ self.provenances[lang] = prov
1336
+
1337
+ def add_omw(self):
1338
+ self.add_provs(self._omw_reader)
1339
+ self.omw_langs = set(self.provenances.keys())
1340
+
1341
+ def add_exomw(self):
1342
+ """
1343
+ Add languages from Extended OMW
1344
+
1345
+ >>> import nltk
1346
+ >>> from nltk.corpus import wordnet as wn
1347
+ >>> wn.add_exomw()
1348
+ >>> print(wn.synset('intrinsically.r.01').lemmas(lang="eng_wikt"))
1349
+ [Lemma('intrinsically.r.01.per_se'), Lemma('intrinsically.r.01.as_such')]
1350
+ """
1351
+ from nltk.corpus import extended_omw
1352
+
1353
+ self.add_omw()
1354
+ self._exomw_reader = extended_omw
1355
+ self.add_provs(self._exomw_reader)
1356
+
1357
+ def langs(self):
1358
+ """return a list of languages supported by Multilingual Wordnet"""
1359
+ return list(self.provenances.keys())
1360
+
1361
+ def _load_lemma_pos_offset_map(self):
1362
+ for suffix in self._FILEMAP.values():
1363
+
1364
+ # parse each line of the file (ignoring comment lines)
1365
+ with self.open("index.%s" % suffix) as fp:
1366
+ for i, line in enumerate(fp):
1367
+ if line.startswith(" "):
1368
+ continue
1369
+
1370
+ _iter = iter(line.split())
1371
+
1372
+ def _next_token():
1373
+ return next(_iter)
1374
+
1375
+ try:
1376
+
1377
+ # get the lemma and part-of-speech
1378
+ lemma = _next_token()
1379
+ pos = _next_token()
1380
+
1381
+ # get the number of synsets for this lemma
1382
+ n_synsets = int(_next_token())
1383
+ assert n_synsets > 0
1384
+
1385
+ # get and ignore the pointer symbols for all synsets of
1386
+ # this lemma
1387
+ n_pointers = int(_next_token())
1388
+ [_next_token() for _ in range(n_pointers)]
1389
+
1390
+ # same as number of synsets
1391
+ n_senses = int(_next_token())
1392
+ assert n_synsets == n_senses
1393
+
1394
+ # get and ignore number of senses ranked according to
1395
+ # frequency
1396
+ _next_token()
1397
+
1398
+ # get synset offsets
1399
+ synset_offsets = [int(_next_token()) for _ in range(n_synsets)]
1400
+
1401
+ # raise more informative error with file name and line number
1402
+ except (AssertionError, ValueError) as e:
1403
+ tup = ("index.%s" % suffix), (i + 1), e
1404
+ raise WordNetError("file %s, line %i: %s" % tup) from e
1405
+
1406
+ # map lemmas and parts of speech to synsets
1407
+ self._lemma_pos_offset_map[lemma][pos] = synset_offsets
1408
+ if pos == ADJ:
1409
+ self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets
1410
+
1411
+ def _load_exception_map(self):
1412
+ # load the exception file data into memory
1413
+ for pos, suffix in self._FILEMAP.items():
1414
+ self._exception_map[pos] = {}
1415
+ with self.open("%s.exc" % suffix) as fp:
1416
+ for line in fp:
1417
+ terms = line.split()
1418
+ self._exception_map[pos][terms[0]] = terms[1:]
1419
+ self._exception_map[ADJ_SAT] = self._exception_map[ADJ]
1420
+
1421
+ def _compute_max_depth(self, pos, simulate_root):
1422
+ """
1423
+ Compute the max depth for the given part of speech. This is
1424
+ used by the lch similarity metric.
1425
+ """
1426
+ depth = 0
1427
+ for ii in self.all_synsets(pos):
1428
+ try:
1429
+ depth = max(depth, ii.max_depth())
1430
+ except RuntimeError:
1431
+ print(ii)
1432
+ if simulate_root:
1433
+ depth += 1
1434
+ self._max_depth[pos] = depth
1435
+
1436
+ def get_version(self):
1437
+ fh = self._data_file(ADJ)
1438
+ fh.seek(0)
1439
+ for line in fh:
1440
+ match = re.search(r"Word[nN]et (\d+|\d+\.\d+) Copyright", line)
1441
+ if match is not None:
1442
+ version = match.group(1)
1443
+ fh.seek(0)
1444
+ return version
1445
+
1446
+ #############################################################
1447
+ # Loading Lemmas
1448
+ #############################################################
1449
+
1450
+ def lemma(self, name, lang="eng"):
1451
+ """Return lemma object that matches the name"""
1452
+ # cannot simply split on first '.',
1453
+ # e.g.: '.45_caliber.a.01..45_caliber'
1454
+ separator = SENSENUM_RE.search(name).end()
1455
+
1456
+ synset_name, lemma_name = name[: separator - 1], name[separator:]
1457
+
1458
+ synset = self.synset(synset_name)
1459
+ for lemma in synset.lemmas(lang):
1460
+ if lemma._name == lemma_name:
1461
+ return lemma
1462
+ raise WordNetError(f"No lemma {lemma_name!r} in {synset_name!r}")
1463
+
1464
+ def lemma_from_key(self, key):
1465
+ # Keys in the index are always lower-case, so normalize the input key
1466
+ key = key.lower()
1467
+
1468
+ lemma_name, lex_sense = key.split("%")
1469
+ pos_number, lexname_index, lex_id, _, _ = lex_sense.split(":")
1470
+ pos = self._pos_names[int(pos_number)]
1471
+
1472
+ # open the key -> synset file if necessary
1473
+ if self._key_synset_file is None:
1474
+ self._key_synset_file = self.open("index.sense")
1475
+
1476
+ # Find the synset for the lemma.
1477
+ synset_line = _binary_search_file(self._key_synset_file, key)
1478
+ if not synset_line:
1479
+ raise WordNetError("No synset found for key %r" % key)
1480
+ offset = int(synset_line.split()[1])
1481
+ synset = self.synset_from_pos_and_offset(pos, offset)
1482
+ # return the corresponding lemma
1483
+ for lemma in synset._lemmas:
1484
+ if lemma._key == key:
1485
+ return lemma
1486
+ raise WordNetError("No lemma found for for key %r" % key)
1487
+
1488
+ #############################################################
1489
+ # Loading Synsets
1490
+ #############################################################
1491
+ def synset(self, name):
1492
+ # split name into lemma, part of speech and synset number
1493
+ lemma, pos, synset_index_str = name.lower().rsplit(".", 2)
1494
+ synset_index = int(synset_index_str) - 1
1495
+
1496
+ # get the offset for this synset
1497
+ try:
1498
+ offset = self._lemma_pos_offset_map[lemma][pos][synset_index]
1499
+ except KeyError as e:
1500
+ raise WordNetError(f"No lemma {lemma!r} with part of speech {pos!r}") from e
1501
+ except IndexError as e:
1502
+ n_senses = len(self._lemma_pos_offset_map[lemma][pos])
1503
+ raise WordNetError(
1504
+ f"Lemma {lemma!r} with part of speech {pos!r} only "
1505
+ f"has {n_senses} {'sense' if n_senses == 1 else 'senses'}"
1506
+ ) from e
1507
+
1508
+ # load synset information from the appropriate file
1509
+ synset = self.synset_from_pos_and_offset(pos, offset)
1510
+
1511
+ # some basic sanity checks on loaded attributes
1512
+ if pos == "s" and synset._pos == "a":
1513
+ message = (
1514
+ "Adjective satellite requested but only plain "
1515
+ "adjective found for lemma %r"
1516
+ )
1517
+ raise WordNetError(message % lemma)
1518
+ assert synset._pos == pos or (pos == "a" and synset._pos == "s")
1519
+
1520
+ # Return the synset object.
1521
+ return synset
1522
+
1523
+ def _data_file(self, pos):
1524
+ """
1525
+ Return an open file pointer for the data file for the given
1526
+ part of speech.
1527
+ """
1528
+ if pos == ADJ_SAT:
1529
+ pos = ADJ
1530
+ if self._data_file_map.get(pos) is None:
1531
+ fileid = "data.%s" % self._FILEMAP[pos]
1532
+ self._data_file_map[pos] = self.open(fileid)
1533
+ return self._data_file_map[pos]
1534
+
1535
+ def synset_from_pos_and_offset(self, pos, offset):
1536
+ """
1537
+ - pos: The synset's part of speech, matching one of the module level
1538
+ attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v').
1539
+ - offset: The byte offset of this synset in the WordNet dict file
1540
+ for this pos.
1541
+
1542
+ >>> from nltk.corpus import wordnet as wn
1543
+ >>> print(wn.synset_from_pos_and_offset('n', 1740))
1544
+ Synset('entity.n.01')
1545
+ """
1546
+ # Check to see if the synset is in the cache
1547
+ if offset in self._synset_offset_cache[pos]:
1548
+ return self._synset_offset_cache[pos][offset]
1549
+
1550
+ data_file = self._data_file(pos)
1551
+ data_file.seek(offset)
1552
+ data_file_line = data_file.readline()
1553
+ # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line:
1554
+ line_offset = data_file_line[:8]
1555
+ if (
1556
+ line_offset.isalnum()
1557
+ and line_offset == f"{'0'*(8-len(str(offset)))}{str(offset)}"
1558
+ ):
1559
+ synset = self._synset_from_pos_and_line(pos, data_file_line)
1560
+ assert synset._offset == offset
1561
+ self._synset_offset_cache[pos][offset] = synset
1562
+ else:
1563
+ synset = None
1564
+ warnings.warn(f"No WordNet synset found for pos={pos} at offset={offset}.")
1565
+ data_file.seek(0)
1566
+ return synset
1567
+
1568
+ @deprecated("Use public method synset_from_pos_and_offset() instead")
1569
+ def _synset_from_pos_and_offset(self, *args, **kwargs):
1570
+ """
1571
+ Hack to help people like the readers of
1572
+ https://stackoverflow.com/a/27145655/1709587
1573
+ who were using this function before it was officially a public method
1574
+ """
1575
+ return self.synset_from_pos_and_offset(*args, **kwargs)
1576
+
1577
+ def _synset_from_pos_and_line(self, pos, data_file_line):
1578
+ # Construct a new (empty) synset.
1579
+ synset = Synset(self)
1580
+
1581
+ # parse the entry for this synset
1582
+ try:
1583
+
1584
+ # parse out the definitions and examples from the gloss
1585
+ columns_str, gloss = data_file_line.strip().split("|")
1586
+ definition = re.sub(r"[\"].*?[\"]", "", gloss).strip()
1587
+ examples = re.findall(r'"([^"]*)"', gloss)
1588
+ for example in examples:
1589
+ synset._examples.append(example)
1590
+
1591
+ synset._definition = definition.strip("; ")
1592
+
1593
+ # split the other info into fields
1594
+ _iter = iter(columns_str.split())
1595
+
1596
+ def _next_token():
1597
+ return next(_iter)
1598
+
1599
+ # get the offset
1600
+ synset._offset = int(_next_token())
1601
+
1602
+ # determine the lexicographer file name
1603
+ lexname_index = int(_next_token())
1604
+ synset._lexname = self._lexnames[lexname_index]
1605
+
1606
+ # get the part of speech
1607
+ synset._pos = _next_token()
1608
+
1609
+ # create Lemma objects for each lemma
1610
+ n_lemmas = int(_next_token(), 16)
1611
+ for _ in range(n_lemmas):
1612
+ # get the lemma name
1613
+ lemma_name = _next_token()
1614
+ # get the lex_id (used for sense_keys)
1615
+ lex_id = int(_next_token(), 16)
1616
+ # If the lemma has a syntactic marker, extract it.
1617
+ m = re.match(r"(.*?)(\(.*\))?$", lemma_name)
1618
+ lemma_name, syn_mark = m.groups()
1619
+ # create the lemma object
1620
+ lemma = Lemma(self, synset, lemma_name, lexname_index, lex_id, syn_mark)
1621
+ synset._lemmas.append(lemma)
1622
+ synset._lemma_names.append(lemma._name)
1623
+
1624
+ # collect the pointer tuples
1625
+ n_pointers = int(_next_token())
1626
+ for _ in range(n_pointers):
1627
+ symbol = _next_token()
1628
+ offset = int(_next_token())
1629
+ pos = _next_token()
1630
+ lemma_ids_str = _next_token()
1631
+ if lemma_ids_str == "0000":
1632
+ synset._pointers[symbol].add((pos, offset))
1633
+ else:
1634
+ source_index = int(lemma_ids_str[:2], 16) - 1
1635
+ target_index = int(lemma_ids_str[2:], 16) - 1
1636
+ source_lemma_name = synset._lemmas[source_index]._name
1637
+ lemma_pointers = synset._lemma_pointers
1638
+ tups = lemma_pointers[source_lemma_name, symbol]
1639
+ tups.append((pos, offset, target_index))
1640
+
1641
+ # read the verb frames
1642
+ try:
1643
+ frame_count = int(_next_token())
1644
+ except StopIteration:
1645
+ pass
1646
+ else:
1647
+ for _ in range(frame_count):
1648
+ # read the plus sign
1649
+ plus = _next_token()
1650
+ assert plus == "+"
1651
+ # read the frame and lemma number
1652
+ frame_number = int(_next_token())
1653
+ frame_string_fmt = VERB_FRAME_STRINGS[frame_number]
1654
+ lemma_number = int(_next_token(), 16)
1655
+ # lemma number of 00 means all words in the synset
1656
+ if lemma_number == 0:
1657
+ synset._frame_ids.append(frame_number)
1658
+ for lemma in synset._lemmas:
1659
+ lemma._frame_ids.append(frame_number)
1660
+ lemma._frame_strings.append(frame_string_fmt % lemma._name)
1661
+ # only a specific word in the synset
1662
+ else:
1663
+ lemma = synset._lemmas[lemma_number - 1]
1664
+ lemma._frame_ids.append(frame_number)
1665
+ lemma._frame_strings.append(frame_string_fmt % lemma._name)
1666
+
1667
+ # raise a more informative error with line text
1668
+ except ValueError as e:
1669
+ raise WordNetError(f"line {data_file_line!r}: {e}") from e
1670
+
1671
+ # set sense keys for Lemma objects - note that this has to be
1672
+ # done afterwards so that the relations are available
1673
+ for lemma in synset._lemmas:
1674
+ if synset._pos == ADJ_SAT:
1675
+ head_lemma = synset.similar_tos()[0]._lemmas[0]
1676
+ head_name = head_lemma._name
1677
+ head_id = "%02d" % head_lemma._lex_id
1678
+ else:
1679
+ head_name = head_id = ""
1680
+ tup = (
1681
+ lemma._name,
1682
+ WordNetCorpusReader._pos_numbers[synset._pos],
1683
+ lemma._lexname_index,
1684
+ lemma._lex_id,
1685
+ head_name,
1686
+ head_id,
1687
+ )
1688
+ lemma._key = ("%s%%%d:%02d:%02d:%s:%s" % tup).lower()
1689
+
1690
+ # the canonical name is based on the first lemma
1691
+ lemma_name = synset._lemmas[0]._name.lower()
1692
+ offsets = self._lemma_pos_offset_map[lemma_name][synset._pos]
1693
+ sense_index = offsets.index(synset._offset)
1694
+ tup = lemma_name, synset._pos, sense_index + 1
1695
+ synset._name = "%s.%s.%02i" % tup
1696
+
1697
+ return synset
1698
+
1699
+ def synset_from_sense_key(self, sense_key):
1700
+ """
1701
+ Retrieves synset based on a given sense_key. Sense keys can be
1702
+ obtained from lemma.key()
1703
+
1704
+ From https://wordnet.princeton.edu/documentation/senseidx5wn:
1705
+ A sense_key is represented as::
1706
+
1707
+ lemma % lex_sense (e.g. 'dog%1:18:01::')
1708
+
1709
+ where lex_sense is encoded as::
1710
+
1711
+ ss_type:lex_filenum:lex_id:head_word:head_id
1712
+
1713
+ :lemma: ASCII text of word/collocation, in lower case
1714
+ :ss_type: synset type for the sense (1 digit int)
1715
+ The synset type is encoded as follows::
1716
+
1717
+ 1 NOUN
1718
+ 2 VERB
1719
+ 3 ADJECTIVE
1720
+ 4 ADVERB
1721
+ 5 ADJECTIVE SATELLITE
1722
+ :lex_filenum: name of lexicographer file containing the synset for the sense (2 digit int)
1723
+ :lex_id: when paired with lemma, uniquely identifies a sense in the lexicographer file (2 digit int)
1724
+ :head_word: lemma of the first word in satellite's head synset
1725
+ Only used if sense is in an adjective satellite synset
1726
+ :head_id: uniquely identifies sense in a lexicographer file when paired with head_word
1727
+ Only used if head_word is present (2 digit int)
1728
+
1729
+ >>> import nltk
1730
+ >>> from nltk.corpus import wordnet as wn
1731
+ >>> print(wn.synset_from_sense_key("drive%1:04:03::"))
1732
+ Synset('drive.n.06')
1733
+
1734
+ >>> print(wn.synset_from_sense_key("driving%1:04:03::"))
1735
+ Synset('drive.n.06')
1736
+ """
1737
+ return self.lemma_from_key(sense_key).synset()
1738
+
1739
+ #############################################################
1740
+ # Retrieve synsets and lemmas.
1741
+ #############################################################
1742
+
1743
+ def synsets(self, lemma, pos=None, lang="eng", check_exceptions=True):
1744
+ """Load all synsets with a given lemma and part of speech tag.
1745
+ If no pos is specified, all synsets for all parts of speech
1746
+ will be loaded.
1747
+ If lang is specified, all the synsets associated with the lemma name
1748
+ of that language will be returned.
1749
+ """
1750
+ lemma = lemma.lower()
1751
+
1752
+ if lang == "eng":
1753
+ get_synset = self.synset_from_pos_and_offset
1754
+ index = self._lemma_pos_offset_map
1755
+ if pos is None:
1756
+ pos = POS_LIST
1757
+ return [
1758
+ get_synset(p, offset)
1759
+ for p in pos
1760
+ for form in self._morphy(lemma, p, check_exceptions)
1761
+ for offset in index[form].get(p, [])
1762
+ ]
1763
+
1764
+ else:
1765
+ self._load_lang_data(lang)
1766
+ synset_list = []
1767
+ if lemma in self._lang_data[lang][1]:
1768
+ for l in self._lang_data[lang][1][lemma]:
1769
+ if pos is not None and l[-1] != pos:
1770
+ continue
1771
+ synset_list.append(self.of2ss(l))
1772
+ return synset_list
1773
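# Sketch of the lemma lookup above; the verb reading of "dog" and the Italian
# query are indicative examples, and non-English lookups additionally require
# the OMW data (nltk.download("omw-1.4")).
from nltk.corpus import wordnet as wn

print(wn.synsets("dog", pos=wn.VERB))  # e.g. [Synset('chase.v.01')]
print(wn.synsets("cane", lang="ita"))  # Italian lemma resolved through OMW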
+
1774
+ def lemmas(self, lemma, pos=None, lang="eng"):
1775
+ """Return all Lemma objects with a name matching the specified lemma
1776
+ name and part of speech tag. Matches any part of speech tag if none is
1777
+ specified."""
1778
+
1779
+ lemma = lemma.lower()
1780
+ if lang == "eng":
1781
+ return [
1782
+ lemma_obj
1783
+ for synset in self.synsets(lemma, pos)
1784
+ for lemma_obj in synset.lemmas()
1785
+ if lemma_obj.name().lower() == lemma
1786
+ ]
1787
+
1788
+ else:
1789
+ self._load_lang_data(lang)
1790
+ lemmas = []
1791
+ syn = self.synsets(lemma, lang=lang)
1792
+ for s in syn:
1793
+ if pos is not None and s.pos() != pos:
1794
+ continue
1795
+ for lemma_obj in s.lemmas(lang=lang):
1796
+ if lemma_obj.name().lower() == lemma:
1797
+ lemmas.append(lemma_obj)
1798
+ return lemmas
1799
+
1800
+ def all_lemma_names(self, pos=None, lang="eng"):
1801
+ """Return all lemma names for all synsets for the given
1802
+ part of speech tag and language or languages. If pos is
1803
+ not specified, all synsets for all parts of speech will
1804
+ be used."""
1805
+
1806
+ if lang == "eng":
1807
+ if pos is None:
1808
+ return iter(self._lemma_pos_offset_map)
1809
+ else:
1810
+ return (
1811
+ lemma
1812
+ for lemma in self._lemma_pos_offset_map
1813
+ if pos in self._lemma_pos_offset_map[lemma]
1814
+ )
1815
+ else:
1816
+ self._load_lang_data(lang)
1817
+ lemma = []
1818
+ for i in self._lang_data[lang][0]:
1819
+ if pos is not None and i[-1] != pos:
1820
+ continue
1821
+ lemma.extend(self._lang_data[lang][0][i])
1822
+
1823
+ lemma = iter(set(lemma))
1824
+ return lemma
1825
+
1826
+ def all_omw_synsets(self, pos=None, lang=None):
1827
+ if lang not in self.langs():
1828
+ return None
1829
+ self._load_lang_data(lang)
1830
+ for of in self._lang_data[lang][0]:
1831
+ if not pos or of[-1] == pos:
1832
+ ss = self.of2ss(of)
1833
+ if ss:
1834
+ yield ss
1835
+
1836
+ # else:
1837
+ # A few OMW offsets don't exist in Wordnet 3.0.
1838
+ # warnings.warn(f"Language {lang}: no synset found for {of}")
1839
+
1840
+ def all_synsets(self, pos=None, lang="eng"):
1841
+ """Iterate over all synsets with a given part of speech tag.
1842
+ If no pos is specified, all synsets for all parts of speech
1843
+ will be loaded.
1844
+ """
1845
+ if lang == "eng":
1846
+ return self.all_eng_synsets(pos=pos)
1847
+ else:
1848
+ return self.all_omw_synsets(pos=pos, lang=lang)
1849
+
1850
+ def all_eng_synsets(self, pos=None):
1851
+ if pos is None:
1852
+ pos_tags = self._FILEMAP.keys()
1853
+ else:
1854
+ pos_tags = [pos]
1855
+
1856
+ cache = self._synset_offset_cache
1857
+ from_pos_and_line = self._synset_from_pos_and_line
1858
+
1859
+ # generate all synsets for each part of speech
1860
+ for pos_tag in pos_tags:
1861
+ # Open the file for reading. Note that we can not re-use
1862
+ # the file pointers from self._data_file_map here, because
1863
+ # we're defining an iterator, and those file pointers might
1864
+ # be moved while we're not looking.
1865
+ if pos_tag == ADJ_SAT:
1866
+ pos_file = ADJ
1867
+ else:
1868
+ pos_file = pos_tag
1869
+ fileid = "data.%s" % self._FILEMAP[pos_file]
1870
+ data_file = self.open(fileid)
1871
+
1872
+ try:
1873
+ # generate synsets for each line in the POS file
1874
+ offset = data_file.tell()
1875
+ line = data_file.readline()
1876
+ while line:
1877
+ if not line[0].isspace():
1878
+ if offset in cache[pos_tag]:
1879
+ # See if the synset is cached
1880
+ synset = cache[pos_tag][offset]
1881
+ else:
1882
+ # Otherwise, parse the line
1883
+ synset = from_pos_and_line(pos_tag, line)
1884
+ cache[pos_tag][offset] = synset
1885
+
1886
+ # adjective satellites are in the same file as
1887
+ # adjectives so only yield the synset if it's actually
1888
+ # a satellite
1889
+ if pos_tag == ADJ_SAT and synset._pos == ADJ_SAT:
1890
+ yield synset
1891
+ # for all other POS tags, yield all synsets (this means
1892
+ # that adjectives also include adjective satellites)
1893
+ elif pos_tag != ADJ_SAT:
1894
+ yield synset
1895
+ offset = data_file.tell()
1896
+ line = data_file.readline()
1897
+
1898
+ # close the extra file handle we opened
1899
+ except:
1900
+ data_file.close()
1901
+ raise
1902
+ else:
1903
+ data_file.close()
1904
+
1905
+ def words(self, lang="eng"):
1906
+ """return lemmas of the given language as list of words"""
1907
+ return self.all_lemma_names(lang=lang)
1908
+
1909
+ def synonyms(self, word, lang="eng"):
1910
+ """return nested list with the synonyms of the different senses of word in the given language"""
1911
+ return [
1912
+ sorted(list(set(ss.lemma_names(lang=lang)) - {word}))
1913
+ for ss in self.synsets(word, lang=lang)
1914
+ ]
1915
+
1916
+ def doc(self, file="README", lang="eng"):
1917
+ """Return the contents of readme, license or citation file
1918
+ use lang=lang to get the file for an individual language"""
1919
+ if lang == "eng":
1920
+ reader = self
1921
+ else:
1922
+ reader = self._omw_reader
1923
+ if lang in self.langs():
1924
+ file = f"{os.path.join(self.provenances[lang],file)}"
1925
+ try:
1926
+ with reader.open(file) as fp:
1927
+ return fp.read()
1928
+ except:
1929
+ if lang in self._lang_data:
1930
+ return f"Cannot determine {file} for {lang}"
1931
+ else:
1932
+ return f"Language {lang} is not supported."
1933
+
1934
+ def license(self, lang="eng"):
1935
+ """Return the contents of LICENSE (for omw)
1936
+ use lang=lang to get the license for an individual language"""
1937
+ return self.doc(file="LICENSE", lang=lang)
1938
+
1939
+ def readme(self, lang="eng"):
1940
+ """Return the contents of README (for omw)
1941
+ use lang=lang to get the readme for an individual language"""
1942
+ return self.doc(file="README", lang=lang)
1943
+
1944
+ def citation(self, lang="eng"):
1945
+ """Return the contents of citation.bib file (for omw)
1946
+ use lang=lang to get the citation for an individual language"""
1947
+ return self.doc(file="citation.bib", lang=lang)
1948
+
1949
+ #############################################################
1950
+ # Misc
1951
+ #############################################################
1952
+ def lemma_count(self, lemma):
1953
+ """Return the frequency count for this Lemma"""
1954
+ # Currently, counts only work for English
1955
+ if lemma._lang != "eng":
1956
+ return 0
1957
+ # open the count file if we haven't already
1958
+ if self._key_count_file is None:
1959
+ self._key_count_file = self.open("cntlist.rev")
1960
+ # find the key in the counts file and return the count
1961
+ line = _binary_search_file(self._key_count_file, lemma._key)
1962
+ if line:
1963
+ return int(line.rsplit(" ", 1)[-1])
1964
+ else:
1965
+ return 0
1966
+
1967
+ def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1968
+ return synset1.path_similarity(synset2, verbose, simulate_root)
1969
+
1970
+ path_similarity.__doc__ = Synset.path_similarity.__doc__
1971
+
1972
+ def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1973
+ return synset1.lch_similarity(synset2, verbose, simulate_root)
1974
+
1975
+ lch_similarity.__doc__ = Synset.lch_similarity.__doc__
1976
+
1977
+ def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
1978
+ return synset1.wup_similarity(synset2, verbose, simulate_root)
1979
+
1980
+ wup_similarity.__doc__ = Synset.wup_similarity.__doc__
1981
+
1982
+ def res_similarity(self, synset1, synset2, ic, verbose=False):
1983
+ return synset1.res_similarity(synset2, ic, verbose)
1984
+
1985
+ res_similarity.__doc__ = Synset.res_similarity.__doc__
1986
+
1987
+ def jcn_similarity(self, synset1, synset2, ic, verbose=False):
1988
+ return synset1.jcn_similarity(synset2, ic, verbose)
1989
+
1990
+ jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
1991
+
1992
+ def lin_similarity(self, synset1, synset2, ic, verbose=False):
1993
+ return synset1.lin_similarity(synset2, ic, verbose)
1994
+
1995
+ lin_similarity.__doc__ = Synset.lin_similarity.__doc__
1996
+
1997
+ #############################################################
1998
+ # Morphy
1999
+ #############################################################
2000
+ # Morphy, adapted from Oliver Steele's pywordnet
2001
+ def morphy(self, form, pos=None, check_exceptions=True):
2002
+ """
2003
+ Find a possible base form for the given form, with the given
2004
+ part of speech, by checking WordNet's list of exceptional
2005
+ forms, and by recursively stripping affixes for this part of
2006
+ speech until a form in WordNet is found.
2007
+
2008
+ >>> from nltk.corpus import wordnet as wn
2009
+ >>> print(wn.morphy('dogs'))
2010
+ dog
2011
+ >>> print(wn.morphy('churches'))
2012
+ church
2013
+ >>> print(wn.morphy('aardwolves'))
2014
+ aardwolf
2015
+ >>> print(wn.morphy('abaci'))
2016
+ abacus
2017
+ >>> wn.morphy('hardrock', wn.ADV)
2018
+ >>> print(wn.morphy('book', wn.NOUN))
2019
+ book
2020
+ >>> wn.morphy('book', wn.ADJ)
2021
+ """
2022
+
2023
+ if pos is None:
2024
+ morphy = self._morphy
2025
+ analyses = chain(a for p in POS_LIST for a in morphy(form, p))
2026
+ else:
2027
+ analyses = self._morphy(form, pos, check_exceptions)
2028
+
2029
+ # get the first one we find
2030
+ first = list(islice(analyses, 1))
2031
+ if len(first) == 1:
2032
+ return first[0]
2033
+ else:
2034
+ return None
2035
+
2036
+ MORPHOLOGICAL_SUBSTITUTIONS = {
2037
+ NOUN: [
2038
+ ("s", ""),
2039
+ ("ses", "s"),
2040
+ ("ves", "f"),
2041
+ ("xes", "x"),
2042
+ ("zes", "z"),
2043
+ ("ches", "ch"),
2044
+ ("shes", "sh"),
2045
+ ("men", "man"),
2046
+ ("ies", "y"),
2047
+ ],
2048
+ VERB: [
2049
+ ("s", ""),
2050
+ ("ies", "y"),
2051
+ ("es", "e"),
2052
+ ("es", ""),
2053
+ ("ed", "e"),
2054
+ ("ed", ""),
2055
+ ("ing", "e"),
2056
+ ("ing", ""),
2057
+ ],
2058
+ ADJ: [("er", ""), ("est", ""), ("er", "e"), ("est", "e")],
2059
+ ADV: [],
2060
+ }
2061
+
2062
+ MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ]
2063
+
2064
+ def _morphy(self, form, pos, check_exceptions=True):
2065
+ # from jordanbg:
2066
+ # Given an original string x
2067
+ # 1. Apply rules once to the input to get y1, y2, y3, etc.
2068
+ # 2. Return all that are in the database
2069
+ # 3. If there are no matches, keep applying rules until you either
2070
+ # find a match or you can't go any further
2071
+
2072
+ exceptions = self._exception_map[pos]
2073
+ substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos]
2074
+
2075
+ def apply_rules(forms):
2076
+ return [
2077
+ form[: -len(old)] + new
2078
+ for form in forms
2079
+ for old, new in substitutions
2080
+ if form.endswith(old)
2081
+ ]
2082
+
2083
+ def filter_forms(forms):
2084
+ result = []
2085
+ seen = set()
2086
+ for form in forms:
2087
+ if form in self._lemma_pos_offset_map:
2088
+ if pos in self._lemma_pos_offset_map[form]:
2089
+ if form not in seen:
2090
+ result.append(form)
2091
+ seen.add(form)
2092
+ return result
2093
+
2094
+ # 0. Check the exception lists
2095
+ if check_exceptions:
2096
+ if form in exceptions:
2097
+ return filter_forms([form] + exceptions[form])
2098
+
2099
+ # 1. Apply rules once to the input to get y1, y2, y3, etc.
2100
+ forms = apply_rules([form])
2101
+
2102
+ # 2. Return all that are in the database (and check the original too)
2103
+ results = filter_forms([form] + forms)
2104
+ if results:
2105
+ return results
2106
+
2107
+ # 3. If there are no matches, keep applying rules until we find a match
2108
+ while forms:
2109
+ forms = apply_rules(forms)
2110
+ results = filter_forms(forms)
2111
+ if results:
2112
+ return results
2113
+
2114
+ # Return an empty list if we can't find anything
2115
+ return []
2116
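# Sketch of the rule cascade above: the exception list is consulted first, then
# suffix substitutions are applied and filtered against the lemma index.
# Outputs are what WordNet 3.0 is expected to return.
from nltk.corpus import wordnet as wn

print(wn._morphy("churches", wn.NOUN))    # ['church']    via the "ches" -> "ch" rule
print(wn._morphy("aardwolves", wn.NOUN))  # ['aardwolf']  via the noun exception list
print(wn._morphy("books", wn.VERB))       # ['book']      via the "s" -> "" rule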
+
2117
+ #############################################################
2118
+ # Create information content from corpus
2119
+ #############################################################
2120
+ def ic(self, corpus, weight_senses_equally=False, smoothing=1.0):
2121
+ """
2122
+ Creates an information content lookup dictionary from a corpus.
2123
+
2124
+ :type corpus: CorpusReader
2125
+ :param corpus: The corpus from which we create an information
2126
+ content dictionary.
2127
+ :type weight_senses_equally: bool
2128
+ :param weight_senses_equally: If this is True, gives all
2129
+ possible senses equal weight rather than dividing by the
2130
+ number of possible senses. (If a word has 3 synsets, each
2131
+ sense gets 0.3333 per appearance when this is False, 1.0 when
2132
+ it is True.)
2133
+ :param smoothing: How much do we smooth synset counts (default is 1.0)
2134
+ :type smoothing: float
2135
+ :return: An information content dictionary
2136
+ """
2137
+ counts = FreqDist()
2138
+ for ww in corpus.words():
2139
+ counts[ww] += 1
2140
+
2141
+ ic = {}
2142
+ for pp in POS_LIST:
2143
+ ic[pp] = defaultdict(float)
2144
+
2145
+ # Initialize the counts with the smoothing value
2146
+ if smoothing > 0.0:
2147
+ for pp in POS_LIST:
2148
+ ic[pp][0] = smoothing
2149
+ for ss in self.all_synsets():
2150
+ pos = ss._pos
2151
+ if pos == ADJ_SAT:
2152
+ pos = ADJ
2153
+ ic[pos][ss._offset] = smoothing
2154
+
2155
+ for ww in counts:
2156
+ possible_synsets = self.synsets(ww)
2157
+ if len(possible_synsets) == 0:
2158
+ continue
2159
+
2160
+ # Distribute weight among possible synsets
2161
+ weight = float(counts[ww])
2162
+ if not weight_senses_equally:
2163
+ weight /= float(len(possible_synsets))
2164
+
2165
+ for ss in possible_synsets:
2166
+ pos = ss._pos
2167
+ if pos == ADJ_SAT:
2168
+ pos = ADJ
2169
+ for level in ss._iter_hypernym_lists():
2170
+ for hh in level:
2171
+ ic[pos][hh._offset] += weight
2172
+ # Add the weight to the root
2173
+ ic[pos][0] += weight
2174
+ return ic
2175
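# Sketch of building an IC dictionary from a plain corpus with the method
# above; the genesis corpus is just a small example (nltk.download("genesis")),
# and any corpus reader exposing words() should work.
from nltk.corpus import genesis
from nltk.corpus import wordnet as wn

genesis_ic = wn.ic(genesis, False, 0.0)
dog, cat = wn.synset("dog.n.01"), wn.synset("cat.n.01")
print(dog.res_similarity(cat, genesis_ic))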
+
2176
+ def custom_lemmas(self, tab_file, lang):
2177
+ """
2178
+ Reads a custom tab file containing mappings of lemmas in the given
2179
+ language to Princeton WordNet 3.0 synset offsets, allowing NLTK's
2180
+ WordNet functions to then be used with that language.
2181
+
2182
+ See the "Tab files" section at https://omwn.org/omw1.html for
2183
+ documentation on the Multilingual WordNet tab file format.
2184
+
2185
+ :param tab_file: Tab file as a file or file-like object
2186
+ :type lang: str
2187
+ :param lang: ISO 639-3 code of the language of the tab file
2188
+ """
2189
+ lg = lang.split("_")[0]
2190
+ if len(lg) != 3:
2191
+ raise ValueError("lang should be a (3 character) ISO 639-3 code")
2192
+ self._lang_data[lang] = [
2193
+ defaultdict(list),
2194
+ defaultdict(list),
2195
+ defaultdict(list),
2196
+ defaultdict(list),
2197
+ ]
2198
+ for line in tab_file.readlines():
2199
+ if isinstance(line, bytes):
2200
+ # Support byte-stream files (e.g. as returned by Python 2's
2201
+ # open() function) as well as text-stream ones
2202
+ line = line.decode("utf-8")
2203
+ if not line.startswith("#"):
2204
+ triple = line.strip().split("\t")
2205
+ if len(triple) < 3:
2206
+ continue
2207
+ offset_pos, label = triple[:2]
2208
+ val = triple[-1]
2209
+ if self.map30:
2210
+ if offset_pos in self.map30:
2211
+ # Map offset_pos to current Wordnet version:
2212
+ offset_pos = self.map30[offset_pos]
2213
+ else:
2214
+ # Some OMW offsets were never in Wordnet:
2215
+ if (
2216
+ offset_pos not in self.nomap
2217
+ and offset_pos.replace("a", "s") not in self.nomap
2218
+ ):
2219
+ warnings.warn(
2220
+ f"{lang}: invalid offset {offset_pos} in '{line}'"
2221
+ )
2222
+ continue
2223
+ elif offset_pos[-1] == "a":
2224
+ wnss = self.of2ss(offset_pos)
2225
+ if wnss and wnss.pos() == "s": # Wordnet pos is "s"
2226
+ # Label OMW adjective satellites back to their Wordnet pos ("s")
2227
+ offset_pos = self.ss2of(wnss)
2228
+ pair = label.split(":")
2229
+ attr = pair[-1]
2230
+ if len(pair) == 1 or pair[0] == lg:
2231
+ if attr == "lemma":
2232
+ val = val.strip().replace(" ", "_")
2233
+ self._lang_data[lang][1][val.lower()].append(offset_pos)
2234
+ if attr in self.lg_attrs:
2235
+ self._lang_data[lang][self.lg_attrs.index(attr)][
2236
+ offset_pos
2237
+ ].append(val)
2238
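# Sketch of the tab-file format described above, using a hypothetical language
# code "qqq" and a made-up lemma; offset 02084071-n is assumed to be dog.n.01
# in WordNet 3.0 (other versions would remap it via map30 as in the code above).
import io
from nltk.corpus import wordnet as wn

tab = io.StringIO(
    "# hypothetical wordnet for language qqq\n"
    "02084071-n\tqqq:lemma\thound_word\n"
)
wn.custom_lemmas(tab, lang="qqq")
print(wn.synset("dog.n.01").lemma_names("qqq"))  # ['hound_word']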
+
2239
+ def disable_custom_lemmas(self, lang):
2240
+ """prevent synsets from being mistakenly added"""
2241
+ for n in range(len(self.lg_attrs)):
2242
+ self._lang_data[lang][n].default_factory = None
2243
+
2244
+ ######################################################################
2245
+ # Visualize WordNet relation graphs using Graphviz
2246
+ ######################################################################
2247
+
2248
+ def digraph(
2249
+ self,
2250
+ inputs,
2251
+ rel=lambda s: s.hypernyms(),
2252
+ pos=None,
2253
+ maxdepth=-1,
2254
+ shapes=None,
2255
+ attr=None,
2256
+ verbose=False,
2257
+ ):
2258
+ """
2259
+ Produce a graphical representation from 'inputs' (a list of
2260
+ start nodes, which can be a mix of Synsets, Lemmas and/or words),
2261
+ and a synset relation, for drawing with the 'dot' graph visualisation
2262
+ program from the Graphviz package.
2263
+
2264
+ Return a string in the DOT graph file language, which can then be
2265
+ converted to an image by nltk.parse.dependencygraph.dot2img(dot_string).
2266
+
2267
+ Optional Parameters:
2268
+ :rel: Wordnet synset relation
2269
+ :pos: for words, restricts Part of Speech to 'n', 'v', 'a' or 'r'
2270
+ :maxdepth: limit the longest path
2271
+ :shapes: dictionary of strings that trigger a specified shape
2272
+ :attr: dictionary with global graph attributes
2273
+ :verbose: warn about cycles
2274
+
2275
+ >>> from nltk.corpus import wordnet as wn
2276
+ >>> print(wn.digraph([wn.synset('dog.n.01')]))
2277
+ digraph G {
2278
+ "Synset('animal.n.01')" -> "Synset('organism.n.01')";
2279
+ "Synset('canine.n.02')" -> "Synset('carnivore.n.01')";
2280
+ "Synset('carnivore.n.01')" -> "Synset('placental.n.01')";
2281
+ "Synset('chordate.n.01')" -> "Synset('animal.n.01')";
2282
+ "Synset('dog.n.01')" -> "Synset('canine.n.02')";
2283
+ "Synset('dog.n.01')" -> "Synset('domestic_animal.n.01')";
2284
+ "Synset('domestic_animal.n.01')" -> "Synset('animal.n.01')";
2285
+ "Synset('living_thing.n.01')" -> "Synset('whole.n.02')";
2286
+ "Synset('mammal.n.01')" -> "Synset('vertebrate.n.01')";
2287
+ "Synset('object.n.01')" -> "Synset('physical_entity.n.01')";
2288
+ "Synset('organism.n.01')" -> "Synset('living_thing.n.01')";
2289
+ "Synset('physical_entity.n.01')" -> "Synset('entity.n.01')";
2290
+ "Synset('placental.n.01')" -> "Synset('mammal.n.01')";
2291
+ "Synset('vertebrate.n.01')" -> "Synset('chordate.n.01')";
2292
+ "Synset('whole.n.02')" -> "Synset('object.n.01')";
2293
+ }
2294
+ <BLANKLINE>
2295
+ """
2296
+ from nltk.util import edge_closure, edges2dot
2297
+
2298
+ synsets = set()
2299
+ edges = set()
2300
+ if not shapes:
2301
+ shapes = dict()
2302
+ if not attr:
2303
+ attr = dict()
2304
+
2305
+ def add_lemma(lem):
2306
+ ss = lem.synset()
2307
+ synsets.add(ss)
2308
+ edges.add((lem, ss))
2309
+
2310
+ for node in inputs:
2311
+ typ = type(node)
2312
+ if typ == Synset:
2313
+ synsets.add(node)
2314
+ elif typ == Lemma:
2315
+ add_lemma(node)
2316
+ elif typ == str:
2317
+ for lemma in self.lemmas(node, pos):
2318
+ add_lemma(lemma)
2319
+
2320
+ for ss in synsets:
2321
+ edges = edges.union(edge_closure(ss, rel, maxdepth, verbose))
2322
+ dot_string = edges2dot(sorted(list(edges)), shapes=shapes, attr=attr)
2323
+ return dot_string
2324
+
2325
+
2326
+ ######################################################################
2327
+ # WordNet Information Content Corpus Reader
2328
+ ######################################################################
2329
+
2330
+
2331
+ class WordNetICCorpusReader(CorpusReader):
2332
+ """
2333
+ A corpus reader for the WordNet information content corpus.
2334
+ """
2335
+
2336
+ def __init__(self, root, fileids):
2337
+ CorpusReader.__init__(self, root, fileids, encoding="utf8")
2338
+
2339
+ # this load function would be more efficient if the data was pickled
2340
+ # Note that we can't use NLTK's frequency distributions because
2341
+ # synsets are overlapping (each instance of a synset also counts
2342
+ # as an instance of its hypernyms)
2343
+ def ic(self, icfile):
2344
+ """
2345
+ Load an information content file from the wordnet_ic corpus
2346
+ and return a dictionary. This dictionary has just two keys,
2347
+ NOUN and VERB, whose values are dictionaries that map from
2348
+ synsets to information content values.
2349
+
2350
+ :type icfile: str
2351
+ :param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat")
2352
+ :return: An information content dictionary
2353
+ """
2354
+ ic = {}
2355
+ ic[NOUN] = defaultdict(float)
2356
+ ic[VERB] = defaultdict(float)
2357
+ with self.open(icfile) as fp:
2358
+ for num, line in enumerate(fp):
2359
+ if num == 0: # skip the header
2360
+ continue
2361
+ fields = line.split()
2362
+ offset = int(fields[0][:-1])
2363
+ value = float(fields[1])
2364
+ pos = _get_pos(fields[0])
2365
+ if len(fields) == 3 and fields[2] == "ROOT":
2366
+ # Store root count.
2367
+ ic[pos][0] += value
2368
+ if value != 0:
2369
+ ic[pos][offset] = value
2370
+ return ic
2371
+
2372
+
2373
+ ######################################################################
2374
+ # Similarity metrics
2375
+ ######################################################################
2376
+
2377
+ # TODO: Add in the option to manually add a new root node; this will be
2378
+ # useful for verb similarity as there exist multiple verb taxonomies.
2379
+
2380
+ # More information about the metrics is available at
2381
+ # http://marimba.d.umn.edu/similarity/measures.html
2382
+
2383
+
2384
+ def path_similarity(synset1, synset2, verbose=False, simulate_root=True):
2385
+ return synset1.path_similarity(
2386
+ synset2, verbose=verbose, simulate_root=simulate_root
2387
+ )
2388
+
2389
+
2390
+ def lch_similarity(synset1, synset2, verbose=False, simulate_root=True):
2391
+ return synset1.lch_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
2392
+
2393
+
2394
+ def wup_similarity(synset1, synset2, verbose=False, simulate_root=True):
2395
+ return synset1.wup_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
2396
+
2397
+
2398
+ def res_similarity(synset1, synset2, ic, verbose=False):
2399
+ return synset1.res_similarity(synset2, ic, verbose=verbose)
2400
+
2401
+
2402
+ def jcn_similarity(synset1, synset2, ic, verbose=False):
2403
+ return synset1.jcn_similarity(synset2, ic, verbose=verbose)
2404
+
2405
+
2406
+ def lin_similarity(synset1, synset2, ic, verbose=False):
2407
+ return synset1.lin_similarity(synset2, ic, verbose=verbose)
2408
+
2409
+
2410
+ path_similarity.__doc__ = Synset.path_similarity.__doc__
2411
+ lch_similarity.__doc__ = Synset.lch_similarity.__doc__
2412
+ wup_similarity.__doc__ = Synset.wup_similarity.__doc__
2413
+ res_similarity.__doc__ = Synset.res_similarity.__doc__
2414
+ jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
2415
+ lin_similarity.__doc__ = Synset.lin_similarity.__doc__
2416
+
2417
+
2418
+ def _lcs_ic(synset1, synset2, ic, verbose=False):
2419
+ """
2420
+ Get the information content of the least common subsumer that has
2421
+ the highest information content value. If two nodes have no
2422
+ explicit common subsumer, assume that they share an artificial
2423
+ root node that is the hypernym of all explicit roots.
2424
+
2425
+ :type synset1: Synset
2426
+ :param synset1: First input synset.
2427
+ :type synset2: Synset
2428
+ :param synset2: Second input synset. Must be the same part of
2429
+ speech as the first synset.
2430
+ :type ic: dict
2431
+ :param ic: an information content object (as returned by ``load_ic()``).
2432
+ :return: The information content of the two synsets and their most
2433
+ informative subsumer
2434
+ """
2435
+ if synset1._pos != synset2._pos:
2436
+ raise WordNetError(
2437
+ "Computing the least common subsumer requires "
2438
+ "%s and %s to have the same part of speech." % (synset1, synset2)
2439
+ )
2440
+
2441
+ ic1 = information_content(synset1, ic)
2442
+ ic2 = information_content(synset2, ic)
2443
+ subsumers = synset1.common_hypernyms(synset2)
2444
+ if len(subsumers) == 0:
2445
+ subsumer_ic = 0
2446
+ else:
2447
+ subsumer_ic = max(information_content(s, ic) for s in subsumers)
2448
+
2449
+ if verbose:
2450
+ print("> LCS Subsumer by content:", subsumer_ic)
2451
+
2452
+ return ic1, ic2, subsumer_ic
2453
+
2454
+
2455
+ # Utility functions
2456
+
2457
+
2458
+ def information_content(synset, ic):
2459
+ pos = synset._pos
2460
+ if pos == ADJ_SAT:
2461
+ pos = ADJ
2462
+ try:
2463
+ icpos = ic[pos]
2464
+ except KeyError as e:
2465
+ msg = "Information content file has no entries for part-of-speech: %s"
2466
+ raise WordNetError(msg % pos) from e
2467
+
2468
+ counts = icpos[synset._offset]
2469
+ if counts == 0:
2470
+ return _INF
2471
+ else:
2472
+ return -math.log(counts / icpos[0])
2473
+
2474
+
2475
+ # get the part of speech (NOUN or VERB) from the information content record
2476
+ # (each identifier has a 'n' or 'v' suffix)
2477
+
2478
+
2479
+ def _get_pos(field):
2480
+ if field[-1] == "n":
2481
+ return NOUN
2482
+ elif field[-1] == "v":
2483
+ return VERB
2484
+ else:
2485
+ msg = (
2486
+ "Unidentified part of speech in WordNet Information Content file "
2487
+ "for field %s" % field
2488
+ )
2489
+ raise ValueError(msg)
venv/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py ADDED
@@ -0,0 +1,256 @@
1
+ # Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE)
2
+ #
3
+ # Copyright (C) 2001-2015 NLTK Project
4
+ # Author: Selina Dennis <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
10
+ English Prose (YCOE), a 1.5 million word syntactically-annotated
11
+ corpus of Old English prose texts. The corpus is distributed by the
12
+ Oxford Text Archive (http://www.ota.ahds.ac.uk/); it is not included
13
+ with NLTK.
14
+
15
+ The YCOE corpus is divided into 100 files, each representing
16
+ an Old English prose text. Tags used within each text comply
17
+ with the YCOE standard: https://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm
18
+ """
19
+
20
+ import os
21
+ import re
22
+
23
+ from nltk.corpus.reader.api import *
24
+ from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
25
+ from nltk.corpus.reader.tagged import TaggedCorpusReader
26
+ from nltk.corpus.reader.util import *
27
+ from nltk.tokenize import RegexpTokenizer
28
+
29
+
30
+ class YCOECorpusReader(CorpusReader):
31
+ """
32
+ Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old
33
+ English Prose (YCOE), a 1.5 million word syntactically-annotated
34
+ corpus of Old English prose texts.
35
+ """
36
+
37
+ def __init__(self, root, encoding="utf8"):
38
+ CorpusReader.__init__(self, root, [], encoding)
39
+
40
+ self._psd_reader = YCOEParseCorpusReader(
41
+ self.root.join("psd"), ".*", ".psd", encoding=encoding
42
+ )
43
+ self._pos_reader = YCOETaggedCorpusReader(self.root.join("pos"), ".*", ".pos")
44
+
45
+ # Make sure we have a consistent set of items:
46
+ documents = {f[:-4] for f in self._psd_reader.fileids()}
47
+ if {f[:-4] for f in self._pos_reader.fileids()} != documents:
48
+ raise ValueError('Items in "psd" and "pos" ' "subdirectories do not match.")
49
+
50
+ fileids = sorted(
51
+ ["%s.psd" % doc for doc in documents]
52
+ + ["%s.pos" % doc for doc in documents]
53
+ )
54
+ CorpusReader.__init__(self, root, fileids, encoding)
55
+ self._documents = sorted(documents)
56
+
57
+ def documents(self, fileids=None):
58
+ """
59
+ Return a list of document identifiers for all documents in
60
+ this corpus, or for the documents with the given file(s) if
61
+ specified.
62
+ """
63
+ if fileids is None:
64
+ return self._documents
65
+ if isinstance(fileids, str):
66
+ fileids = [fileids]
67
+ for f in fileids:
68
+ if f not in self._fileids:
69
+ raise KeyError("File id %s not found" % f)
70
+ # Strip off the '.pos' and '.psd' extensions.
71
+ return sorted({f[:-4] for f in fileids})
72
+
73
+ def fileids(self, documents=None):
74
+ """
75
+ Return a list of file identifiers for the files that make up
76
+ this corpus, or that store the given document(s) if specified.
77
+ """
78
+ if documents is None:
79
+ return self._fileids
80
+ elif isinstance(documents, str):
81
+ documents = [documents]
82
+ return sorted(
83
+ set(
84
+ ["%s.pos" % doc for doc in documents]
85
+ + ["%s.psd" % doc for doc in documents]
86
+ )
87
+ )
88
+
89
+ def _getfileids(self, documents, subcorpus):
90
+ """
91
+ Helper that selects the appropriate fileids for a given set of
92
+ documents from a given subcorpus (pos or psd).
93
+ """
94
+ if documents is None:
95
+ documents = self._documents
96
+ else:
97
+ if isinstance(documents, str):
98
+ documents = [documents]
99
+ for document in documents:
100
+ if document not in self._documents:
101
+ if document[-4:] in (".pos", ".psd"):
102
+ raise ValueError(
103
+ "Expected a document identifier, not a file "
104
+ "identifier. (Use corpus.documents() to get "
105
+ "a list of document identifiers.)"
106
+ )
107
+ else:
108
+ raise ValueError("Document identifier %s not found" % document)
109
+ return [f"{d}.{subcorpus}" for d in documents]
110
+
111
+ # Delegate to one of our two sub-readers:
112
+ def words(self, documents=None):
113
+ return self._pos_reader.words(self._getfileids(documents, "pos"))
114
+
115
+ def sents(self, documents=None):
116
+ return self._pos_reader.sents(self._getfileids(documents, "pos"))
117
+
118
+ def paras(self, documents=None):
119
+ return self._pos_reader.paras(self._getfileids(documents, "pos"))
120
+
121
+ def tagged_words(self, documents=None):
122
+ return self._pos_reader.tagged_words(self._getfileids(documents, "pos"))
123
+
124
+ def tagged_sents(self, documents=None):
125
+ return self._pos_reader.tagged_sents(self._getfileids(documents, "pos"))
126
+
127
+ def tagged_paras(self, documents=None):
128
+ return self._pos_reader.tagged_paras(self._getfileids(documents, "pos"))
129
+
130
+ def parsed_sents(self, documents=None):
131
+ return self._psd_reader.parsed_sents(self._getfileids(documents, "psd"))
132
+
133
+
134
+ class YCOEParseCorpusReader(BracketParseCorpusReader):
135
+ """Specialized version of the standard bracket parse corpus reader
136
+ that strips out (CODE ...) and (ID ...) nodes."""
137
+
138
+ def _parse(self, t):
139
+ t = re.sub(r"(?u)\((CODE|ID)[^\)]*\)", "", t)
140
+ if re.match(r"\s*\(\s*\)\s*$", t):
141
+ return None
142
+ return BracketParseCorpusReader._parse(self, t)
143
+
144
+
145
+ class YCOETaggedCorpusReader(TaggedCorpusReader):
146
+ def __init__(self, root, items, encoding="utf8"):
147
+ gaps_re = r"(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*"
148
+ sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True)
149
+ TaggedCorpusReader.__init__(
150
+ self, root, items, sep="_", sent_tokenizer=sent_tokenizer
151
+ )
152
+
153
+
154
+ #: A list of all documents and their titles in ycoe.
155
+ documents = {
156
+ "coadrian.o34": "Adrian and Ritheus",
157
+ "coaelhom.o3": "Ælfric, Supplemental Homilies",
158
+ "coaelive.o3": "Ælfric's Lives of Saints",
159
+ "coalcuin": "Alcuin De virtutibus et vitiis",
160
+ "coalex.o23": "Alexander's Letter to Aristotle",
161
+ "coapollo.o3": "Apollonius of Tyre",
162
+ "coaugust": "Augustine",
163
+ "cobede.o2": "Bede's History of the English Church",
164
+ "cobenrul.o3": "Benedictine Rule",
165
+ "coblick.o23": "Blickling Homilies",
166
+ "coboeth.o2": "Boethius' Consolation of Philosophy",
167
+ "cobyrhtf.o3": "Byrhtferth's Manual",
168
+ "cocanedgD": "Canons of Edgar (D)",
169
+ "cocanedgX": "Canons of Edgar (X)",
170
+ "cocathom1.o3": "Ælfric's Catholic Homilies I",
171
+ "cocathom2.o3": "Ælfric's Catholic Homilies II",
172
+ "cochad.o24": "Saint Chad",
173
+ "cochdrul": "Chrodegang of Metz, Rule",
174
+ "cochristoph": "Saint Christopher",
175
+ "cochronA.o23": "Anglo-Saxon Chronicle A",
176
+ "cochronC": "Anglo-Saxon Chronicle C",
177
+ "cochronD": "Anglo-Saxon Chronicle D",
178
+ "cochronE.o34": "Anglo-Saxon Chronicle E",
179
+ "cocura.o2": "Cura Pastoralis",
180
+ "cocuraC": "Cura Pastoralis (Cotton)",
181
+ "codicts.o34": "Dicts of Cato",
182
+ "codocu1.o1": "Documents 1 (O1)",
183
+ "codocu2.o12": "Documents 2 (O1/O2)",
184
+ "codocu2.o2": "Documents 2 (O2)",
185
+ "codocu3.o23": "Documents 3 (O2/O3)",
186
+ "codocu3.o3": "Documents 3 (O3)",
187
+ "codocu4.o24": "Documents 4 (O2/O4)",
188
+ "coeluc1": "Honorius of Autun, Elucidarium 1",
189
+ "coeluc2": "Honorius of Autun, Elucidarium 2",
190
+ "coepigen.o3": "Ælfric's Epilogue to Genesis",
191
+ "coeuphr": "Saint Euphrosyne",
192
+ "coeust": "Saint Eustace and his companions",
193
+ "coexodusP": "Exodus (P)",
194
+ "cogenesiC": "Genesis (C)",
195
+ "cogregdC.o24": "Gregory's Dialogues (C)",
196
+ "cogregdH.o23": "Gregory's Dialogues (H)",
197
+ "coherbar": "Pseudo-Apuleius, Herbarium",
198
+ "coinspolD.o34": "Wulfstan's Institute of Polity (D)",
199
+ "coinspolX": "Wulfstan's Institute of Polity (X)",
200
+ "cojames": "Saint James",
201
+ "colacnu.o23": "Lacnunga",
202
+ "colaece.o2": "Leechdoms",
203
+ "colaw1cn.o3": "Laws, Cnut I",
204
+ "colaw2cn.o3": "Laws, Cnut II",
205
+ "colaw5atr.o3": "Laws, Æthelred V",
206
+ "colaw6atr.o3": "Laws, Æthelred VI",
207
+ "colawaf.o2": "Laws, Alfred",
208
+ "colawafint.o2": "Alfred's Introduction to Laws",
209
+ "colawger.o34": "Laws, Gerefa",
210
+ "colawine.ox2": "Laws, Ine",
211
+ "colawnorthu.o3": "Northumbra Preosta Lagu",
212
+ "colawwllad.o4": "Laws, William I, Lad",
213
+ "coleofri.o4": "Leofric",
214
+ "colsigef.o3": "Ælfric's Letter to Sigefyrth",
215
+ "colsigewB": "Ælfric's Letter to Sigeweard (B)",
216
+ "colsigewZ.o34": "Ælfric's Letter to Sigeweard (Z)",
217
+ "colwgeat": "Ælfric's Letter to Wulfgeat",
218
+ "colwsigeT": "Ælfric's Letter to Wulfsige (T)",
219
+ "colwsigeXa.o34": "Ælfric's Letter to Wulfsige (Xa)",
220
+ "colwstan1.o3": "Ælfric's Letter to Wulfstan I",
221
+ "colwstan2.o3": "Ælfric's Letter to Wulfstan II",
222
+ "comargaC.o34": "Saint Margaret (C)",
223
+ "comargaT": "Saint Margaret (T)",
224
+ "comart1": "Martyrology, I",
225
+ "comart2": "Martyrology, II",
226
+ "comart3.o23": "Martyrology, III",
227
+ "comarvel.o23": "Marvels of the East",
228
+ "comary": "Mary of Egypt",
229
+ "coneot": "Saint Neot",
230
+ "conicodA": "Gospel of Nicodemus (A)",
231
+ "conicodC": "Gospel of Nicodemus (C)",
232
+ "conicodD": "Gospel of Nicodemus (D)",
233
+ "conicodE": "Gospel of Nicodemus (E)",
234
+ "coorosiu.o2": "Orosius",
235
+ "cootest.o3": "Heptateuch",
236
+ "coprefcath1.o3": "Ælfric's Preface to Catholic Homilies I",
237
+ "coprefcath2.o3": "Ælfric's Preface to Catholic Homilies II",
238
+ "coprefcura.o2": "Preface to the Cura Pastoralis",
239
+ "coprefgen.o3": "Ælfric's Preface to Genesis",
240
+ "copreflives.o3": "Ælfric's Preface to Lives of Saints",
241
+ "coprefsolilo": "Preface to Augustine's Soliloquies",
242
+ "coquadru.o23": "Pseudo-Apuleius, Medicina de quadrupedibus",
243
+ "corood": "History of the Holy Rood-Tree",
244
+ "cosevensl": "Seven Sleepers",
245
+ "cosolilo": "St. Augustine's Soliloquies",
246
+ "cosolsat1.o4": "Solomon and Saturn I",
247
+ "cosolsat2": "Solomon and Saturn II",
248
+ "cotempo.o3": "Ælfric's De Temporibus Anni",
249
+ "coverhom": "Vercelli Homilies",
250
+ "coverhomE": "Vercelli Homilies (E)",
251
+ "coverhomL": "Vercelli Homilies (L)",
252
+ "covinceB": "Saint Vincent (Bodley 343)",
253
+ "covinsal": "Vindicta Salvatoris",
254
+ "cowsgosp.o3": "West-Saxon Gospels",
255
+ "cowulf.o34": "Wulfstan's Homilies",
256
+ }
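Since YCOE is not bundled with NLTK, here is a minimal usage sketch for the reader defined above, assuming the corpus has been obtained from the Oxford Text Archive and unpacked to a hypothetical local path:

    from nltk.corpus.reader.ycoe import YCOECorpusReader

    # '/path/to/ycoe' is a placeholder for a local install containing 'pos' and 'psd'.
    ycoe = YCOECorpusReader("/path/to/ycoe")
    print(ycoe.documents()[:5])                  # document ids such as 'coadrian.o34'
    print(ycoe.tagged_sents("coadrian.o34")[0])  # word/tag pairs from the .pos files
    print(ycoe.parsed_sents("coadrian.o34")[0])  # parse trees from the .psd files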
venv/lib/python3.10/site-packages/nltk/corpus/util.py ADDED
@@ -0,0 +1,154 @@
1
+ # Natural Language Toolkit: Corpus Reader Utility Functions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ ######################################################################
9
+ # { Lazy Corpus Loader
10
+ ######################################################################
11
+
12
+ import gc
13
+ import re
14
+
15
+ import nltk
16
+
17
+ TRY_ZIPFILE_FIRST = False
18
+
19
+
20
+ class LazyCorpusLoader:
21
+ """
22
+ To see the API documentation for this lazily loaded corpus, first
23
+ run corpus.ensure_loaded(), and then run help(this_corpus).
24
+
25
+ LazyCorpusLoader is a proxy object which is used to stand in for a
26
+ corpus object before the corpus is loaded. This allows NLTK to
27
+ create an object for each corpus, but defer the costs associated
28
+ with loading those corpora until the first time that they're
29
+ actually accessed.
30
+
31
+ The first time this object is accessed in any way, it will load
32
+ the corresponding corpus, and transform itself into that corpus
33
+ (by modifying its own ``__class__`` and ``__dict__`` attributes).
34
+
35
+ If the corpus cannot be found, then accessing this object will
36
+ raise an exception, displaying installation instructions for the
37
+ NLTK data package. Once the user has properly installed the data
38
+ package (or modified ``nltk.data.path`` to point to its location),
39
+ the corpus object can be used without restarting Python.
40
+
41
+ :param name: The name of the corpus
42
+ :type name: str
43
+ :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader
44
+ :type reader_cls: nltk.corpus.reader.api.CorpusReader
45
+ :param nltk_data_subdir: The subdirectory where the corpus is stored.
46
+ :type nltk_data_subdir: str
47
+ :param `*args`: Any other non-keyword arguments that `reader_cls` might need.
48
+ :param `**kwargs`: Any other keyword arguments that `reader_cls` might need.
49
+ """
50
+
51
+ def __init__(self, name, reader_cls, *args, **kwargs):
52
+ from nltk.corpus.reader.api import CorpusReader
53
+
54
+ assert issubclass(reader_cls, CorpusReader)
55
+ self.__name = self.__name__ = name
56
+ self.__reader_cls = reader_cls
57
+ # If nltk_data_subdir is set explicitly
58
+ if "nltk_data_subdir" in kwargs:
59
+ # Use the specified subdirectory path
60
+ self.subdir = kwargs["nltk_data_subdir"]
61
+ # Pop the `nltk_data_subdir` argument; we don't need it anymore.
62
+ kwargs.pop("nltk_data_subdir", None)
63
+ else: # Otherwise use 'nltk_data/corpora'
64
+ self.subdir = "corpora"
65
+ self.__args = args
66
+ self.__kwargs = kwargs
67
+
68
+ def __load(self):
69
+ # Find the corpus root directory.
70
+ zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name)
71
+ if TRY_ZIPFILE_FIRST:
72
+ try:
73
+ root = nltk.data.find(f"{self.subdir}/{zip_name}")
74
+ except LookupError as e:
75
+ try:
76
+ root = nltk.data.find(f"{self.subdir}/{self.__name}")
77
+ except LookupError:
78
+ raise e
79
+ else:
80
+ try:
81
+ root = nltk.data.find(f"{self.subdir}/{self.__name}")
82
+ except LookupError as e:
83
+ try:
84
+ root = nltk.data.find(f"{self.subdir}/{zip_name}")
85
+ except LookupError:
86
+ raise e
87
+
88
+ # Load the corpus.
89
+ corpus = self.__reader_cls(root, *self.__args, **self.__kwargs)
90
+
91
+ # This is where the magic happens! Transform ourselves into
92
+ # the corpus by modifying our own __dict__ and __class__ to
93
+ # match that of the corpus.
94
+
95
+ args, kwargs = self.__args, self.__kwargs
96
+ name, reader_cls = self.__name, self.__reader_cls
97
+
98
+ self.__dict__ = corpus.__dict__
99
+ self.__class__ = corpus.__class__
100
+
101
+ # _unload support: assign __dict__ and __class__ back, then do GC.
102
+ # after reassigning __dict__ there shouldn't be any references to
103
+ # corpus data so the memory should be deallocated after gc.collect()
104
+ def _unload(self):
105
+ lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs)
106
+ self.__dict__ = lazy_reader.__dict__
107
+ self.__class__ = lazy_reader.__class__
108
+ gc.collect()
109
+
110
+ self._unload = _make_bound_method(_unload, self)
111
+
112
+ def __getattr__(self, attr):
113
+
114
+ # Fix for inspect.isclass under Python 2.6
115
+ # (see https://bugs.python.org/issue1225107).
116
+ # Without this fix tests may take extra 1.5GB RAM
117
+ # because all corpora get loaded during test collection.
118
+ if attr == "__bases__":
119
+ raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'")
120
+
121
+ self.__load()
122
+ # This looks circular, but its not, since __load() changes our
123
+ # __class__ to something new:
124
+ return getattr(self, attr)
125
+
126
+ def __repr__(self):
127
+ return "<{} in {!r} (not loaded yet)>".format(
128
+ self.__reader_cls.__name__,
129
+ ".../corpora/" + self.__name,
130
+ )
131
+
132
+ def _unload(self):
133
+ # If an exception occurs during corpus loading then
134
+ # '_unload' method may be unattached, so __getattr__ can be called;
135
+ # we shouldn't trigger corpus loading again in this case.
136
+ pass
137
+
138
+
139
+ def _make_bound_method(func, self):
140
+ """
141
+ Magic for creating bound methods (used for _unload).
142
+ """
143
+
144
+ class Foo:
145
+ def meth(self):
146
+ pass
147
+
148
+ f = Foo()
149
+ bound_method = type(f.meth)
150
+
151
+ try:
152
+ return bound_method(func, self, self.__class__)
153
+ except TypeError: # python3
154
+ return bound_method(func, self)
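As a minimal sketch of how LazyCorpusLoader is meant to be used (the corpus name and file pattern below are hypothetical; the real declarations live in nltk/corpus/__init__.py):

    from nltk.corpus.reader import PlaintextCorpusReader
    from nltk.corpus.util import LazyCorpusLoader

    # Declaring the proxy is cheap: nothing is read from disk here.
    my_corpus = LazyCorpusLoader("my_corpus", PlaintextCorpusReader, r".*\.txt")

    # The first attribute access triggers __load(), which locates
    # nltk_data/corpora/my_corpus (or my_corpus.zip) and swaps the proxy's
    # __class__ and __dict__ for those of the real reader.
    # print(my_corpus.words()[:10])  # raises LookupError if the data is missing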
venv/lib/python3.10/site-packages/nltk/tbl/__init__.py ADDED
@@ -0,0 +1,31 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ Transformation Based Learning
12
+
13
+ A general purpose package for Transformation Based Learning,
14
+ currently used by nltk.tag.BrillTagger.
15
+
16
+ isort:skip_file
17
+ """
18
+
19
+ from nltk.tbl.template import Template
20
+
21
+ # API: Template(...), Template.expand(...)
22
+
23
+ from nltk.tbl.feature import Feature
24
+
25
+ # API: Feature(...), Feature.expand(...)
26
+
27
+ from nltk.tbl.rule import Rule
28
+
29
+ # API: Rule.format(...), Rule.templatetid
30
+
31
+ from nltk.tbl.erroranalysis import error_list
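A minimal sketch of the Template/Feature API re-exported above, mirroring what nltk/tbl/demo.py later in this same diff does:

    from nltk.tag.brill import Pos, Word
    from nltk.tbl import Template

    # "Consider rewriting the current tag when the previous tag is X
    # and the current word is Y."
    single = Template(Pos([-1]), Word([0]))

    # expand() generates whole families of templates from feature factories.
    wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False)
    postpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True)
    templates = list(Template.expand([wordtpls, postpls], combinations=(1, 3)))
    print(len(templates), "templates generated")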
venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (540 Bytes).
 
venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc ADDED
Binary file (171 Bytes).
 
venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc ADDED
Binary file (12.5 kB).
 
venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc ADDED
Binary file (1.37 kB).
 
venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc ADDED
Binary file (9.85 kB).
 
venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc ADDED
Binary file (10.4 kB).
 
venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc ADDED
Binary file (12.4 kB).
 
venv/lib/python3.10/site-packages/nltk/tbl/api.py ADDED
File without changes
venv/lib/python3.10/site-packages/nltk/tbl/demo.py ADDED
@@ -0,0 +1,418 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import os
11
+ import pickle
12
+ import random
13
+ import time
14
+
15
+ from nltk.corpus import treebank
16
+ from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
17
+ from nltk.tag.brill import Pos, Word
18
+ from nltk.tbl import Template, error_list
19
+
20
+
21
+ def demo():
22
+ """
23
+ Run a demo with defaults. See source comments for details,
24
+ or docstrings of any of the more specific demo_* functions.
25
+ """
26
+ postag()
27
+
28
+
29
+ def demo_repr_rule_format():
30
+ """
31
+ Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
32
+ """
33
+ postag(ruleformat="repr")
34
+
35
+
36
+ def demo_str_rule_format():
37
+ """
38
+ Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
39
+ """
40
+ postag(ruleformat="str")
41
+
42
+
43
+ def demo_verbose_rule_format():
44
+ """
45
+ Exemplify Rule.format("verbose")
46
+ """
47
+ postag(ruleformat="verbose")
48
+
49
+
50
+ def demo_multiposition_feature():
51
+ """
52
+ Each feature of a template takes a list of positions
53
+ relative to the current word where the feature should be
54
+ looked for, conceptually joined by logical OR. For instance,
55
+ Pos([-1, 1]), given a value V, will hold whenever V is found
56
+ one step to the left and/or one step to the right.
57
+
58
+ For contiguous ranges, a 2-arg form giving inclusive end
59
+ points can also be used: Pos(-3, -1) is the same as the arg
60
+ below.
61
+ """
62
+ postag(templates=[Template(Pos([-3, -2, -1]))])
63
+
64
+
65
+ def demo_multifeature_template():
66
+ """
67
+ Templates can have more than a single feature.
68
+ """
69
+ postag(templates=[Template(Word([0]), Pos([-2, -1]))])
70
+
71
+
72
+ def demo_template_statistics():
73
+ """
74
+ Show aggregate statistics per template. Little-used templates are
75
+ candidates for deletion; much-used templates may possibly be refined.
76
+
77
+ Deleting unused templates is mostly about saving time and/or space:
78
+ training is basically O(T) in the number of templates T
79
+ (also in terms of memory usage, which often will be the limiting factor).
80
+ """
81
+ postag(incremental_stats=True, template_stats=True)
82
+
83
+
84
+ def demo_generated_templates():
85
+ """
86
+ Template.expand and Feature.expand are class methods facilitating
87
+ generating large amounts of templates. See their documentation for
88
+ details.
89
+
90
+ Note: training with 500 templates can easily fill all available
92
+ memory, even on relatively small corpora.
92
+ """
93
+ wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False)
94
+ tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True)
95
+ templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3)))
96
+ print(
97
+ "Generated {} templates for transformation-based learning".format(
98
+ len(templates)
99
+ )
100
+ )
101
+ postag(templates=templates, incremental_stats=True, template_stats=True)
102
+
103
+
104
+ def demo_learning_curve():
105
+ """
106
+ Plot a learning curve -- the contribution on tagging accuracy of
107
+ the individual rules.
108
+ Note: requires matplotlib
109
+ """
110
+ postag(
111
+ incremental_stats=True,
112
+ separate_baseline_data=True,
113
+ learning_curve_output="learningcurve.png",
114
+ )
115
+
116
+
117
+ def demo_error_analysis():
118
+ """
119
+ Writes a file with context for each erroneous word after tagging testing data
120
+ """
121
+ postag(error_output="errors.txt")
122
+
123
+
124
+ def demo_serialize_tagger():
125
+ """
126
+ Serializes the learned tagger to a file in pickle format; reloads it
127
+ and validates the process.
128
+ """
129
+ postag(serialize_output="tagger.pcl")
130
+
131
+
132
+ def demo_high_accuracy_rules():
133
+ """
134
+ Discard rules with low accuracy. This may hurt performance a bit,
135
+ but will often produce rules which are more interesting for a human to read.
136
+ """
137
+ postag(num_sents=3000, min_acc=0.96, min_score=10)
138
+
139
+
140
+ def postag(
141
+ templates=None,
142
+ tagged_data=None,
143
+ num_sents=1000,
144
+ max_rules=300,
145
+ min_score=3,
146
+ min_acc=None,
147
+ train=0.8,
148
+ trace=3,
149
+ randomize=False,
150
+ ruleformat="str",
151
+ incremental_stats=False,
152
+ template_stats=False,
153
+ error_output=None,
154
+ serialize_output=None,
155
+ learning_curve_output=None,
156
+ learning_curve_take=300,
157
+ baseline_backoff_tagger=None,
158
+ separate_baseline_data=False,
159
+ cache_baseline_tagger=None,
160
+ ):
161
+ """
162
+ Brill Tagger Demonstration
163
+ :param templates: the transformation templates to use in rule learning
164
+ :type templates: list of Template
165
+
166
+ :param tagged_data: the corpus of tagged sentences to train and test on (defaults to the treebank sample)
168
+ :type tagged_data: C{list} of C{list} of (word, tag) tuples
168
+
169
+ :param num_sents: how many sentences of training and testing data to use
170
+ :type num_sents: C{int}
171
+
172
+ :param max_rules: maximum number of rule instances to create
173
+ :type max_rules: C{int}
174
+
175
+ :param min_score: the minimum score for a rule in order for it to be considered
176
+ :type min_score: C{int}
177
+
178
+ :param min_acc: the minimum accuracy for a rule in order for it to be considered
179
+ :type min_acc: C{float}
180
+
181
+ :param train: the fraction of the corpus to be used for training (1=all)
182
+ :type train: C{float}
183
+
184
+ :param trace: the level of diagnostic tracing output to produce (0-4)
185
+ :type trace: C{int}
186
+
187
+ :param randomize: whether the training data should be a random subset of the corpus
188
+ :type randomize: C{bool}
189
+
190
+ :param ruleformat: rule output format, one of "str", "repr", "verbose"
191
+ :type ruleformat: C{str}
192
+
193
+ :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
194
+ :type incremental_stats: C{bool}
195
+
196
+ :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
197
+ :type template_stats: C{bool}
198
+
199
+ :param error_output: the file where errors will be saved
200
+ :type error_output: C{string}
201
+
202
+ :param serialize_output: the file where the learned tbl tagger will be saved
203
+ :type serialize_output: C{string}
204
+
205
+ :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
206
+ :type learning_curve_output: C{string}
207
+
208
+ :param learning_curve_take: how many rules plotted
209
+ :type learning_curve_take: C{int}
210
+
211
+ :param baseline_backoff_tagger: the backoff tagger used by the baseline unigram tagger
212
+ :type baseline_backoff_tagger: tagger
213
+
214
+ :param separate_baseline_data: use a fraction of the training data exclusively for training baseline
215
+ :type separate_baseline_data: C{bool}
216
+
217
+ :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get
218
+ deterministic output from the baseline unigram tagger between python versions)
219
+ :type cache_baseline_tagger: C{string}
220
+
221
+
222
+ Note on separate_baseline_data: if False, the training data is reused both for the baseline and the rule learner. This
223
+ is fast and fine for a demo, but is likely to generalize worse on unseen data.
224
+ Also cannot be sensibly used for learning curves on training data (the baseline will be artificially high).
225
+ """
226
+
227
+ # defaults
228
+ baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
229
+ if templates is None:
230
+ from nltk.tag.brill import brill24, describe_template_sets
231
+
232
+ # some pre-built template sets taken from typical systems or publications are
233
+ # available. Print a list with describe_template_sets()
234
+ # for instance:
235
+ templates = brill24()
236
+ (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
237
+ tagged_data, train, num_sents, randomize, separate_baseline_data
238
+ )
239
+
240
+ # creating (or reloading from cache) a baseline tagger (unigram tagger)
241
+ # this is just a mechanism for getting deterministic output from the baseline between
242
+ # python versions
243
+ if cache_baseline_tagger:
244
+ if not os.path.exists(cache_baseline_tagger):
245
+ baseline_tagger = UnigramTagger(
246
+ baseline_data, backoff=baseline_backoff_tagger
247
+ )
248
+ with open(cache_baseline_tagger, "wb") as print_rules:
249
+ pickle.dump(baseline_tagger, print_rules)
250
+ print(
251
+ "Trained baseline tagger, pickled it to {}".format(
252
+ cache_baseline_tagger
253
+ )
254
+ )
255
+ with open(cache_baseline_tagger, "rb") as print_rules:
256
+ baseline_tagger = pickle.load(print_rules)
257
+ print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
258
+ else:
259
+ baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
260
+ print("Trained baseline tagger")
261
+ if gold_data:
262
+ print(
263
+ " Accuracy on test set: {:0.4f}".format(
264
+ baseline_tagger.accuracy(gold_data)
265
+ )
266
+ )
267
+
268
+ # creating a Brill tagger
269
+ tbrill = time.time()
270
+ trainer = BrillTaggerTrainer(
271
+ baseline_tagger, templates, trace, ruleformat=ruleformat
272
+ )
273
+ print("Training tbl tagger...")
274
+ brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
275
+ print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
276
+ if gold_data:
277
+ print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))
278
+
279
+ # printing the learned rules, if learned silently
280
+ if trace == 1:
281
+ print("\nLearned rules: ")
282
+ for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
283
+ print(f"{ruleno:4d} {rule.format(ruleformat):s}")
284
+
285
+ # printing template statistics (optionally including comparison with the training data)
286
+ # note: if not separate_baseline_data, then baseline accuracy will be artificially high
287
+ if incremental_stats:
288
+ print(
289
+ "Incrementally tagging the test data, collecting individual rule statistics"
290
+ )
291
+ (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
292
+ testing_data, gold_data
293
+ )
294
+ print(" Rule statistics collected")
295
+ if not separate_baseline_data:
296
+ print(
297
+ "WARNING: train_stats asked for separate_baseline_data=True; the baseline "
298
+ "will be artificially high"
299
+ )
300
+ trainstats = brill_tagger.train_stats()
301
+ if template_stats:
302
+ brill_tagger.print_template_statistics(teststats)
303
+ if learning_curve_output:
304
+ _demo_plot(
305
+ learning_curve_output, teststats, trainstats, take=learning_curve_take
306
+ )
307
+ print(f"Wrote plot of learning curve to {learning_curve_output}")
308
+ else:
309
+ print("Tagging the test data")
310
+ taggedtest = brill_tagger.tag_sents(testing_data)
311
+ if template_stats:
312
+ brill_tagger.print_template_statistics()
313
+
314
+ # writing error analysis to file
315
+ if error_output is not None:
316
+ with open(error_output, "w") as f:
317
+ f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
318
+ f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
319
+ print(f"Wrote tagger errors including context to {error_output}")
320
+
321
+ # serializing the tagger to a pickle file and reloading (just to see it works)
322
+ if serialize_output is not None:
323
+ taggedtest = brill_tagger.tag_sents(testing_data)
324
+ with open(serialize_output, "wb") as print_rules:
325
+ pickle.dump(brill_tagger, print_rules)
326
+ print(f"Wrote pickled tagger to {serialize_output}")
327
+ with open(serialize_output, "rb") as print_rules:
328
+ brill_tagger_reloaded = pickle.load(print_rules)
329
+ print(f"Reloaded pickled tagger from {serialize_output}")
330
+ taggedtest_reloaded = brill_tagger_reloaded.tag_sents(testing_data)
331
+ if taggedtest == taggedtest_reloaded:
332
+ print("Reloaded tagger tried on test set, results identical")
333
+ else:
334
+ print("PROBLEM: Reloaded tagger gave different results on test set")
335
+
336
+
337
+ def _demo_prepare_data(
338
+ tagged_data, train, num_sents, randomize, separate_baseline_data
339
+ ):
340
+ # train is the proportion of data used in training; the rest is reserved
341
+ # for testing.
342
+ if tagged_data is None:
343
+ print("Loading tagged data from treebank... ")
344
+ tagged_data = treebank.tagged_sents()
345
+ if num_sents is None or len(tagged_data) <= num_sents:
346
+ num_sents = len(tagged_data)
347
+ if randomize:
348
+ random.seed(len(tagged_data))
349
+ random.shuffle(tagged_data)
350
+ cutoff = int(num_sents * train)
351
+ training_data = tagged_data[:cutoff]
352
+ gold_data = tagged_data[cutoff:num_sents]
353
+ testing_data = [[t[0] for t in sent] for sent in gold_data]
354
+ if not separate_baseline_data:
355
+ baseline_data = training_data
356
+ else:
357
+ bl_cutoff = len(training_data) // 3
358
+ (baseline_data, training_data) = (
359
+ training_data[:bl_cutoff],
360
+ training_data[bl_cutoff:],
361
+ )
362
+ (trainseqs, traintokens) = corpus_size(training_data)
363
+ (testseqs, testtokens) = corpus_size(testing_data)
364
+ (bltrainseqs, bltraintokens) = corpus_size(baseline_data)
365
+ print(f"Read testing data ({testseqs:d} sents/{testtokens:d} wds)")
366
+ print(f"Read training data ({trainseqs:d} sents/{traintokens:d} wds)")
367
+ print(
368
+ "Read baseline data ({:d} sents/{:d} wds) {:s}".format(
369
+ bltrainseqs,
370
+ bltraintokens,
371
+ "" if separate_baseline_data else "[reused the training set]",
372
+ )
373
+ )
374
+ return (training_data, baseline_data, gold_data, testing_data)
375
+
376
+
377
+ def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
378
+ testcurve = [teststats["initialerrors"]]
379
+ for rulescore in teststats["rulescores"]:
380
+ testcurve.append(testcurve[-1] - rulescore)
381
+ testcurve = [1 - x / teststats["tokencount"] for x in testcurve[:take]]
382
+
383
+ traincurve = [trainstats["initialerrors"]]
384
+ for rulescore in trainstats["rulescores"]:
385
+ traincurve.append(traincurve[-1] - rulescore)
386
+ traincurve = [1 - x / trainstats["tokencount"] for x in traincurve[:take]]
387
+
388
+ import matplotlib.pyplot as plt
389
+
390
+ r = list(range(len(testcurve)))
391
+ plt.plot(r, testcurve, r, traincurve)
392
+ plt.axis([None, None, None, 1.0])
393
+ plt.savefig(learning_curve_output)
394
+
395
+
396
+ NN_CD_TAGGER = RegexpTagger([(r"^-?[0-9]+(\.[0-9]+)?$", "CD"), (r".*", "NN")])
397
+
398
+ REGEXP_TAGGER = RegexpTagger(
399
+ [
400
+ (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers
401
+ (r"(The|the|A|a|An|an)$", "AT"), # articles
402
+ (r".*able$", "JJ"), # adjectives
403
+ (r".*ness$", "NN"), # nouns formed from adjectives
404
+ (r".*ly$", "RB"), # adverbs
405
+ (r".*s$", "NNS"), # plural nouns
406
+ (r".*ing$", "VBG"), # gerunds
407
+ (r".*ed$", "VBD"), # past tense verbs
408
+ (r".*", "NN"), # nouns (default)
409
+ ]
410
+ )
411
+
412
+
413
+ def corpus_size(seqs):
414
+ return (len(seqs), sum(len(x) for x in seqs))
415
+
416
+
417
+ if __name__ == "__main__":
418
+ demo_learning_curve()
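A minimal sketch of how the demo above might be run (requires the `treebank` data package; the learning-curve variants additionally need matplotlib):

    from nltk.tbl.demo import demo, postag

    demo()                      # defaults: brill24 templates, 1000 treebank sentences
    postag(num_sents=500,       # quicker run on a smaller slice
           max_rules=100,
           min_score=3,
           ruleformat="verbose")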