applied-ai-018 committed
Commit fd6d886 · verified · 1 Parent(s): 2b336e2

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.

Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/setup_fixt.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/nltk/test/bleu.doctest +29 -0
  6. env-llmeval/lib/python3.10/site-packages/nltk/test/bnc.doctest +60 -0
  7. env-llmeval/lib/python3.10/site-packages/nltk/test/ccg.doctest +376 -0
  8. env-llmeval/lib/python3.10/site-packages/nltk/test/chat80.doctest +232 -0
  9. env-llmeval/lib/python3.10/site-packages/nltk/test/classify.doctest +202 -0
  10. env-llmeval/lib/python3.10/site-packages/nltk/test/classify_fixt.py +5 -0
  11. env-llmeval/lib/python3.10/site-packages/nltk/test/collocations.doctest +307 -0
  12. env-llmeval/lib/python3.10/site-packages/nltk/test/concordance.doctest +75 -0
  13. env-llmeval/lib/python3.10/site-packages/nltk/test/data.doctest +387 -0
  14. env-llmeval/lib/python3.10/site-packages/nltk/test/discourse.doctest +552 -0
  15. env-llmeval/lib/python3.10/site-packages/nltk/test/featgram.doctest +610 -0
  16. env-llmeval/lib/python3.10/site-packages/nltk/test/framenet.doctest +288 -0
  17. env-llmeval/lib/python3.10/site-packages/nltk/test/generate.doctest +78 -0
  18. env-llmeval/lib/python3.10/site-packages/nltk/test/gensim.doctest +141 -0
  19. env-llmeval/lib/python3.10/site-packages/nltk/test/gluesemantics.doctest +383 -0
  20. env-llmeval/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py +9 -0
  21. env-llmeval/lib/python3.10/site-packages/nltk/test/index.doctest +100 -0
  22. env-llmeval/lib/python3.10/site-packages/nltk/test/internals.doctest +161 -0
  23. env-llmeval/lib/python3.10/site-packages/nltk/test/lm.doctest +135 -0
  24. env-llmeval/lib/python3.10/site-packages/nltk/test/meteor.doctest +54 -0
  25. env-llmeval/lib/python3.10/site-packages/nltk/test/metrics.doctest +321 -0
  26. env-llmeval/lib/python3.10/site-packages/nltk/test/misc.doctest +118 -0
  27. env-llmeval/lib/python3.10/site-packages/nltk/test/parse.doctest +933 -0
  28. env-llmeval/lib/python3.10/site-packages/nltk/test/portuguese_en.doctest +568 -0
  29. env-llmeval/lib/python3.10/site-packages/nltk/test/semantics.doctest +667 -0
  30. env-llmeval/lib/python3.10/site-packages/nltk/test/setup_fixt.py +26 -0
  31. env-llmeval/lib/python3.10/site-packages/nltk/test/tokenize.doctest +397 -0
  32. env-llmeval/lib/python3.10/site-packages/nltk/test/toolbox.doctest +306 -0
  33. env-llmeval/lib/python3.10/site-packages/nltk/test/tree.doctest +1223 -0
  34. env-llmeval/lib/python3.10/site-packages/nltk/test/treeprettyprinter.doctest +177 -0
  35. env-llmeval/lib/python3.10/site-packages/nltk/test/treetransforms.doctest +154 -0
  36. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__init__.py +0 -0
  37. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_distance.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_aline.py +48 -0
  39. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_bllip.py +42 -0
  40. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_brill.py +34 -0
  41. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py +39 -0
  42. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py +49 -0
  43. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py +85 -0
  44. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_classify.py +49 -0
  45. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_collocations.py +120 -0
  46. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py +98 -0
  47. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py +1436 -0
  48. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py +274 -0
  49. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py +48 -0
  50. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_data.py +15 -0
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc ADDED
Binary file (334 Bytes)
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc ADDED
Binary file (333 Bytes)
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc ADDED
Binary file (393 Bytes)
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/setup_fixt.cpython-310.pyc ADDED
Binary file (1.24 kB)
 
env-llmeval/lib/python3.10/site-packages/nltk/test/bleu.doctest ADDED
@@ -0,0 +1,29 @@
+ ==========
+ BLEU tests
+ ==========
+
+ >>> from nltk.translate import bleu
+
+ If the candidate has no alignment to any of the references, the BLEU score is 0.
+
+ >>> bleu(
+ ...     ['The candidate has no alignment to any of the references'.split()],
+ ...     'John loves Mary'.split(),
+ ...     (1,),
+ ... )
+ 0
+
+ This is an implementation of the smoothing techniques
+ for segment-level BLEU scores that was presented in
+ Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
+ Smoothing Techniques for Sentence-Level BLEU. In WMT14.
+ http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
+ >>> from nltk.translate.bleu_score import sentence_bleu,SmoothingFunction
+
+
+ >>> sentence_bleu(
+ ...     ['It is a place of quiet contemplation .'.split()],
+ ...     'It is .'.split(),
+ ...     smoothing_function=SmoothingFunction().method4,
+ ... )*100
+ 4.4267...
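Editorial aside (not part of the diff): the bleu.doctest hunk above relies on smoothing to obtain a non-zero score for a very short hypothesis. A minimal sketch of the same idea outside doctest form, assuming only that nltk with nltk.translate.bleu_score is installed:

    # Sketch: sentence-level BLEU with and without smoothing.
    from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

    reference = 'It is a place of quiet contemplation .'.split()
    hypothesis = 'It is .'.split()

    # Without smoothing, the higher-order n-gram precisions are zero for such a
    # short hypothesis, so the overall score collapses to 0 (NLTK warns about it).
    raw = sentence_bleu([reference], hypothesis)

    # Chen & Cherry (2014) method4 backs off the zero counts, giving the small
    # positive score quoted in the doctest (about 0.044).
    smoothed = sentence_bleu([reference], hypothesis,
                             smoothing_function=SmoothingFunction().method4)
    print(raw, smoothed)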
env-llmeval/lib/python3.10/site-packages/nltk/test/bnc.doctest ADDED
@@ -0,0 +1,60 @@
+ .. Copyright (C) 2001-2023 NLTK Project
+ .. For license information, see LICENSE.TXT
+
+ >>> import os.path
+
+ >>> from nltk.corpus.reader import BNCCorpusReader
+ >>> import nltk.test
+
+ >>> root = os.path.dirname(nltk.test.__file__)
+ >>> bnc = BNCCorpusReader(root=root, fileids='FX8.xml')
+
+ Checking the word access.
+ -------------------------
+
+ >>> len(bnc.words())
+ 151
+
+ >>> bnc.words()[:6]
+ ['Ah', 'there', 'we', 'are', ',', '.']
+ >>> bnc.words(stem=True)[:6]
+ ['ah', 'there', 'we', 'be', ',', '.']
+
+ >>> bnc.tagged_words()[:6]
+ [('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
+
+ >>> bnc.tagged_words(c5=True)[:6]
+ [('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
+
+ Testing access to the sentences.
+ --------------------------------
+
+ >>> len(bnc.sents())
+ 15
+
+ >>> bnc.sents()[0]
+ ['Ah', 'there', 'we', 'are', ',', '.']
+ >>> bnc.sents(stem=True)[0]
+ ['ah', 'there', 'we', 'be', ',', '.']
+
+ >>> bnc.tagged_sents()[0]
+ [('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
+ >>> bnc.tagged_sents(c5=True)[0]
+ [('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
+
+ A not lazy loader.
+ ------------------
+
+ >>> eager = BNCCorpusReader(root=root, fileids=r'FX8.xml', lazy=False)
+
+ >>> len(eager.words())
+ 151
+ >>> eager.words(stem=True)[6:17]
+ ['right', 'abdominal', 'wound', ',', 'she', 'be', 'a', 'wee', 'bit', 'confuse', '.']
+
+ >>> eager.tagged_words()[6:11]
+ [('Right', 'ADV'), ('abdominal', 'ADJ'), ('wound', 'SUBST'), (',', 'PUN'), ('she', 'PRON')]
+ >>> eager.tagged_words(c5=True)[6:17]
+ [('Right', 'AV0'), ('abdominal', 'AJ0'), ('wound', 'NN1'), (',', 'PUN'), ('she', 'PNP'), ("'s", 'VBZ'), ('a', 'AT0'), ('wee', 'AJ0-NN1'), ('bit', 'NN1'), ('confused', 'VVN-AJ0'), ('.', 'PUN')]
+ >>> len(eager.sents())
+ 15
env-llmeval/lib/python3.10/site-packages/nltk/test/ccg.doctest ADDED
@@ -0,0 +1,376 @@
+ .. Copyright (C) 2001-2023 NLTK Project
+ .. For license information, see LICENSE.TXT
+
+ ==============================
+ Combinatory Categorial Grammar
+ ==============================
+
+ Relative Clauses
+ ----------------
+
+ >>> from nltk.ccg import chart, lexicon
+
+ Construct a lexicon:
+
+ >>> lex = lexicon.fromstring('''
+ ...     :- S, NP, N, VP
+ ...
+ ...     Det :: NP/N
+ ...     Pro :: NP
+ ...     Modal :: S\\NP/VP
+ ...
+ ...     TV :: VP/NP
+ ...     DTV :: TV/NP
+ ...
+ ...     the => Det
+ ...
+ ...     that => Det
+ ...     that => NP
+ ...
+ ...     I => Pro
+ ...     you => Pro
+ ...     we => Pro
+ ...
+ ...     chef => N
+ ...     cake => N
+ ...     children => N
+ ...     dough => N
+ ...
+ ...     will => Modal
+ ...     should => Modal
+ ...     might => Modal
+ ...     must => Modal
+ ...
+ ...     and => var\\.,var/.,var
+ ...
+ ...     to => VP[to]/VP
+ ...
+ ...     without => (VP\\VP)/VP[ing]
+ ...
+ ...     be => TV
+ ...     cook => TV
+ ...     eat => TV
+ ...
+ ...     cooking => VP[ing]/NP
+ ...
+ ...     give => DTV
+ ...
+ ...     is => (S\\NP)/NP
+ ...     prefer => (S\\NP)/NP
+ ...
+ ...     which => (N\\N)/(S/NP)
+ ...
+ ...     persuade => (VP/VP[to])/NP
+ ...     ''')
+
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+ >>> for parse in parser.parse("you prefer that cake".split()):
+ ...     chart.printCCGDerivation(parse)
+ ...     break
+ ...
+  you    prefer      that   cake
+  NP   ((S\NP)/NP)  (NP/N)   N
+              -------------->
+                    NP
+      --------------------------->
+               (S\NP)
+ --------------------------------<
+                S
+
+ >>> for parse in parser.parse("that is the cake which you prefer".split()):
+ ...     chart.printCCGDerivation(parse)
+ ...     break
+ ...
+  that      is        the    cake      which       you    prefer
+   NP   ((S\NP)/NP)  (NP/N)   N    ((N\N)/(S/NP))   NP   ((S\NP)/NP)
+                                                   ----->T
+                                                (S/(S\NP))
+                                                   ------------------>B
+                                                         (S/NP)
+                                   ---------------------------------->
+                                                 (N\N)
+                           ----------------------------------------<
+                                              N
+                   ------------------------------------------------>
+                                          NP
+       ------------------------------------------------------------->
+                                   (S\NP)
+ -------------------------------------------------------------------<
+                                  S
+
+
+ Some other sentences to try:
+ "that is the cake which we will persuade the chef to cook"
+ "that is the cake which we will persuade the chef to give the children"
+
+ >>> sent = "that is the dough which you will eat without cooking".split()
+ >>> nosub_parser = chart.CCGChartParser(lex, chart.ApplicationRuleSet +
+ ...                       chart.CompositionRuleSet + chart.TypeRaiseRuleSet)
+
+ Without Substitution (no output)
+
+ >>> for parse in nosub_parser.parse(sent):
+ ...     chart.printCCGDerivation(parse)
+
+ With Substitution:
+
+ >>> for parse in parser.parse(sent):
+ ...     chart.printCCGDerivation(parse)
+ ...     break
+ ...
+  that      is        the    dough      which       you     will        eat     without           cooking
+   NP   ((S\NP)/NP)  (NP/N)    N    ((N\N)/(S/NP))   NP   ((S\NP)/VP)  (VP/NP)  ((VP\VP)/VP['ing'])  (VP['ing']/NP)
+                                                    ----->T
+                                                 (S/(S\NP))
+                                                                     ------------------------------------->B
+                                                                                  ((VP\VP)/NP)
+                                                             ----------------------------------------------<Sx
+                                                                            (VP/NP)
+                                                    ----------------------------------------------------------->B
+                                                                          ((S\NP)/NP)
+                                                ---------------------------------------------------------------->B
+                                                                            (S/NP)
+                                    -------------------------------------------------------------------------------->
+                                                                      (N\N)
+                            ---------------------------------------------------------------------------------------<
+                                                                    N
+                    ----------------------------------------------------------------------------------------------->
+                                                                  NP
+        ------------------------------------------------------------------------------------------------------------>
+                                                               (S\NP)
+ ------------------------------------------------------------------------------------------------------------------<
+                                                             S
+
+
+ Conjunction
+ -----------
+
+ >>> from nltk.ccg.chart import CCGChartParser, ApplicationRuleSet, CompositionRuleSet
+ >>> from nltk.ccg.chart import SubstitutionRuleSet, TypeRaiseRuleSet, printCCGDerivation
+ >>> from nltk.ccg import lexicon
+
+ Lexicons for the tests:
+
+ >>> test1_lex = '''
+ ...        :- S,N,NP,VP
+ ...        I => NP
+ ...        you => NP
+ ...        will => S\\NP/VP
+ ...        cook => VP/NP
+ ...        which => (N\\N)/(S/NP)
+ ...        and => var\\.,var/.,var
+ ...        might => S\\NP/VP
+ ...        eat => VP/NP
+ ...        the => NP/N
+ ...        mushrooms => N
+ ...        parsnips => N'''
+ >>> test2_lex = '''
+ ...        :- N, S, NP, VP
+ ...        articles => N
+ ...        the => NP/N
+ ...        and => var\\.,var/.,var
+ ...        which => (N\\N)/(S/NP)
+ ...        I => NP
+ ...        anyone => NP
+ ...        will => (S/VP)\\NP
+ ...        file => VP/NP
+ ...        without => (VP\\VP)/VP[ing]
+ ...        forget => VP/NP
+ ...        reading => VP[ing]/NP
+ ...        '''
+
+ Tests handling of conjunctions.
+ Note that while the two derivations are different, they are semantically equivalent.
+
+ >>> lex = lexicon.fromstring(test1_lex)
+ >>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
+ >>> for parse in parser.parse("I will cook and might eat the mushrooms and parsnips".split()):
+ ...     printCCGDerivation(parse)
+  I      will       cook               and                might       eat     the    mushrooms              and              parsnips
+  NP  ((S\NP)/VP)  (VP/NP)  ((_var0\.,_var0)/.,_var0)  ((S\NP)/VP)  (VP/NP)  (NP/N)      N       ((_var0\.,_var0)/.,_var0)      N
+     ---------------------->B
+           ((S\NP)/NP)
+                                                      ---------------------->B
+                                                            ((S\NP)/NP)
+                           ------------------------------------------------->
+                                     (((S\NP)/NP)\.,((S\NP)/NP))
+     -----------------------------------------------------------------------<
+                                  ((S\NP)/NP)
+                                                                                     ------------------------------------->
+                                                                                                  (N\.,N)
+                                                                            ------------------------------------------------<
+                                                                                                   N
+                                                                    -------------------------------------------------------->
+                                                                                              NP
+     ------------------------------------------------------------------------------------------------------------------------------->
+                                                                 (S\NP)
+ -----------------------------------------------------------------------------------------------------------------------------------<
+                                                                  S
+  I      will       cook               and                might       eat     the    mushrooms              and              parsnips
+  NP  ((S\NP)/VP)  (VP/NP)  ((_var0\.,_var0)/.,_var0)  ((S\NP)/VP)  (VP/NP)  (NP/N)      N       ((_var0\.,_var0)/.,_var0)      N
+     ---------------------->B
+           ((S\NP)/NP)
+                                                      ---------------------->B
+                                                            ((S\NP)/NP)
+                           ------------------------------------------------->
+                                     (((S\NP)/NP)\.,((S\NP)/NP))
+     -----------------------------------------------------------------------<
+                                  ((S\NP)/NP)
+     ------------------------------------------------------------------------------->B
+                                       ((S\NP)/N)
+                                                                                     ------------------------------------->
+                                                                                                  (N\.,N)
+                                                                            ------------------------------------------------<
+                                                                                                   N
+     ------------------------------------------------------------------------------------------------------------------------------->
+                                                                 (S\NP)
+ -----------------------------------------------------------------------------------------------------------------------------------<
+                                                                  S
+
+
+ Tests handling subject extraction.
+ Interesting to point that the two parses are clearly semantically different.
+
+ >>> lex = lexicon.fromstring(test2_lex)
+ >>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
+ >>> for parse in parser.parse("articles which I will file and forget without reading".split()):
+ ...     printCCGDerivation(parse)
+  articles      which       I      will       file               and              forget         without           reading
+     N      ((N\N)/(S/NP))  NP  ((S/VP)\NP)  (VP/NP)  ((_var0\.,_var0)/.,_var0)  (VP/NP)  ((VP\VP)/VP['ing'])  (VP['ing']/NP)
+                           -----------------<
+                                (S/VP)
+                                                                                         ------------------------------------->B
+                                                                                                      ((VP\VP)/NP)
+                                                                                 ----------------------------------------------<Sx
+                                                                                                (VP/NP)
+                                                     ------------------------------------------------------------------------->
+                                                                           ((VP/NP)\.,(VP/NP))
+                                            ----------------------------------------------------------------------------------<
+                                                                            (VP/NP)
+                           --------------------------------------------------------------------------------------------------->B
+                                                                       (S/NP)
+            ------------------------------------------------------------------------------------------------------------------->
+                                                                   (N\N)
+ -----------------------------------------------------------------------------------------------------------------------------<
+                                                               N
+  articles      which       I      will       file               and              forget         without           reading
+     N      ((N\N)/(S/NP))  NP  ((S/VP)\NP)  (VP/NP)  ((_var0\.,_var0)/.,_var0)  (VP/NP)  ((VP\VP)/VP['ing'])  (VP['ing']/NP)
+                           -----------------<
+                                (S/VP)
+                                                     ------------------------------------>
+                                                               ((VP/NP)\.,(VP/NP))
+                                            ---------------------------------------------<
+                                                               (VP/NP)
+                                                                                         ------------------------------------->B
+                                                                                                      ((VP\VP)/NP)
+                                            ----------------------------------------------------------------------------------<Sx
+                                                                            (VP/NP)
+                           --------------------------------------------------------------------------------------------------->B
+                                                                       (S/NP)
+            ------------------------------------------------------------------------------------------------------------------->
+                                                                   (N\N)
+ -----------------------------------------------------------------------------------------------------------------------------<
+                                                               N
+
+
+ Unicode support
+ ---------------
+
+ Unicode words are supported.
+
+ >>> from nltk.ccg import chart, lexicon
+
+ Lexicons for the tests:
+
+ >>> lex = lexicon.fromstring('''
+ ...        :- S, N, NP, PP
+ ...
+ ...        AdjI :: N\\N
+ ...        AdjD :: N/N
+ ...        AdvD :: S/S
+ ...        AdvI :: S\\S
+ ...        Det :: NP/N
+ ...        PrepNPCompl :: PP/NP
+ ...        PrepNAdjN :: S\\S/N
+ ...        PrepNAdjNP :: S\\S/NP
+ ...        VPNP :: S\\NP/NP
+ ...        VPPP :: S\\NP/PP
+ ...        VPser :: S\\NP/AdjI
+ ...
+ ...        auto => N
+ ...        bebidas => N
+ ...        cine => N
+ ...        ley => N
+ ...        libro => N
+ ...        ministro => N
+ ...        panadería => N
+ ...        presidente => N
+ ...        super => N
+ ...
+ ...        el => Det
+ ...        la => Det
+ ...        las => Det
+ ...        un => Det
+ ...
+ ...        Ana => NP
+ ...        Pablo => NP
+ ...
+ ...        y => var\\.,var/.,var
+ ...
+ ...        pero => (S/NP)\\(S/NP)/(S/NP)
+ ...
+ ...        anunció => VPNP
+ ...        compró => VPNP
+ ...        cree => S\\NP/S[dep]
+ ...        desmintió => VPNP
+ ...        lee => VPNP
+ ...        fueron => VPPP
+ ...
+ ...        es => VPser
+ ...
+ ...        interesante => AdjD
+ ...        interesante => AdjI
+ ...        nueva => AdjD
+ ...        nueva => AdjI
+ ...
+ ...        a => PrepNPCompl
+ ...        en => PrepNAdjN
+ ...        en => PrepNAdjNP
+ ...
+ ...        ayer => AdvI
+ ...
+ ...        que => (NP\\NP)/(S/NP)
+ ...        que => S[dep]/S
+ ...        ''')
+
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
+ >>> for parse in parser.parse(u"el ministro anunció pero el presidente desmintió la nueva ley".split()):
+ ...     printCCGDerivation(parse)  # doctest: +SKIP
+ ...     # it fails on python2.7 because of the unicode problem explained in https://github.com/nltk/nltk/pull/1354
+ ...     break
+     el    ministro    anunció              pero               el    presidente   desmintió     la    nueva   ley
+  (NP/N)      N      ((S\NP)/NP)  (((S/NP)\(S/NP))/(S/NP))  (NP/N)       N       ((S\NP)/NP)  (NP/N)  (N/N)    N
+ ------------------>
+         NP
+ ------------------>T
+     (S/(S\NP))
+                                                           -------------------->
+                                                                    NP
+                                                           -------------------->T
+                                                               (S/(S\NP))
+                                                           --------------------------------->B
+                                                                        (S/NP)
+                              ----------------------------------------------------------->
+                                                  ((S/NP)\(S/NP))
+                                                                                                      ------------>
+                                                                                                           N
+                                                                                             -------------------->
+                                                                                                      NP
+                                                                                             --------------------<T
+                                                                                                  (S\(S/NP))
+                              -------------------------------------------------------------------------------<B
+                                                           (S\(S/NP))
+                    --------------------------------------------------------------------------------------------<B
+                                                            (S/NP)
+ -------------------------------------------------------------------------------------------------------------->
+                                                        S
env-llmeval/lib/python3.10/site-packages/nltk/test/chat80.doctest ADDED
@@ -0,0 +1,232 @@
+ .. Copyright (C) 2001-2023 NLTK Project
+ .. For license information, see LICENSE.TXT
+
+ =======
+ Chat-80
+ =======
+
+ Chat-80 was a natural language system which allowed the user to
+ interrogate a Prolog knowledge base in the domain of world
+ geography. It was developed in the early '80s by Warren and Pereira; see
+ `<https://aclanthology.org/J82-3002.pdf>`_ for a description and
+ `<http://www.cis.upenn.edu/~pereira/oldies.html>`_ for the source
+ files.
+
+ The ``chat80`` module contains functions to extract data from the Chat-80
+ relation files ('the world database'), and convert them into a format
+ that can be incorporated in the FOL models of
+ ``nltk.sem.evaluate``. The code assumes that the Prolog
+ input files are available in the NLTK corpora directory.
+
+ The Chat-80 World Database consists of the following files::
+
+     world0.pl
+     rivers.pl
+     cities.pl
+     countries.pl
+     contain.pl
+     borders.pl
+
+ This module uses a slightly modified version of ``world0.pl``, in which
+ a set of Prolog rules have been omitted. The modified file is named
+ ``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since
+ it uses a list rather than a string in the second field.
+
+ Reading Chat-80 Files
+ =====================
+
+ Chat-80 relations are like tables in a relational database. The
+ relation acts as the name of the table; the first argument acts as the
+ 'primary key'; and subsequent arguments are further fields in the
+ table. In general, the name of the table provides a label for a unary
+ predicate whose extension is all the primary keys. For example,
+ relations in ``cities.pl`` are of the following form::
+
+     'city(athens,greece,1368).'
+
+ Here, ``'athens'`` is the key, and will be mapped to a member of the
+ unary predicate *city*.
+
+ By analogy with NLTK corpora, ``chat80`` defines a number of 'items'
+ which correspond to the relations.
+
+ >>> from nltk.sem import chat80
+ >>> print(chat80.items)
+ ('borders', 'circle_of_lat', 'circle_of_long', 'city', ...)
+
+ The fields in the table are mapped to binary predicates. The first
+ argument of the predicate is the primary key, while the second
+ argument is the data in the relevant field. Thus, in the above
+ example, the third field is mapped to the binary predicate
+ *population_of*, whose extension is a set of pairs such as
+ ``'(athens, 1368)'``.
+
+ An exception to this general framework is required by the relations in
+ the files ``borders.pl`` and ``contains.pl``. These contain facts of the
+ following form::
+
+     'borders(albania,greece).'
+
+     'contains0(africa,central_africa).'
+
+ We do not want to form a unary concept out of the element in
+ the first field of these records, and we want the label of the binary
+ relation just to be ``'border'``/``'contain'`` respectively.
+
+ In order to drive the extraction process, we use 'relation metadata bundles'
+ which are Python dictionaries such as the following::
+
+     city = {'label': 'city',
+             'closures': [],
+             'schema': ['city', 'country', 'population'],
+             'filename': 'cities.pl'}
+
+ According to this, the file ``city['filename']`` contains a list of
+ relational tuples (or more accurately, the corresponding strings in
+ Prolog form) whose predicate symbol is ``city['label']`` and whose
+ relational schema is ``city['schema']``. The notion of a ``closure`` is
+ discussed in the next section.
+
+ Concepts
+ ========
+ In order to encapsulate the results of the extraction, a class of
+ ``Concept``\ s is introduced. A ``Concept`` object has a number of
+ attributes, in particular a ``prefLabel``, an arity and ``extension``.
+
+ >>> c1 = chat80.Concept('dog', arity=1, extension=set(['d1', 'd2']))
+ >>> print(c1)
+ Label = 'dog'
+ Arity = 1
+ Extension = ['d1', 'd2']
+
+
+
+ The ``extension`` attribute makes it easier to inspect the output of
+ the extraction.
+
+ >>> schema = ['city', 'country', 'population']
+ >>> concepts = chat80.clause2concepts('cities.pl', 'city', schema)
+ >>> concepts
+ [Concept('city'), Concept('country_of'), Concept('population_of')]
+ >>> for c in concepts:
+ ...     print("%s:\n\t%s" % (c.prefLabel, c.extension[:4]))
+ city:
+ 	['athens', 'bangkok', 'barcelona', 'berlin']
+ country_of:
+ 	[('athens', 'greece'), ('bangkok', 'thailand'), ('barcelona', 'spain'), ('berlin', 'east_germany')]
+ population_of:
+ 	[('athens', '1368'), ('bangkok', '1178'), ('barcelona', '1280'), ('berlin', '3481')]
+
+ In addition, the ``extension`` can be further
+ processed: in the case of the ``'border'`` relation, we check that the
+ relation is **symmetric**, and in the case of the ``'contain'``
+ relation, we carry out the **transitive closure**. The closure
+ properties associated with a concept are indicated in the relation
+ metadata, as indicated earlier.
+
+ >>> borders = set([('a1', 'a2'), ('a2', 'a3')])
+ >>> c2 = chat80.Concept('borders', arity=2, extension=borders)
+ >>> print(c2)
+ Label = 'borders'
+ Arity = 2
+ Extension = [('a1', 'a2'), ('a2', 'a3')]
+ >>> c3 = chat80.Concept('borders', arity=2, closures=['symmetric'], extension=borders)
+ >>> c3.close()
+ >>> print(c3)
+ Label = 'borders'
+ Arity = 2
+ Extension = [('a1', 'a2'), ('a2', 'a1'), ('a2', 'a3'), ('a3', 'a2')]
+
+ The ``extension`` of a ``Concept`` object is then incorporated into a
+ ``Valuation`` object.
+
+ Persistence
+ ===========
+ The functions ``val_dump`` and ``val_load`` are provided to allow a
+ valuation to be stored in a persistent database and re-loaded, rather
+ than having to be re-computed each time.
+
+ Individuals and Lexical Items
+ =============================
+ As well as deriving relations from the Chat-80 data, we also create a
+ set of individual constants, one for each entity in the domain. The
+ individual constants are string-identical to the entities. For
+ example, given a data item such as ``'zloty'``, we add to the valuation
+ a pair ``('zloty', 'zloty')``. In order to parse English sentences that
+ refer to these entities, we also create a lexical item such as the
+ following for each individual constant::
+
+     PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'
+
+ The set of rules is written to the file ``chat_pnames.fcfg`` in the
+ current directory.
+
+ SQL Query
+ =========
+
+ The ``city`` relation is also available in RDB form and can be queried
+ using SQL statements.
+
+ >>> import nltk
+ >>> q = "SELECT City, Population FROM city_table WHERE Country = 'china' and Population > 1000"
+ >>> for answer in chat80.sql_query('corpora/city_database/city.db', q):
+ ...     print("%-10s %4s" % answer)
+ canton     1496
+ chungking  1100
+ mukden     1551
+ peking     2031
+ shanghai   5407
+ tientsin   1795
+
+ The (deliberately naive) grammar ``sql.fcfg`` translates from English
+ to SQL:
+
+ >>> nltk.data.show_cfg('grammars/book_grammars/sql0.fcfg')
+ % start S
+ S[SEM=(?np + WHERE + ?vp)] -> NP[SEM=?np] VP[SEM=?vp]
+ VP[SEM=(?v + ?pp)] -> IV[SEM=?v] PP[SEM=?pp]
+ VP[SEM=(?v + ?ap)] -> IV[SEM=?v] AP[SEM=?ap]
+ NP[SEM=(?det + ?n)] -> Det[SEM=?det] N[SEM=?n]
+ PP[SEM=(?p + ?np)] -> P[SEM=?p] NP[SEM=?np]
+ AP[SEM=?pp] -> A[SEM=?a] PP[SEM=?pp]
+ NP[SEM='Country="greece"'] -> 'Greece'
+ NP[SEM='Country="china"'] -> 'China'
+ Det[SEM='SELECT'] -> 'Which' | 'What'
+ N[SEM='City FROM city_table'] -> 'cities'
+ IV[SEM=''] -> 'are'
+ A[SEM=''] -> 'located'
+ P[SEM=''] -> 'in'
+
+ Given this grammar, we can express, and then execute, queries in English.
+
+ >>> cp = nltk.parse.load_parser('grammars/book_grammars/sql0.fcfg')
+ >>> query = 'What cities are in China'
+ >>> for tree in cp.parse(query.split()):
+ ...     answer = tree.label()['SEM']
+ ...     q = " ".join(answer)
+ ...     print(q)
+ ...
+ SELECT City FROM city_table WHERE Country="china"
+
+ >>> rows = chat80.sql_query('corpora/city_database/city.db', q)
+ >>> for r in rows: print("%s" % r, end=' ')
+ canton chungking dairen harbin kowloon mukden peking shanghai sian tientsin
+
+
+ Using Valuations
+ -----------------
+
+ In order to convert such an extension into a valuation, we use the
+ ``make_valuation()`` method; setting ``read=True`` creates and returns
+ a new ``Valuation`` object which contains the results.
+
+ >>> val = chat80.make_valuation(concepts, read=True)
+ >>> 'calcutta' in val['city']
+ True
+ >>> [town for (town, country) in val['country_of'] if country == 'india']
+ ['bombay', 'calcutta', 'delhi', 'hyderabad', 'madras']
+ >>> dom = val.domain
+ >>> g = nltk.sem.Assignment(dom)
+ >>> m = nltk.sem.Model(dom, val)
+ >>> m.evaluate(r'population_of(jakarta, 533)', g)
+ True
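Editorial aside (not part of the diff): the chat80.doctest hunk above describes 'relation metadata bundles' and, much later, shows make_valuation and Model evaluation separately. The sketch below strings those pieces together end to end. It only re-uses calls that appear verbatim in the doctest (clause2concepts, make_valuation, Assignment, Model, evaluate) and assumes the Chat-80 data files from the NLTK corpora are installed; treat it as an illustrative sketch rather than additional test content.

    # Sketch: from a Chat-80 metadata bundle to a queryable first-order model.
    import nltk
    from nltk.sem import chat80

    # Relation metadata bundle, mirroring the one shown in the doctest prose.
    city = {'label': 'city',
            'closures': [],
            'schema': ['city', 'country', 'population'],
            'filename': 'cities.pl'}

    # Extract Concept objects for the 'city' relation and its fields.
    concepts = chat80.clause2concepts(city['filename'], city['label'], city['schema'])

    # Fold the extensions into a Valuation and evaluate a formula against it.
    val = chat80.make_valuation(concepts, read=True)
    g = nltk.sem.Assignment(val.domain)
    m = nltk.sem.Model(val.domain, val)
    print(m.evaluate(r'population_of(athens, 1368)', g))  # expected True, per the extension shown above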
env-llmeval/lib/python3.10/site-packages/nltk/test/classify.doctest ADDED
@@ -0,0 +1,202 @@
+ .. Copyright (C) 2001-2023 NLTK Project
+ .. For license information, see LICENSE.TXT
+
+ =============
+  Classifiers
+ =============
+
+ >>> from nltk.test.classify_fixt import setup_module
+ >>> setup_module()
+
+ Classifiers label tokens with category labels (or *class labels*).
+ Typically, labels are represented with strings (such as ``"health"``
+ or ``"sports"``). In NLTK, classifiers are defined using classes that
+ implement the `ClassifierI` interface, which supports the following operations:
+
+ - self.classify(featureset)
+ - self.classify_many(featuresets)
+ - self.labels()
+ - self.prob_classify(featureset)
+ - self.prob_classify_many(featuresets)
+
+ NLTK defines several classifier classes:
+
+ - `ConditionalExponentialClassifier`
+ - `DecisionTreeClassifier`
+ - `MaxentClassifier`
+ - `NaiveBayesClassifier`
+ - `WekaClassifier`
+
+ Classifiers are typically created by training them on a training
+ corpus.
+
+
+ Regression Tests
+ ~~~~~~~~~~~~~~~~
+
+ We define a very simple training corpus with 3 binary features: ['a',
+ 'b', 'c'], and two labels: ['x', 'y']. We use a simple feature set so
+ that the correct answers can be calculated analytically (although we
+ haven't done this yet for all tests).
+
+ >>> import nltk
+ >>> train = [
+ ...     (dict(a=1,b=1,c=1), 'y'),
+ ...     (dict(a=1,b=1,c=1), 'x'),
+ ...     (dict(a=1,b=1,c=0), 'y'),
+ ...     (dict(a=0,b=1,c=1), 'x'),
+ ...     (dict(a=0,b=1,c=1), 'y'),
+ ...     (dict(a=0,b=0,c=1), 'y'),
+ ...     (dict(a=0,b=1,c=0), 'x'),
+ ...     (dict(a=0,b=0,c=0), 'x'),
+ ...     (dict(a=0,b=1,c=1), 'y'),
+ ...     (dict(a=None,b=1,c=0), 'x'),
+ ...     ]
+ >>> test = [
+ ...     (dict(a=1,b=0,c=1)), # unseen
+ ...     (dict(a=1,b=0,c=0)), # unseen
+ ...     (dict(a=0,b=1,c=1)), # seen 3 times, labels=y,y,x
+ ...     (dict(a=0,b=1,c=0)), # seen 1 time, label=x
+ ...     ]
+
+ Test the Naive Bayes classifier:
+
+ >>> classifier = nltk.classify.NaiveBayesClassifier.train(train)
+ >>> sorted(classifier.labels())
+ ['x', 'y']
+ >>> classifier.classify_many(test)
+ ['y', 'x', 'y', 'x']
+ >>> for pdist in classifier.prob_classify_many(test):
+ ...     print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
+ 0.2500 0.7500
+ 0.5833 0.4167
+ 0.3571 0.6429
+ 0.7000 0.3000
+ >>> classifier.show_most_informative_features()
+ Most Informative Features
+                        c = 0                   x : y      =      2.3 : 1.0
+                        c = 1                   y : x      =      1.8 : 1.0
+                        a = 1                   y : x      =      1.7 : 1.0
+                        a = 0                   x : y      =      1.0 : 1.0
+                        b = 0                   x : y      =      1.0 : 1.0
+                        b = 1                   x : y      =      1.0 : 1.0
+
+ Test the Decision Tree classifier (without None):
+
+ >>> classifier = nltk.classify.DecisionTreeClassifier.train(
+ ...     train[:-1], entropy_cutoff=0,
+ ...     support_cutoff=0)
+ >>> sorted(classifier.labels())
+ ['x', 'y']
+ >>> print(classifier)
+ c=0? .................................................. x
+   a=0? ................................................ x
+   a=1? ................................................ y
+ c=1? .................................................. y
+ <BLANKLINE>
+ >>> classifier.classify_many(test)
+ ['y', 'y', 'y', 'x']
+ >>> for pdist in classifier.prob_classify_many(test):
+ ...     print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
+ Traceback (most recent call last):
+   . . .
+ NotImplementedError
+
+
+ Test the Decision Tree classifier (with None):
+
+ >>> classifier = nltk.classify.DecisionTreeClassifier.train(
+ ...     train, entropy_cutoff=0,
+ ...     support_cutoff=0)
+ >>> sorted(classifier.labels())
+ ['x', 'y']
+ >>> print(classifier)
+ c=0? .................................................. x
+   a=0? ................................................ x
+   a=1? ................................................ y
+   a=None? ............................................. x
+ c=1? .................................................. y
+ <BLANKLINE>
+
+
+ Test SklearnClassifier, which requires the scikit-learn package.
+
+ >>> from nltk.classify import SklearnClassifier
+ >>> from sklearn.naive_bayes import BernoulliNB
+ >>> from sklearn.svm import SVC
+ >>> train_data = [({"a": 4, "b": 1, "c": 0}, "ham"),
+ ...               ({"a": 5, "b": 2, "c": 1}, "ham"),
+ ...               ({"a": 0, "b": 3, "c": 4}, "spam"),
+ ...               ({"a": 5, "b": 1, "c": 1}, "ham"),
+ ...               ({"a": 1, "b": 4, "c": 3}, "spam")]
+ >>> classif = SklearnClassifier(BernoulliNB()).train(train_data)
+ >>> test_data = [{"a": 3, "b": 2, "c": 1},
+ ...              {"a": 0, "b": 3, "c": 7}]
+ >>> classif.classify_many(test_data)
+ ['ham', 'spam']
+ >>> classif = SklearnClassifier(SVC(), sparse=False).train(train_data)
+ >>> classif.classify_many(test_data)
+ ['ham', 'spam']
+
+ Test the Maximum Entropy classifier training algorithms; they should all
+ generate the same results.
+
+ >>> def print_maxent_test_header():
+ ...     print(' '*11+''.join(['      test[%s]  ' % i
+ ...         for i in range(len(test))]))
+ ...     print(' '*11+'     p(x)  p(y)'*len(test))
+ ...     print('-'*(11+15*len(test)))
+
+ >>> def test_maxent(algorithm):
+ ...     print('%11s' % algorithm, end=' ')
+ ...     try:
+ ...         classifier = nltk.classify.MaxentClassifier.train(
+ ...             train, algorithm, trace=0, max_iter=1000)
+ ...     except Exception as e:
+ ...         print('Error: %r' % e)
+ ...         return
+ ...
+ ...     for featureset in test:
+ ...         pdist = classifier.prob_classify(featureset)
+ ...         print('%8.2f%6.2f' % (pdist.prob('x'), pdist.prob('y')), end=' ')
+ ...     print()
+
+ >>> print_maxent_test_header(); test_maxent('GIS'); test_maxent('IIS')
+                  test[0]         test[1]         test[2]         test[3]
+                 p(x)  p(y)     p(x)  p(y)     p(x)  p(y)     p(x)  p(y)
+ -----------------------------------------------------------------------
+         GIS     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
+         IIS     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
+
+ >>> test_maxent('MEGAM'); test_maxent('TADM') # doctest: +SKIP
+       MEGAM     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
+        TADM     0.16  0.84     0.46  0.54     0.41  0.59     0.76  0.24
+
+
+
+ Regression tests for TypedMaxentFeatureEncoding
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ >>> from nltk.classify import maxent
+ >>> train = [
+ ...     ({'a': 1, 'b': 1, 'c': 1}, 'y'),
+ ...     ({'a': 5, 'b': 5, 'c': 5}, 'x'),
+ ...     ({'a': 0.9, 'b': 0.9, 'c': 0.9}, 'y'),
+ ...     ({'a': 5.5, 'b': 5.4, 'c': 5.3}, 'x'),
+ ...     ({'a': 0.8, 'b': 1.2, 'c': 1}, 'y'),
+ ...     ({'a': 5.1, 'b': 4.9, 'c': 5.2}, 'x')
+ ... ]
+
+ >>> test = [
+ ...     {'a': 1, 'b': 0.8, 'c': 1.2},
+ ...     {'a': 5.2, 'b': 5.1, 'c': 5}
+ ... ]
+
+ >>> encoding = maxent.TypedMaxentFeatureEncoding.train(
+ ...     train, count_cutoff=3, alwayson_features=True)
+
+ >>> classifier = maxent.MaxentClassifier.train(
+ ...     train, bernoulli=False, encoding=encoding, trace=0)
+
+ >>> classifier.classify_many(test)
+ ['y', 'x']
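Editorial aside (not part of the diff): one thing the classify.doctest hunk above never shows is scoring a trained classifier on labelled held-out data. A brief sketch using nltk.classify.accuracy, with toy feature dictionaries in the same style as the doctest (the split itself is invented for illustration):

    # Sketch: train/evaluate split for an NLTK classifier (toy data, illustrative only).
    import nltk

    train = [
        (dict(a=1, b=1, c=1), 'y'),
        (dict(a=1, b=1, c=0), 'y'),
        (dict(a=0, b=1, c=1), 'x'),
        (dict(a=0, b=0, c=0), 'x'),
    ]
    heldout = [
        (dict(a=1, b=0, c=1), 'y'),
        (dict(a=0, b=1, c=0), 'x'),
    ]

    classifier = nltk.classify.NaiveBayesClassifier.train(train)

    # nltk.classify.accuracy takes (featureset, label) pairs and returns the
    # fraction of held-out items predicted correctly.
    print(nltk.classify.accuracy(classifier, heldout))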
env-llmeval/lib/python3.10/site-packages/nltk/test/classify_fixt.py ADDED
@@ -0,0 +1,5 @@
+ # most of classify.doctest requires numpy
+ def setup_module():
+     import pytest
+
+     pytest.importorskip("numpy")
env-llmeval/lib/python3.10/site-packages/nltk/test/collocations.doctest ADDED
@@ -0,0 +1,307 @@
+ .. Copyright (C) 2001-2023 NLTK Project
+ .. For license information, see LICENSE.TXT
+
+ ==============
+  Collocations
+ ==============
+
+ Overview
+ ~~~~~~~~
+
+ Collocations are expressions of multiple words which commonly co-occur. For
+ example, the top ten bigram collocations in Genesis are listed below, as
+ measured using Pointwise Mutual Information.
+
+ >>> import nltk
+ >>> from nltk.collocations import *
+ >>> bigram_measures = nltk.collocations.BigramAssocMeasures()
+ >>> trigram_measures = nltk.collocations.TrigramAssocMeasures()
+ >>> fourgram_measures = nltk.collocations.QuadgramAssocMeasures()
+ >>> finder = BigramCollocationFinder.from_words(
+ ...     nltk.corpus.genesis.words('english-web.txt'))
+ >>> finder.nbest(bigram_measures.pmi, 10)
+ [('Allon', 'Bacuth'), ('Ashteroth', 'Karnaim'), ('Ben', 'Ammi'),
+ ('En', 'Mishpat'), ('Jegar', 'Sahadutha'), ('Salt', 'Sea'),
+ ('Whoever', 'sheds'), ('appoint', 'overseers'), ('aromatic', 'resin'),
+ ('cutting', 'instrument')]
+
+ While these words are highly collocated, the expressions are also very
+ infrequent. Therefore it is useful to apply filters, such as ignoring all
+ bigrams which occur less than three times in the corpus:
+
+ >>> finder.apply_freq_filter(3)
+ >>> finder.nbest(bigram_measures.pmi, 10)
+ [('Beer', 'Lahai'), ('Lahai', 'Roi'), ('gray', 'hairs'),
+ ('ewe', 'lambs'), ('Most', 'High'), ('many', 'colors'),
+ ('burnt', 'offering'), ('Paddan', 'Aram'), ('east', 'wind'),
+ ('living', 'creature')]
+
+ We may similarly find collocations among tagged words:
+
+ >>> finder = BigramCollocationFinder.from_words(
+ ...     nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
+ >>> finder.nbest(bigram_measures.pmi, 5)
+ [(('1,119', 'NUM'), ('votes', 'NOUN')),
+ (('1962', 'NUM'), ("governor's", 'NOUN')),
+ (('637', 'NUM'), ('E.', 'NOUN')),
+ (('Alpharetta', 'NOUN'), ('prison', 'NOUN')),
+ (('Bar', 'NOUN'), ('Association', 'NOUN'))]
+
+ Or tags alone:
+
+ >>> finder = BigramCollocationFinder.from_words(t for w, t in
+ ...     nltk.corpus.brown.tagged_words('ca01', tagset='universal'))
+ >>> finder.nbest(bigram_measures.pmi, 10)
+ [('PRT', 'VERB'), ('PRON', 'VERB'), ('ADP', 'DET'), ('.', 'PRON'), ('DET', 'ADJ'),
+ ('CONJ', 'PRON'), ('ADP', 'NUM'), ('NUM', '.'), ('ADV', 'ADV'), ('VERB', 'ADV')]
+
+ Or spanning intervening words:
+
+ >>> finder = BigramCollocationFinder.from_words(
+ ...     nltk.corpus.genesis.words('english-web.txt'),
+ ...     window_size = 20)
+ >>> finder.apply_freq_filter(2)
+ >>> ignored_words = nltk.corpus.stopwords.words('english')
+ >>> finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
+ >>> finder.nbest(bigram_measures.likelihood_ratio, 10)
+ [('chief', 'chief'), ('became', 'father'), ('years', 'became'),
+ ('hundred', 'years'), ('lived', 'became'), ('king', 'king'),
+ ('lived', 'years'), ('became', 'became'), ('chief', 'chiefs'),
+ ('hundred', 'became')]
+
+ Finders
+ ~~~~~~~
+
+ The collocations package provides collocation finders which by default
+ consider all ngrams in a text as candidate collocations:
+
+ >>> text = "I do not like green eggs and ham, I do not like them Sam I am!"
+ >>> tokens = nltk.wordpunct_tokenize(text)
+ >>> finder = BigramCollocationFinder.from_words(tokens)
+ >>> scored = finder.score_ngrams(bigram_measures.raw_freq)
+ >>> sorted(bigram for bigram, score in scored)
+ [(',', 'I'), ('I', 'am'), ('I', 'do'), ('Sam', 'I'), ('am', '!'),
+ ('and', 'ham'), ('do', 'not'), ('eggs', 'and'), ('green', 'eggs'),
+ ('ham', ','), ('like', 'green'), ('like', 'them'), ('not', 'like'),
+ ('them', 'Sam')]
+
+ We could otherwise construct the collocation finder from manually-derived
+ FreqDists:
+
+ >>> word_fd = nltk.FreqDist(tokens)
+ >>> bigram_fd = nltk.FreqDist(nltk.bigrams(tokens))
+ >>> finder = BigramCollocationFinder(word_fd, bigram_fd)
+ >>> scored == finder.score_ngrams(bigram_measures.raw_freq)
+ True
+
+ A similar interface is provided for trigrams:
+
+ >>> finder = TrigramCollocationFinder.from_words(tokens)
+ >>> scored = finder.score_ngrams(trigram_measures.raw_freq)
+ >>> set(trigram for trigram, score in scored) == set(nltk.trigrams(tokens))
+ True
+
+ We may want to select only the top n results:
+
+ >>> sorted(finder.nbest(trigram_measures.raw_freq, 2))
+ [('I', 'do', 'not'), ('do', 'not', 'like')]
+
+ Alternatively, we can select those above a minimum score value:
+
+ >>> sorted(finder.above_score(trigram_measures.raw_freq,
+ ...                           1.0 / len(tuple(nltk.trigrams(tokens)))))
+ [('I', 'do', 'not'), ('do', 'not', 'like')]
+
+ Now spanning intervening words:
+
+ >>> finder = TrigramCollocationFinder.from_words(tokens)
+ >>> finder = TrigramCollocationFinder.from_words(tokens, window_size=4)
+ >>> sorted(finder.nbest(trigram_measures.raw_freq, 4))
+ [('I', 'do', 'like'), ('I', 'do', 'not'), ('I', 'not', 'like'), ('do', 'not', 'like')]
+
+ A closer look at the finder's ngram frequencies:
+
+ >>> sorted(finder.ngram_fd.items(), key=lambda t: (-t[1], t[0]))[:10]
+ [(('I', 'do', 'like'), 2), (('I', 'do', 'not'), 2), (('I', 'not', 'like'), 2),
+ (('do', 'not', 'like'), 2), ((',', 'I', 'do'), 1), ((',', 'I', 'not'), 1),
+ ((',', 'do', 'not'), 1), (('I', 'am', '!'), 1), (('Sam', 'I', '!'), 1),
+ (('Sam', 'I', 'am'), 1)]
+
+ A similar interface is provided for fourgrams:
+
+ >>> finder_4grams = QuadgramCollocationFinder.from_words(tokens)
+ >>> scored_4grams = finder_4grams.score_ngrams(fourgram_measures.raw_freq)
+ >>> set(fourgram for fourgram, score in scored_4grams) == set(nltk.ngrams(tokens, n=4))
+ True
+
+ Filtering candidates
+ ~~~~~~~~~~~~~~~~~~~~
+
+ All the ngrams in a text are often too many to be useful when finding
+ collocations. It is generally useful to remove some words or punctuation,
+ and to require a minimum frequency for candidate collocations.
+
+ Given our sample text above, if we remove all trigrams containing personal
+ pronouns from candidature, score_ngrams should return 6 fewer results, and
+ 'do not like' will be the only candidate which occurs more than once:
+
+ >>> finder = TrigramCollocationFinder.from_words(tokens)
+ >>> len(finder.score_ngrams(trigram_measures.raw_freq))
+ 14
+ >>> finder.apply_word_filter(lambda w: w in ('I', 'me'))
+ >>> len(finder.score_ngrams(trigram_measures.raw_freq))
+ 8
+ >>> sorted(finder.above_score(trigram_measures.raw_freq,
+ ...                           1.0 / len(tuple(nltk.trigrams(tokens)))))
+ [('do', 'not', 'like')]
+
+ Sometimes a filter is a function on the whole ngram, rather than each word,
+ such as if we may permit 'and' to appear in the middle of a trigram, but
+ not on either edge:
+
+ >>> finder.apply_ngram_filter(lambda w1, w2, w3: 'and' in (w1, w3))
+ >>> len(finder.score_ngrams(trigram_measures.raw_freq))
+ 6
+
+ Finally, it is often important to remove low frequency candidates, as we
+ lack sufficient evidence about their significance as collocations:
+
+ >>> finder.apply_freq_filter(2)
+ >>> len(finder.score_ngrams(trigram_measures.raw_freq))
+ 1
+
+ Association measures
+ ~~~~~~~~~~~~~~~~~~~~
+
+ A number of measures are available to score collocations or other associations.
+ The arguments to measure functions are marginals of a contingency table, in the
+ bigram case (n_ii, (n_ix, n_xi), n_xx)::
+
+             w1    ~w1
+          ------ ------
+      w2 | n_ii | n_oi | = n_xi
+          ------ ------
+     ~w2 | n_io | n_oo |
+          ------ ------
+          = n_ix        TOTAL = n_xx
+
+ We test their calculation using some known values presented in Manning and
+ Schutze's text and other papers.
+
+ Student's t: examples from Manning and Schutze 5.3.2
+
+ >>> print('%0.4f' % bigram_measures.student_t(8, (15828, 4675), 14307668))
+ 0.9999
+ >>> print('%0.4f' % bigram_measures.student_t(20, (42, 20), 14307668))
+ 4.4721
+
+ Chi-square: examples from Manning and Schutze 5.3.3
+
+ >>> print('%0.2f' % bigram_measures.chi_sq(8, (15828, 4675), 14307668))
+ 1.55
+ >>> print('%0.0f' % bigram_measures.chi_sq(59, (67, 65), 571007))
+ 456400
+
+ Likelihood ratios: examples from Dunning, CL, 1993
+
+ >>> print('%0.2f' % bigram_measures.likelihood_ratio(110, (2552, 221), 31777))
+ 270.72
+ >>> print('%0.2f' % bigram_measures.likelihood_ratio(8, (13, 32), 31777))
+ 95.29
+
+ Pointwise Mutual Information: examples from Manning and Schutze 5.4
+
+ >>> print('%0.2f' % bigram_measures.pmi(20, (42, 20), 14307668))
+ 18.38
+ >>> print('%0.2f' % bigram_measures.pmi(20, (15019, 15629), 14307668))
+ 0.29
+
+ TODO: Find authoritative results for trigrams.
+
+ Using contingency table values
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ While frequency counts make marginals readily available for collocation
+ finding, it is common to find published contingency table values. The
+ collocations package therefore provides a wrapper, ContingencyMeasures, which
+ wraps an association measures class, providing association measures which
+ take contingency values as arguments, (n_ii, n_io, n_oi, n_oo) in the
+ bigram case.
+
+ >>> from nltk.metrics import ContingencyMeasures
+ >>> cont_bigram_measures = ContingencyMeasures(bigram_measures)
+ >>> print('%0.2f' % cont_bigram_measures.likelihood_ratio(8, 5, 24, 31740))
+ 95.29
+ >>> print('%0.2f' % cont_bigram_measures.chi_sq(8, 15820, 4667, 14287173))
+ 1.55
+
+ Ranking and correlation
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ It is useful to consider the results of finding collocations as a ranking, and
+ the rankings output using different association measures can be compared using
+ the Spearman correlation coefficient.
+
+ Ranks can be assigned to a sorted list of results trivially by assigning
+ strictly increasing ranks to each result:
+
+ >>> from nltk.metrics.spearman import *
+ >>> results_list = ['item1', 'item2', 'item3', 'item4', 'item5']
+ >>> print(list(ranks_from_sequence(results_list)))
+ [('item1', 0), ('item2', 1), ('item3', 2), ('item4', 3), ('item5', 4)]
+
+ If scores are available for each result, we may allow sufficiently similar
+ results (differing by no more than rank_gap) to be assigned the same rank:
+
+ >>> results_scored = [('item1', 50.0), ('item2', 40.0), ('item3', 38.0),
+ ...                   ('item4', 35.0), ('item5', 14.0)]
+ >>> print(list(ranks_from_scores(results_scored, rank_gap=5)))
+ [('item1', 0), ('item2', 1), ('item3', 1), ('item4', 1), ('item5', 4)]
+
+ The Spearman correlation coefficient gives a number from -1.0 to 1.0 comparing
+ two rankings. A coefficient of 1.0 indicates identical rankings; -1.0 indicates
+ exact opposite rankings.
+
+ >>> print('%0.1f' % spearman_correlation(
+ ...         ranks_from_sequence(results_list),
+ ...         ranks_from_sequence(results_list)))
+ 1.0
+ >>> print('%0.1f' % spearman_correlation(
+ ...         ranks_from_sequence(reversed(results_list)),
+ ...         ranks_from_sequence(results_list)))
+ -1.0
+ >>> results_list2 = ['item2', 'item3', 'item1', 'item5', 'item4']
+ >>> print('%0.1f' % spearman_correlation(
+ ...         ranks_from_sequence(results_list),
+ ...         ranks_from_sequence(results_list2)))
+ 0.6
+ >>> print('%0.1f' % spearman_correlation(
+ ...         ranks_from_sequence(reversed(results_list)),
+ ...         ranks_from_sequence(results_list2)))
+ -0.6
+
+ Keywords
+ ~~~~~~~~
+
+ Bigram association metrics can also be used to perform keyword analysis. For example, this finds the keywords
+ associated with the "romance" section of the Brown corpus as measured by likelihood ratio:
+
+ >>> romance = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words(categories='romance') if w.isalpha())
+ >>> freq = nltk.FreqDist(w.lower() for w in nltk.corpus.brown.words() if w.isalpha())
+
+ >>> key = nltk.FreqDist()
+ >>> for w in romance:
+ ...     key[w] = bigram_measures.likelihood_ratio(romance[w], (freq[w], romance.N()), freq.N())
+
+ >>> for k,v in key.most_common(10):
+ ...     print(f'{k:10s} {v:9.3f}')
+ she        1163.325
+ i           995.961
+ her         930.528
+ you         513.149
+ of          501.891
+ is          463.386
+ had         421.615
+ he          411.000
+ the         347.632
+ said        300.811
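Editorial aside (not part of the diff): the collocations.doctest hunk above uses two input formats, marginals (n_ii, (n_ix, n_xi), n_xx) for the plain measures and contingency cells (n_ii, n_io, n_oi, n_oo) for ContingencyMeasures. They are related by simple arithmetic, and the Dunning example in the doctest uses the same table in both forms. The conversion helper below is my own illustrative code, not an NLTK API; the two likelihood-ratio calls mirror ones that appear in the doctest.

    # Sketch: converting marginals to contingency cells (illustrative helper, not an NLTK API).
    from nltk.collocations import BigramAssocMeasures
    from nltk.metrics import ContingencyMeasures

    def marginals_to_cells(n_ii, n_ix_xi, n_xx):
        """(n_ii, (n_ix, n_xi), n_xx) -> (n_ii, n_io, n_oi, n_oo)."""
        n_ix, n_xi = n_ix_xi
        n_io = n_ix - n_ii              # w1 present, w2 absent
        n_oi = n_xi - n_ii              # w2 present, w1 absent
        n_oo = n_xx - n_ii - n_io - n_oi
        return n_ii, n_io, n_oi, n_oo

    bigram_measures = BigramAssocMeasures()
    cont_measures = ContingencyMeasures(bigram_measures)

    # Dunning (1993) example: marginals (8, (13, 32), 31777) -> cells (8, 5, 24, 31740).
    cells = marginals_to_cells(8, (13, 32), 31777)
    print(cells)
    print(bigram_measures.likelihood_ratio(8, (13, 32), 31777))  # ~95.29
    print(cont_measures.likelihood_ratio(*cells))                # same value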
env-llmeval/lib/python3.10/site-packages/nltk/test/concordance.doctest ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. Copyright (C) 2001-2016 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==================================
5
+ Concordance Example
6
+ ==================================
7
+
8
+ A concordance view shows us every occurrence of a given
9
+ word, together with some context. Here we look up the word monstrous
10
+ in Moby Dick by entering text1 followed by a period, then the term
11
+ concordance, and then placing "monstrous" in parentheses:
12
+
13
+ >>> from nltk.corpus import gutenberg
14
+ >>> from nltk.text import Text
15
+ >>> corpus = gutenberg.words('melville-moby_dick.txt')
16
+ >>> text = Text(corpus)
17
+
18
+ >>> text.concordance("monstrous")
19
+ Displaying 11 of 11 matches:
20
+ ong the former , one was of a most monstrous size . ... This came towards us ,
21
+ ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
22
+ ll over with a heathenish array of monstrous clubs and spears . Some were thick
23
+ d as you gazed , and wondered what monstrous cannibal and savage could ever hav
24
+ that has survived the flood ; most monstrous and most mountainous ! That Himmal
25
+ they might scout at Moby Dick as a monstrous fable , or still worse and more de
26
+ th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
27
+ ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
28
+ ere to enter upon those still more monstrous stories of them which are to be fo
29
+ ght have been rummaged out of this monstrous cabinet there is no telling . But
30
+ of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u
31
+
32
+ >>> text.concordance("monstrous")
33
+ Displaying 11 of 11 matches:
34
+ ong the former , one was of a most monstrous size . ... This came towards us ,
35
+ ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
36
+ ll over with a heathenish array of monstrous clubs and spears . Some were thick
37
+ ...
38
+
39
+ We can also search for a multi-word phrase by passing a list of strings:
40
+
41
+ >>> text.concordance(["monstrous", "size"])
42
+ Displaying 2 of 2 matches:
43
+ the former , one was of a most monstrous size . ... This came towards us , op
44
+ Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead upo
45
+
46
+ =================================
47
+ Concordance List
48
+ =================================
49
+
50
+ Often we need to store the results of concordance for further usage.
51
+ To do so, call the concordance function with the stdout argument set
52
+ to false:
53
+
54
+ >>> from nltk.corpus import gutenberg
55
+ >>> from nltk.text import Text
56
+ >>> corpus = gutenberg.words('melville-moby_dick.txt')
57
+ >>> text = Text(corpus)
58
+ >>> con_list = text.concordance_list("monstrous")
59
+ >>> con_list[2].line
60
+ 'll over with a heathenish array of monstrous clubs and spears . Some were thick'
61
+ >>> len(con_list)
62
+ 11
63
+
64
+ =================================
65
+ Patching Issue #2088
66
+ =================================
67
+
68
+ Patching https://github.com/nltk/nltk/issues/2088
69
+ The left slice of the left context should be clip to 0 if the `i-context` < 0.
70
+
71
+ >>> from nltk import Text, word_tokenize
72
+ >>> jane_eyre = 'Chapter 1\nTHERE was no possibility of taking a walk that day. We had been wandering, indeed, in the leafless shrubbery an hour in the morning; but since dinner (Mrs. Reed, when there was no company, dined early) the cold winter wind had brought with it clouds so sombre, and a rain so penetrating, that further outdoor exercise was now out of the question.'
73
+ >>> text = Text(word_tokenize(jane_eyre))
74
+ >>> text.concordance_list('taking')[0].left
75
+ ['Chapter', '1', 'THERE', 'was', 'no', 'possibility', 'of']
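+
+ The other fields of the returned ``ConcordanceLine`` can be inspected in
+ the same way; for instance (a sketch, assuming the namedtuple also
+ exposes a ``query`` field holding the matched word):
+
+ >>> text.concordance_list('taking')[0].query  # doctest: +SKIP
+ 'taking'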
env-llmeval/lib/python3.10/site-packages/nltk/test/data.doctest ADDED
@@ -0,0 +1,387 @@
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =========================================
5
+ Loading Resources From the Data Package
6
+ =========================================
7
+
8
+ >>> import nltk.data
9
+
10
+ Overview
11
+ ~~~~~~~~
12
+ The `nltk.data` module contains functions that can be used to load
13
+ NLTK resource files, such as corpora, grammars, and saved processing
14
+ objects.
15
+
16
+ Loading Data Files
17
+ ~~~~~~~~~~~~~~~~~~
18
+ Resources are loaded using the function `nltk.data.load()`, which
19
+ takes as its first argument a URL specifying what file should be
20
+ loaded. The ``nltk:`` protocol loads files from the NLTK data
21
+ distribution:
22
+
23
+ >>> tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
24
+ >>> tokenizer.tokenize('Hello. This is a test. It works!')
25
+ ['Hello.', 'This is a test.', 'It works!']
26
+
27
+ It is important to note that there should be no space following the
28
+ colon (':') in the URL; 'nltk: tokenizers/punkt/english.pickle' will
29
+ not work!
30
+
31
+ The ``nltk:`` protocol is used by default if no protocol is specified:
32
+
33
+ >>> nltk.data.load('tokenizers/punkt/english.pickle')
34
+ <nltk.tokenize.punkt.PunktSentenceTokenizer object at ...>
35
+
36
+ But it is also possible to load resources from ``http:``, ``ftp:``,
37
+ and ``file:`` URLs:
38
+
39
+ >>> # Load a grammar from the NLTK webpage.
40
+ >>> cfg = nltk.data.load('https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg')
41
+ >>> print(cfg) # doctest: +ELLIPSIS
42
+ Grammar with 14 productions (start state = S)
43
+ S -> NP VP
44
+ PP -> P NP
45
+ ...
46
+ P -> 'on'
47
+ P -> 'in'
48
+
49
+ >>> # Load a grammar using an absolute path.
50
+ >>> url = 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg')
51
+ >>> url.replace('\\', '/')
52
+ 'file:...toy.cfg'
53
+ >>> print(nltk.data.load(url))
54
+ Grammar with 14 productions (start state = S)
55
+ S -> NP VP
56
+ PP -> P NP
57
+ ...
58
+ P -> 'on'
59
+ P -> 'in'
60
+
61
+ The second argument to the `nltk.data.load()` function specifies the
62
+ file format, which determines how the file's contents are processed
63
+ before they are returned by ``load()``. The formats that are
64
+ currently supported by the data module are described by the dictionary
65
+ `nltk.data.FORMATS`:
66
+
67
+ >>> for format, descr in sorted(nltk.data.FORMATS.items()):
68
+ ... print('{0:<7} {1:}'.format(format, descr))
69
+ cfg A context free grammar.
70
+ fcfg A feature CFG.
71
+ fol A list of first order logic expressions, parsed with
72
+ nltk.sem.logic.Expression.fromstring.
73
+ json A serialized python object, stored using the json module.
74
+ logic A list of first order logic expressions, parsed with
75
+ nltk.sem.logic.LogicParser. Requires an additional logic_parser
76
+ parameter
77
+ pcfg A probabilistic CFG.
78
+ pickle A serialized python object, stored using the pickle
79
+ module.
80
+ raw The raw (byte string) contents of a file.
81
+ text The raw (unicode string) contents of a file.
82
+ val A semantic valuation, parsed by
83
+ nltk.sem.Valuation.fromstring.
84
+ yaml A serialized python object, stored using the yaml module.
85
+
86
+ `nltk.data.load()` will raise a ValueError if a bad format name is
87
+ specified:
88
+
89
+ >>> nltk.data.load('grammars/sample_grammars/toy.cfg', 'bar')
90
+ Traceback (most recent call last):
91
+ . . .
92
+ ValueError: Unknown format type!
93
+
94
+ By default, the ``"auto"`` format is used, which chooses a format
95
+ based on the filename's extension. The mapping from file extensions
96
+ to format names is specified by `nltk.data.AUTO_FORMATS`:
97
+
98
+ >>> for ext, format in sorted(nltk.data.AUTO_FORMATS.items()):
99
+ ... print('.%-7s -> %s' % (ext, format))
100
+ .cfg -> cfg
101
+ .fcfg -> fcfg
102
+ .fol -> fol
103
+ .json -> json
104
+ .logic -> logic
105
+ .pcfg -> pcfg
106
+ .pickle -> pickle
107
+ .text -> text
108
+ .txt -> text
109
+ .val -> val
110
+ .yaml -> yaml
111
+
112
+ If `nltk.data.load()` is unable to determine the format based on the
113
+ filename's extension, it will raise a ValueError:
114
+
115
+ >>> nltk.data.load('foo.bar')
116
+ Traceback (most recent call last):
117
+ . . .
118
+ ValueError: Could not determine format for foo.bar based on its file
119
+ extension; use the "format" argument to specify the format explicitly.
120
+
121
+ Note that by explicitly specifying the ``format`` argument, you can
+ override the load method's default processing behavior. For example,
+ to get the unprocessed string contents of any file, simply use
+ ``format="text"``:
124
+
125
+ >>> s = nltk.data.load('grammars/sample_grammars/toy.cfg', 'text')
126
+ >>> print(s)
127
+ S -> NP VP
128
+ PP -> P NP
129
+ NP -> Det N | NP PP
130
+ VP -> V NP | VP PP
131
+ ...
132
+
133
+ Making Local Copies
134
+ ~~~~~~~~~~~~~~~~~~~
135
+ .. This will not be visible in the html output: create a tempdir to
136
+ play in.
137
+ >>> import tempfile, os
138
+ >>> tempdir = tempfile.mkdtemp()
139
+ >>> old_dir = os.path.abspath('.')
140
+ >>> os.chdir(tempdir)
141
+
142
+ The function `nltk.data.retrieve()` copies a given resource to a local
143
+ file. This can be useful, for example, if you want to edit one of the
144
+ sample grammars.
145
+
146
+ >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
147
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy.cfg'
148
+
149
+ >>> # Simulate editing the grammar.
150
+ >>> with open('toy.cfg') as inp:
151
+ ... s = inp.read().replace('NP', 'DP')
152
+ >>> with open('toy.cfg', 'w') as out:
153
+ ... _bytes_written = out.write(s)
154
+
155
+ >>> # Load the edited grammar, & display it.
156
+ >>> cfg = nltk.data.load('file:///' + os.path.abspath('toy.cfg'))
157
+ >>> print(cfg)
158
+ Grammar with 14 productions (start state = S)
159
+ S -> DP VP
160
+ PP -> P DP
161
+ ...
162
+ P -> 'on'
163
+ P -> 'in'
164
+
165
+ The second argument to `nltk.data.retrieve()` specifies the filename
166
+ for the new copy of the file. By default, the source file's filename
167
+ is used.
168
+
169
+ >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg', 'mytoy.cfg')
170
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'mytoy.cfg'
171
+ >>> os.path.isfile('./mytoy.cfg')
172
+ True
173
+ >>> nltk.data.retrieve('grammars/sample_grammars/np.fcfg')
174
+ Retrieving 'nltk:grammars/sample_grammars/np.fcfg', saving to 'np.fcfg'
175
+ >>> os.path.isfile('./np.fcfg')
176
+ True
177
+
178
+ If a file with the specified (or default) filename already exists in
179
+ the current directory, then `nltk.data.retrieve()` will raise a
180
+ ValueError exception. It will *not* overwrite the file:
181
+
182
+ >>> os.path.isfile('./toy.cfg')
183
+ True
184
+ >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
185
+ Traceback (most recent call last):
186
+ . . .
187
+ ValueError: File '...toy.cfg' already exists!
188
+
189
+ .. This will not be visible in the html output: clean up the tempdir.
190
+ >>> os.chdir(old_dir)
191
+ >>> for f in os.listdir(tempdir):
192
+ ... os.remove(os.path.join(tempdir, f))
193
+ >>> os.rmdir(tempdir)
194
+
195
+ Finding Files in the NLTK Data Package
196
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
197
+ The `nltk.data.find()` function searches the NLTK data package for a
198
+ given file, and returns a pointer to that file. This pointer can
199
+ either be a `FileSystemPathPointer` (whose `path` attribute gives the
200
+ absolute path of the file); or a `ZipFilePathPointer`, specifying a
201
+ zipfile and the name of an entry within that zipfile. Both pointer
202
+ types define the `open()` method, which can be used to read the string
203
+ contents of the file.
204
+
205
+ >>> path = nltk.data.find('corpora/abc/rural.txt')
206
+ >>> str(path)
207
+ '...rural.txt'
208
+ >>> print(path.open().read(60).decode())
209
+ PM denies knowledge of AWB kickbacks
210
+ The Prime Minister has
211
+
212
+ Alternatively, the `nltk.data.load()` function can be used with the
213
+ keyword argument ``format="raw"``:
214
+
215
+ >>> s = nltk.data.load('corpora/abc/rural.txt', format='raw')[:60]
216
+ >>> print(s.decode())
217
+ PM denies knowledge of AWB kickbacks
218
+ The Prime Minister has
219
+
220
+ Alternatively, you can use the keyword argument ``format="text"``:
221
+
222
+ >>> s = nltk.data.load('corpora/abc/rural.txt', format='text')[:60]
223
+ >>> print(s)
224
+ PM denies knowledge of AWB kickbacks
225
+ The Prime Minister has
226
+
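+ Whether ``find()`` returns a ``FileSystemPathPointer`` or a
+ ``ZipFilePathPointer`` depends on how the resource is installed (an
+ unzipped directory vs. a zip archive), so a quick type check can help
+ when debugging data-path problems (a sketch; the result varies by
+ installation):
+
+ >>> type(nltk.data.find('corpora/abc/rural.txt')).__name__  # doctest: +SKIP
+ 'FileSystemPathPointer'
+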
227
+ Resource Caching
228
+ ~~~~~~~~~~~~~~~~
229
+
230
+ NLTK uses a dictionary to maintain a cache of resources that
231
+ have been loaded. If you load a resource that is already stored in
232
+ the cache, then the cached copy will be returned. This behavior can
233
+ be seen by the trace output generated when verbose=True:
234
+
235
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
236
+ <<Loading nltk:grammars/book_grammars/feat0.fcfg>>
237
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
238
+ <<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>
239
+
240
+ If you wish to load a resource from its source, bypassing the cache,
241
+ use the ``cache=False`` argument to `nltk.data.load()`. This can be
242
+ useful, for example, if the resource is loaded from a local file, and
243
+ you are actively editing that file:
244
+
245
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',cache=False,verbose=True)
246
+ <<Loading nltk:grammars/book_grammars/feat0.fcfg>>
247
+
248
+ The cache *no longer* uses weak references. A resource will not be
249
+ automatically expunged from the cache when no more objects are using
250
+ it. In the following example, when we clear the variable ``feat0``,
251
+ the reference count for the feature grammar object drops to zero.
252
+ However, the object remains cached:
253
+
254
+ >>> del feat0
255
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',
256
+ ... verbose=True)
257
+ <<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>
258
+
259
+ You can clear the entire contents of the cache, using
260
+ `nltk.data.clear_cache()`:
261
+
262
+ >>> nltk.data.clear_cache()
263
+
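+ After the cache has been cleared, the next ``load()`` goes back to the
+ source again (a minimal sketch; the trace output follows the same format
+ as above):
+
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
+ <<Loading nltk:grammars/book_grammars/feat0.fcfg>>
+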
264
+ Retrieving other Data Sources
265
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
266
+ >>> formulas = nltk.data.load('grammars/book_grammars/background.fol')
267
+ >>> for f in formulas: print(str(f))
268
+ all x.(boxerdog(x) -> dog(x))
269
+ all x.(boxer(x) -> person(x))
270
+ all x.-(dog(x) & person(x))
271
+ all x.(married(x) <-> exists y.marry(x,y))
272
+ all x.(bark(x) -> dog(x))
273
+ all x y.(marry(x,y) -> (person(x) & person(y)))
274
+ -(Vincent = Mia)
275
+ -(Vincent = Fido)
276
+ -(Mia = Fido)
277
+
278
+ Regression Tests
279
+ ~~~~~~~~~~~~~~~~
280
+ Create a temp dir for tests that write files:
281
+
282
+ >>> import tempfile, os
283
+ >>> tempdir = tempfile.mkdtemp()
284
+ >>> old_dir = os.path.abspath('.')
285
+ >>> os.chdir(tempdir)
286
+
287
+ The `retrieve()` function accepts all url types:
288
+
289
+ >>> urls = ['https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg',
290
+ ... 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg'),
291
+ ... 'nltk:grammars/sample_grammars/toy.cfg',
292
+ ... 'grammars/sample_grammars/toy.cfg']
293
+ >>> for i, url in enumerate(urls):
294
+ ... nltk.data.retrieve(url, 'toy-%d.cfg' % i)
295
+ Retrieving 'https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg', saving to 'toy-0.cfg'
296
+ Retrieving 'file:...toy.cfg', saving to 'toy-1.cfg'
297
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-2.cfg'
298
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-3.cfg'
299
+
300
+ Clean up the temp dir:
301
+
302
+ >>> os.chdir(old_dir)
303
+ >>> for f in os.listdir(tempdir):
304
+ ... os.remove(os.path.join(tempdir, f))
305
+ >>> os.rmdir(tempdir)
306
+
307
+ Lazy Loader
308
+ -----------
309
+ A lazy loader is a wrapper object that defers loading a resource until
310
+ it is accessed or used in any way. This is mainly intended for
311
+ internal use by NLTK's corpus readers.
312
+
313
+ >>> # Create a lazy loader for toy.cfg.
314
+ >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')
315
+
316
+ >>> # Show that it's not loaded yet:
317
+ >>> object.__repr__(ll)
318
+ '<nltk.data.LazyLoader object at ...>'
319
+
320
+ >>> # printing it is enough to cause it to be loaded:
321
+ >>> print(ll)
322
+ <Grammar with 14 productions>
323
+
324
+ >>> # Show that it's now been loaded:
325
+ >>> object.__repr__(ll)
326
+ '<nltk.grammar.CFG object at ...>'
327
+
328
+
329
+ >>> # Test that accessing an attribute also loads it:
330
+ >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')
331
+ >>> ll.start()
332
+ S
333
+ >>> object.__repr__(ll)
334
+ '<nltk.grammar.CFG object at ...>'
335
+
336
+ Buffered Gzip Reading and Writing
337
+ ---------------------------------
338
+ Write performance to gzip-compressed files is extremely poor when the files become large.
+ File creation can become a bottleneck in those cases.
340
+
341
+ Read performance from large gzipped pickle files was improved in data.py by
342
+ buffering the reads. A similar fix can be applied to writes by buffering
343
+ the writes to an in-memory buffer first.
344
+
345
+ This is mainly intended for internal use. The test simply tests that reading
346
+ and writing work as intended and does not test how much improvement buffering
347
+ provides.
348
+
349
+ >>> from io import StringIO
350
+ >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'wb', size=2**10)
351
+ >>> ans = []
352
+ >>> for i in range(10000):
353
+ ... ans.append(str(i).encode('ascii'))
354
+ ... test.write(str(i).encode('ascii'))
355
+ >>> test.close()
356
+ >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'rb')
357
+ >>> test.read() == b''.join(ans)
358
+ True
359
+ >>> test.close()
360
+ >>> import os
361
+ >>> os.unlink('testbuf.gz')
362
+
363
+ JSON Encoding and Decoding
364
+ --------------------------
365
+ JSON serialization is used instead of pickle for some classes.
366
+
367
+ >>> from nltk import jsontags
368
+ >>> from nltk.jsontags import JSONTaggedEncoder, JSONTaggedDecoder, register_tag
369
+ >>> @jsontags.register_tag
370
+ ... class JSONSerializable:
371
+ ... json_tag = 'JSONSerializable'
372
+ ...
373
+ ... def __init__(self, n):
374
+ ... self.n = n
375
+ ...
376
+ ... def encode_json_obj(self):
377
+ ... return self.n
378
+ ...
379
+ ... @classmethod
380
+ ... def decode_json_obj(cls, obj):
381
+ ... n = obj
382
+ ... return cls(n)
383
+ ...
384
+ >>> JSONTaggedEncoder().encode(JSONSerializable(1))
385
+ '{"!JSONSerializable": 1}'
386
+ >>> JSONTaggedDecoder().decode('{"!JSONSerializable": 1}').n
387
+ 1
env-llmeval/lib/python3.10/site-packages/nltk/test/discourse.doctest ADDED
@@ -0,0 +1,552 @@
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==================
5
+ Discourse Checking
6
+ ==================
7
+
8
+ >>> from nltk import *
9
+ >>> from nltk.sem import logic
10
+ >>> logic._counter._value = 0
11
+
12
+ Setup
13
+ =====
14
+
15
+ >>> from nltk.test.childes_fixt import setup_module
16
+ >>> setup_module()
17
+
18
+ Introduction
19
+ ============
20
+
21
+ The NLTK discourse module makes it possible to test consistency and
22
+ redundancy of simple discourses, using theorem-proving and
23
+ model-building from `nltk.inference`.
24
+
25
+ The ``DiscourseTester`` constructor takes a list of sentences as a
26
+ parameter.
27
+
28
+ >>> dt = DiscourseTester(['a boxer walks', 'every boxer chases a girl'])
29
+
30
+ The ``DiscourseTester`` parses each sentence into a list of logical
31
+ forms. Once we have created a ``DiscourseTester`` object, we can
32
+ inspect various properties of the discourse. First off, we might want
33
+ to double-check what sentences are currently stored as the discourse.
34
+
35
+ >>> dt.sentences()
36
+ s0: a boxer walks
37
+ s1: every boxer chases a girl
38
+
39
+ As you will see, each sentence receives an identifier `s`\ :subscript:`i`.
40
+ We might also want to check what grammar the ``DiscourseTester`` is
41
+ using (by default, ``book_grammars/discourse.fcfg``):
42
+
43
+ >>> dt.grammar()
44
+ % start S
45
+ # Grammar Rules
46
+ S[SEM = <app(?subj,?vp)>] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp]
47
+ NP[NUM=?n,SEM=<app(?det,?nom)> ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom]
48
+ NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np]
49
+ ...
50
+
51
+ A different grammar can be invoked by using the optional ``gramfile``
52
+ parameter when a ``DiscourseTester`` object is created.
53
+
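+ One way to do this (a sketch, assuming ``CfgReadingCommand`` from
+ ``nltk.inference.discourse``, which wraps a grammar file as a reading
+ command and can be passed as the second constructor argument):
+
+ >>> from nltk.inference.discourse import CfgReadingCommand  # doctest: +SKIP
+ >>> rc_alt = CfgReadingCommand('grammars/book_grammars/discourse.fcfg')  # doctest: +SKIP
+ >>> dt_alt = DiscourseTester(['a boxer walks'], rc_alt)  # doctest: +SKIP
+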
54
+ Readings and Threads
55
+ ====================
56
+
57
+ Depending on
58
+ the grammar used, we may find some sentences have more than one
59
+ logical form. To check this, use the ``readings()`` method. Given a
60
+ sentence identifier of the form `s`\ :subscript:`i`, each reading of
61
+ that sentence is given an identifier `s`\ :sub:`i`-`r`\ :sub:`j`.
62
+
63
+
64
+ >>> dt.readings()
65
+ <BLANKLINE>
66
+ s0 readings:
67
+ <BLANKLINE>
68
+ s0-r0: exists z1.(boxer(z1) & walk(z1))
69
+ s0-r1: exists z1.(boxerdog(z1) & walk(z1))
70
+ <BLANKLINE>
71
+ s1 readings:
72
+ <BLANKLINE>
73
+ s1-r0: all z2.(boxer(z2) -> exists z3.(girl(z3) & chase(z2,z3)))
74
+ s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
75
+
76
+
77
+ In this case, the only source of ambiguity lies in the word *boxer*,
78
+ which receives two translations: ``boxer`` and ``boxerdog``. The
79
+ intention is that one of these corresponds to the ``person`` sense and
80
+ one to the ``dog`` sense. In principle, we would also expect to see a
81
+ quantifier scope ambiguity in ``s1``. However, the simple grammar we
82
+ are using, namely ``discourse.fcfg``, doesn't support quantifier
+ scope ambiguity.
84
+
85
+ We can also investigate the readings of a specific sentence:
86
+
87
+ >>> dt.readings('a boxer walks')
88
+ The sentence 'a boxer walks' has these readings:
89
+ exists x.(boxer(x) & walk(x))
90
+ exists x.(boxerdog(x) & walk(x))
91
+
92
+ Given that each sentence is two-ways ambiguous, we potentially have
93
+ four different discourse 'threads', taking all combinations of
94
+ readings. To see these, specify the ``threaded=True`` parameter on
95
+ the ``readings()`` method. Again, each thread is assigned an
96
+ identifier of the form `d`\ :sub:`i`. Following the identifier is a
97
+ list of the readings that constitute that thread.
98
+
99
+ >>> dt.readings(threaded=True)
100
+ d0: ['s0-r0', 's1-r0']
101
+ d1: ['s0-r0', 's1-r1']
102
+ d2: ['s0-r1', 's1-r0']
103
+ d3: ['s0-r1', 's1-r1']
104
+
105
+ Of course, this simple-minded approach doesn't scale: a discourse with, say, three
106
+ sentences, each of which has 3 readings, will generate 27 different
107
+ threads. It is an interesting exercise to consider how to manage
108
+ discourse ambiguity more efficiently.
109
+
110
+ Checking Consistency
111
+ ====================
112
+
113
+ Now, we can check whether some or all of the discourse threads are
114
+ consistent, using the ``models()`` method. With no parameter, this
115
+ method will try to find a model for every discourse thread in the
116
+ current discourse. However, we can also specify just one thread, say ``d1``.
117
+
118
+ >>> dt.models('d1')
119
+ --------------------------------------------------------------------------------
120
+ Model for Discourse Thread d1
121
+ --------------------------------------------------------------------------------
122
+ % number = 1
123
+ % seconds = 0
124
+ <BLANKLINE>
125
+ % Interpretation of size 2
126
+ <BLANKLINE>
127
+ c1 = 0.
128
+ <BLANKLINE>
129
+ f1(0) = 0.
130
+ f1(1) = 0.
131
+ <BLANKLINE>
132
+ boxer(0).
133
+ - boxer(1).
134
+ <BLANKLINE>
135
+ - boxerdog(0).
136
+ - boxerdog(1).
137
+ <BLANKLINE>
138
+ - girl(0).
139
+ - girl(1).
140
+ <BLANKLINE>
141
+ walk(0).
142
+ - walk(1).
143
+ <BLANKLINE>
144
+ - chase(0,0).
145
+ - chase(0,1).
146
+ - chase(1,0).
147
+ - chase(1,1).
148
+ <BLANKLINE>
149
+ Consistent discourse: d1 ['s0-r0', 's1-r1']:
150
+ s0-r0: exists z1.(boxer(z1) & walk(z1))
151
+ s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
152
+ <BLANKLINE>
153
+
154
+ There are various formats for rendering **Mace4** models --- here,
155
+ we have used the 'cooked' format (which is intended to be
156
+ human-readable). There are a number of points to note.
157
+
158
+ #. The entities in the domain are all treated as non-negative
159
+ integers. In this case, there are only two entities, ``0`` and
160
+ ``1``.
161
+
162
+ #. The ``-`` symbol indicates negation. So ``0`` is the only
163
+ ``boxerdog`` and the only thing that ``walk``\ s. Nothing is a
164
+ ``boxer``, or a ``girl`` or in the ``chase`` relation. Thus the
165
+ universal sentence is vacuously true.
166
+
167
+ #. ``c1`` is an introduced constant that denotes ``0``.
168
+
169
+ #. ``f1`` is a Skolem function, but it plays no significant role in
170
+ this model.
171
+
172
+
173
+ We might now want to add another sentence to the discourse, and there
+ is a method ``add_sentence()`` for doing just this.
175
+
176
+ >>> dt.add_sentence('John is a boxer')
177
+ >>> dt.sentences()
178
+ s0: a boxer walks
179
+ s1: every boxer chases a girl
180
+ s2: John is a boxer
181
+
182
+ We can now test all the properties as before; here, we just show a
183
+ couple of them.
184
+
185
+ >>> dt.readings()
186
+ <BLANKLINE>
187
+ s0 readings:
188
+ <BLANKLINE>
189
+ s0-r0: exists z1.(boxer(z1) & walk(z1))
190
+ s0-r1: exists z1.(boxerdog(z1) & walk(z1))
191
+ <BLANKLINE>
192
+ s1 readings:
193
+ <BLANKLINE>
194
+ s1-r0: all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
195
+ s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
196
+ <BLANKLINE>
197
+ s2 readings:
198
+ <BLANKLINE>
199
+ s2-r0: boxer(John)
200
+ s2-r1: boxerdog(John)
201
+ >>> dt.readings(threaded=True)
202
+ d0: ['s0-r0', 's1-r0', 's2-r0']
203
+ d1: ['s0-r0', 's1-r0', 's2-r1']
204
+ d2: ['s0-r0', 's1-r1', 's2-r0']
205
+ d3: ['s0-r0', 's1-r1', 's2-r1']
206
+ d4: ['s0-r1', 's1-r0', 's2-r0']
207
+ d5: ['s0-r1', 's1-r0', 's2-r1']
208
+ d6: ['s0-r1', 's1-r1', 's2-r0']
209
+ d7: ['s0-r1', 's1-r1', 's2-r1']
210
+
211
+ If you are interested in a particular thread, the ``expand_threads()``
212
+ method will remind you of what readings it consists of:
213
+
214
+ >>> thread = dt.expand_threads('d1')
215
+ >>> for rid, reading in thread:
216
+ ... print(rid, str(reading.normalize()))
217
+ s0-r0 exists z1.(boxer(z1) & walk(z1))
218
+ s1-r0 all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
219
+ s2-r1 boxerdog(John)
220
+
221
+ Suppose we have already defined a discourse, as follows:
222
+
223
+ >>> dt = DiscourseTester(['A student dances', 'Every student is a person'])
224
+
225
+ Now, when we add a new sentence, is it consistent with what we already
226
+ have? The ``consistchk=True`` parameter of ``add_sentence()`` allows
227
+ us to check:
228
+
229
+ >>> dt.add_sentence('No person dances', consistchk=True)
230
+ Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
231
+ s0-r0: exists z1.(student(z1) & dance(z1))
232
+ s1-r0: all z1.(student(z1) -> person(z1))
233
+ s2-r0: -exists z1.(person(z1) & dance(z1))
234
+ <BLANKLINE>
235
+ >>> dt.readings()
236
+ <BLANKLINE>
237
+ s0 readings:
238
+ <BLANKLINE>
239
+ s0-r0: exists z1.(student(z1) & dance(z1))
240
+ <BLANKLINE>
241
+ s1 readings:
242
+ <BLANKLINE>
243
+ s1-r0: all z1.(student(z1) -> person(z1))
244
+ <BLANKLINE>
245
+ s2 readings:
246
+ <BLANKLINE>
247
+ s2-r0: -exists z1.(person(z1) & dance(z1))
248
+
249
+ So let's retract the inconsistent sentence:
250
+
251
+ >>> dt.retract_sentence('No person dances', verbose=True)
252
+ Current sentences are
253
+ s0: A student dances
254
+ s1: Every student is a person
255
+
256
+ We can now verify that the result is consistent.
257
+
258
+ >>> dt.models()
259
+ --------------------------------------------------------------------------------
260
+ Model for Discourse Thread d0
261
+ --------------------------------------------------------------------------------
262
+ % number = 1
263
+ % seconds = 0
264
+ <BLANKLINE>
265
+ % Interpretation of size 2
266
+ <BLANKLINE>
267
+ c1 = 0.
268
+ <BLANKLINE>
269
+ dance(0).
270
+ - dance(1).
271
+ <BLANKLINE>
272
+ person(0).
273
+ - person(1).
274
+ <BLANKLINE>
275
+ student(0).
276
+ - student(1).
277
+ <BLANKLINE>
278
+ Consistent discourse: d0 ['s0-r0', 's1-r0']:
279
+ s0-r0: exists z1.(student(z1) & dance(z1))
280
+ s1-r0: all z1.(student(z1) -> person(z1))
281
+ <BLANKLINE>
282
+
283
+ Checking Informativity
284
+ ======================
285
+
286
+ Let's assume that we are still trying to extend the discourse *A
287
+ student dances.* *Every student is a person.* We add a new sentence,
288
+ but this time, we check whether it is informative with respect to what
289
+ has gone before.
290
+
291
+ >>> dt.add_sentence('A person dances', informchk=True)
292
+ Sentence 'A person dances' under reading 'exists x.(person(x) & dance(x))':
293
+ Not informative relative to thread 'd0'
294
+
295
+ In fact, we are just checking whether the new sentence is entailed by
296
+ the preceding discourse.
297
+
298
+ >>> dt.models()
299
+ --------------------------------------------------------------------------------
300
+ Model for Discourse Thread d0
301
+ --------------------------------------------------------------------------------
302
+ % number = 1
303
+ % seconds = 0
304
+ <BLANKLINE>
305
+ % Interpretation of size 2
306
+ <BLANKLINE>
307
+ c1 = 0.
308
+ <BLANKLINE>
309
+ c2 = 0.
310
+ <BLANKLINE>
311
+ dance(0).
312
+ - dance(1).
313
+ <BLANKLINE>
314
+ person(0).
315
+ - person(1).
316
+ <BLANKLINE>
317
+ student(0).
318
+ - student(1).
319
+ <BLANKLINE>
320
+ Consistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
321
+ s0-r0: exists z1.(student(z1) & dance(z1))
322
+ s1-r0: all z1.(student(z1) -> person(z1))
323
+ s2-r0: exists z1.(person(z1) & dance(z1))
324
+ <BLANKLINE>
325
+
326
+
327
+
328
+ Adding Background Knowledge
329
+ ===========================
330
+
331
+ Let's build a new discourse, and look at the readings of the component sentences:
332
+
333
+ >>> dt = DiscourseTester(['Vincent is a boxer', 'Fido is a boxer', 'Vincent is married', 'Fido barks'])
334
+ >>> dt.readings()
335
+ <BLANKLINE>
336
+ s0 readings:
337
+ <BLANKLINE>
338
+ s0-r0: boxer(Vincent)
339
+ s0-r1: boxerdog(Vincent)
340
+ <BLANKLINE>
341
+ s1 readings:
342
+ <BLANKLINE>
343
+ s1-r0: boxer(Fido)
344
+ s1-r1: boxerdog(Fido)
345
+ <BLANKLINE>
346
+ s2 readings:
347
+ <BLANKLINE>
348
+ s2-r0: married(Vincent)
349
+ <BLANKLINE>
350
+ s3 readings:
351
+ <BLANKLINE>
352
+ s3-r0: bark(Fido)
353
+
354
+ This gives us a lot of threads:
355
+
356
+ >>> dt.readings(threaded=True)
357
+ d0: ['s0-r0', 's1-r0', 's2-r0', 's3-r0']
358
+ d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']
359
+ d2: ['s0-r1', 's1-r0', 's2-r0', 's3-r0']
360
+ d3: ['s0-r1', 's1-r1', 's2-r0', 's3-r0']
361
+
362
+
363
+ We can eliminate some of the readings, and hence some of the threads,
364
+ by adding background information.
365
+
366
+ >>> import nltk.data
367
+ >>> bg = nltk.data.load('grammars/book_grammars/background.fol')
368
+ >>> dt.add_background(bg)
369
+ >>> dt.background()
370
+ all x.(boxerdog(x) -> dog(x))
371
+ all x.(boxer(x) -> person(x))
372
+ all x.-(dog(x) & person(x))
373
+ all x.(married(x) <-> exists y.marry(x,y))
374
+ all x.(bark(x) -> dog(x))
375
+ all x y.(marry(x,y) -> (person(x) & person(y)))
376
+ -(Vincent = Mia)
377
+ -(Vincent = Fido)
378
+ -(Mia = Fido)
379
+
380
+ The background information allows us to reject three of the threads as
381
+ inconsistent. To see what remains, use the ``filter=True`` parameter
382
+ on ``readings()``.
383
+
384
+ >>> dt.readings(filter=True)
385
+ d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']
386
+
387
+ The ``models()`` method gives us more information about the surviving thread.
388
+
389
+ >>> dt.models()
390
+ --------------------------------------------------------------------------------
391
+ Model for Discourse Thread d0
392
+ --------------------------------------------------------------------------------
393
+ No model found!
394
+ <BLANKLINE>
395
+ --------------------------------------------------------------------------------
396
+ Model for Discourse Thread d1
397
+ --------------------------------------------------------------------------------
398
+ % number = 1
399
+ % seconds = 0
400
+ <BLANKLINE>
401
+ % Interpretation of size 3
402
+ <BLANKLINE>
403
+ Fido = 0.
404
+ <BLANKLINE>
405
+ Mia = 1.
406
+ <BLANKLINE>
407
+ Vincent = 2.
408
+ <BLANKLINE>
409
+ f1(0) = 0.
410
+ f1(1) = 0.
411
+ f1(2) = 2.
412
+ <BLANKLINE>
413
+ bark(0).
414
+ - bark(1).
415
+ - bark(2).
416
+ <BLANKLINE>
417
+ - boxer(0).
418
+ - boxer(1).
419
+ boxer(2).
420
+ <BLANKLINE>
421
+ boxerdog(0).
422
+ - boxerdog(1).
423
+ - boxerdog(2).
424
+ <BLANKLINE>
425
+ dog(0).
426
+ - dog(1).
427
+ - dog(2).
428
+ <BLANKLINE>
429
+ - married(0).
430
+ - married(1).
431
+ married(2).
432
+ <BLANKLINE>
433
+ - person(0).
434
+ - person(1).
435
+ person(2).
436
+ <BLANKLINE>
437
+ - marry(0,0).
438
+ - marry(0,1).
439
+ - marry(0,2).
440
+ - marry(1,0).
441
+ - marry(1,1).
442
+ - marry(1,2).
443
+ - marry(2,0).
444
+ - marry(2,1).
445
+ marry(2,2).
446
+ <BLANKLINE>
447
+ --------------------------------------------------------------------------------
448
+ Model for Discourse Thread d2
449
+ --------------------------------------------------------------------------------
450
+ No model found!
451
+ <BLANKLINE>
452
+ --------------------------------------------------------------------------------
453
+ Model for Discourse Thread d3
454
+ --------------------------------------------------------------------------------
455
+ No model found!
456
+ <BLANKLINE>
457
+ Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0', 's3-r0']:
458
+ s0-r0: boxer(Vincent)
459
+ s1-r0: boxer(Fido)
460
+ s2-r0: married(Vincent)
461
+ s3-r0: bark(Fido)
462
+ <BLANKLINE>
463
+ Consistent discourse: d1 ['s0-r0', 's1-r1', 's2-r0', 's3-r0']:
464
+ s0-r0: boxer(Vincent)
465
+ s1-r1: boxerdog(Fido)
466
+ s2-r0: married(Vincent)
467
+ s3-r0: bark(Fido)
468
+ <BLANKLINE>
469
+ Inconsistent discourse: d2 ['s0-r1', 's1-r0', 's2-r0', 's3-r0']:
470
+ s0-r1: boxerdog(Vincent)
471
+ s1-r0: boxer(Fido)
472
+ s2-r0: married(Vincent)
473
+ s3-r0: bark(Fido)
474
+ <BLANKLINE>
475
+ Inconsistent discourse: d3 ['s0-r1', 's1-r1', 's2-r0', 's3-r0']:
476
+ s0-r1: boxerdog(Vincent)
477
+ s1-r1: boxerdog(Fido)
478
+ s2-r0: married(Vincent)
479
+ s3-r0: bark(Fido)
480
+ <BLANKLINE>
481
+
482
+
483
+ .. This will not be visible in the html output: create a tempdir to
484
+ play in.
485
+ >>> import tempfile, os
486
+ >>> tempdir = tempfile.mkdtemp()
487
+ >>> old_dir = os.path.abspath('.')
488
+ >>> os.chdir(tempdir)
489
+
490
+ In order to play around with your own version of background knowledge,
491
+ you might want to start off with a local copy of ``background.fol``:
492
+
493
+ >>> nltk.data.retrieve('grammars/book_grammars/background.fol')
494
+ Retrieving 'nltk:grammars/book_grammars/background.fol', saving to 'background.fol'
495
+
496
+ After you have modified the file, the ``load_fol()`` function will parse
497
+ the strings in the file into expressions of ``nltk.sem.logic``.
498
+
499
+ >>> from nltk.inference.discourse import load_fol
500
+ >>> mybg = load_fol(open('background.fol').read())
501
+
502
+ The result can be loaded as an argument of ``add_background()`` in the
503
+ manner shown earlier.
504
+
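+ For example (a sketch; this simply re-adds the axioms we already loaded
+ above, so it is skipped when the file is run as a doctest):
+
+ >>> dt.add_background(mybg)  # doctest: +SKIP
+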
505
+ .. This will not be visible in the html output: clean up the tempdir.
506
+ >>> os.chdir(old_dir)
507
+ >>> for f in os.listdir(tempdir):
508
+ ... os.remove(os.path.join(tempdir, f))
509
+ >>> os.rmdir(tempdir)
510
+ >>> nltk.data.clear_cache()
511
+
512
+
513
+ Regression Testing from book
514
+ ============================
515
+
516
+ >>> logic._counter._value = 0
517
+
518
+ >>> from nltk.tag import RegexpTagger
519
+ >>> tagger = RegexpTagger(
520
+ ... [('^(chases|runs)$', 'VB'),
521
+ ... ('^(a)$', 'ex_quant'),
522
+ ... ('^(every)$', 'univ_quant'),
523
+ ... ('^(dog|boy)$', 'NN'),
524
+ ... ('^(He)$', 'PRP')
525
+ ... ])
526
+ >>> rc = DrtGlueReadingCommand(depparser=MaltParser(tagger=tagger))
527
+ >>> dt = DiscourseTester(map(str.split, ['Every dog chases a boy', 'He runs']), rc)
528
+ >>> dt.readings()
529
+ <BLANKLINE>
530
+ s0 readings:
531
+ <BLANKLINE>
532
+ s0-r0: ([z2],[boy(z2), (([z5],[dog(z5)]) -> ([],[chases(z5,z2)]))])
533
+ s0-r1: ([],[(([z1],[dog(z1)]) -> ([z2],[boy(z2), chases(z1,z2)]))])
534
+ <BLANKLINE>
535
+ s1 readings:
536
+ <BLANKLINE>
537
+ s1-r0: ([z1],[PRO(z1), runs(z1)])
538
+ >>> dt.readings(show_thread_readings=True)
539
+ d0: ['s0-r0', 's1-r0'] : ([z1,z2],[boy(z1), (([z3],[dog(z3)]) -> ([],[chases(z3,z1)])), (z2 = z1), runs(z2)])
540
+ d1: ['s0-r1', 's1-r0'] : INVALID: AnaphoraResolutionException
541
+ >>> dt.readings(filter=True, show_thread_readings=True)
542
+ d0: ['s0-r0', 's1-r0'] : ([z1,z3],[boy(z1), (([z2],[dog(z2)]) -> ([],[chases(z2,z1)])), (z3 = z1), runs(z3)])
543
+
544
+ >>> logic._counter._value = 0
545
+
546
+ >>> from nltk.parse import FeatureEarleyChartParser
547
+ >>> from nltk.sem.drt import DrtParser
548
+ >>> grammar = nltk.data.load('grammars/book_grammars/drt.fcfg', logic_parser=DrtParser())
549
+ >>> parser = FeatureEarleyChartParser(grammar, trace=0)
550
+ >>> trees = parser.parse('Angus owns a dog'.split())
551
+ >>> print(list(trees)[0].label()['SEM'].simplify().normalize())
552
+ ([z1,z2],[Angus(z1), dog(z2), own(z1,z2)])
env-llmeval/lib/python3.10/site-packages/nltk/test/featgram.doctest ADDED
@@ -0,0 +1,610 @@
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =========================
5
+ Feature Grammar Parsing
6
+ =========================
7
+
8
+ .. definitions from nltk_book/definitions.rst
9
+
10
+ .. role:: feat
11
+ :class: feature
12
+ .. role:: fval
13
+ :class: fval
14
+ .. |rarr| unicode:: U+2192 .. right arrow
15
+ .. |dot| unicode:: U+2022 .. bullet
16
+ .. |pi| unicode:: U+03C0
17
+
18
+ Grammars can be parsed from strings.
19
+
20
+ >>> import nltk
21
+ >>> from nltk import grammar, parse
22
+ >>> g = """
23
+ ... % start DP
24
+ ... DP[AGR=?a] -> D[AGR=?a] N[AGR=?a]
25
+ ... D[AGR=[NUM='sg', PERS=3]] -> 'this' | 'that'
26
+ ... D[AGR=[NUM='pl', PERS=3]] -> 'these' | 'those'
27
+ ... D[AGR=[NUM='pl', PERS=1]] -> 'we'
28
+ ... D[AGR=[PERS=2]] -> 'you'
29
+ ... N[AGR=[NUM='sg', GND='m']] -> 'boy'
30
+ ... N[AGR=[NUM='pl', GND='m']] -> 'boys'
31
+ ... N[AGR=[NUM='sg', GND='f']] -> 'girl'
32
+ ... N[AGR=[NUM='pl', GND='f']] -> 'girls'
33
+ ... N[AGR=[NUM='sg']] -> 'student'
34
+ ... N[AGR=[NUM='pl']] -> 'students'
35
+ ... """
36
+ >>> grammar = grammar.FeatureGrammar.fromstring(g)
37
+ >>> tokens = 'these girls'.split()
38
+ >>> parser = parse.FeatureEarleyChartParser(grammar)
39
+ >>> trees = parser.parse(tokens)
40
+ >>> for tree in trees: print(tree)
41
+ (DP[AGR=[GND='f', NUM='pl', PERS=3]]
42
+ (D[AGR=[NUM='pl', PERS=3]] these)
43
+ (N[AGR=[GND='f', NUM='pl']] girls))
44
+
45
+ In general, when we are trying to develop even a very small grammar,
46
+ it is convenient to put the rules in a file where they can be edited,
47
+ tested and revised. Let's assume that we have saved feat0cfg as a file named
48
+ ``'feat0.fcfg'`` and placed it in the NLTK ``data`` directory. We can
49
+ inspect it as follows:
50
+
51
+ >>> nltk.data.show_cfg('grammars/book_grammars/feat0.fcfg')
52
+ % start S
53
+ # ###################
54
+ # Grammar Productions
55
+ # ###################
56
+ # S expansion productions
57
+ S -> NP[NUM=?n] VP[NUM=?n]
58
+ # NP expansion productions
59
+ NP[NUM=?n] -> N[NUM=?n]
60
+ NP[NUM=?n] -> PropN[NUM=?n]
61
+ NP[NUM=?n] -> Det[NUM=?n] N[NUM=?n]
62
+ NP[NUM=pl] -> N[NUM=pl]
63
+ # VP expansion productions
64
+ VP[TENSE=?t, NUM=?n] -> IV[TENSE=?t, NUM=?n]
65
+ VP[TENSE=?t, NUM=?n] -> TV[TENSE=?t, NUM=?n] NP
66
+ # ###################
67
+ # Lexical Productions
68
+ # ###################
69
+ Det[NUM=sg] -> 'this' | 'every'
70
+ Det[NUM=pl] -> 'these' | 'all'
71
+ Det -> 'the' | 'some' | 'several'
72
+ PropN[NUM=sg]-> 'Kim' | 'Jody'
73
+ N[NUM=sg] -> 'dog' | 'girl' | 'car' | 'child'
74
+ N[NUM=pl] -> 'dogs' | 'girls' | 'cars' | 'children'
75
+ IV[TENSE=pres, NUM=sg] -> 'disappears' | 'walks'
76
+ TV[TENSE=pres, NUM=sg] -> 'sees' | 'likes'
77
+ IV[TENSE=pres, NUM=pl] -> 'disappear' | 'walk'
78
+ TV[TENSE=pres, NUM=pl] -> 'see' | 'like'
79
+ IV[TENSE=past] -> 'disappeared' | 'walked'
80
+ TV[TENSE=past] -> 'saw' | 'liked'
81
+
82
+ Assuming we have saved feat0cfg as a file named
83
+ ``'feat0.fcfg'``, the function ``parse.load_parser`` allows us to
84
+ read the grammar into NLTK, ready for use in parsing.
85
+
86
+
87
+ >>> cp = parse.load_parser('grammars/book_grammars/feat0.fcfg', trace=1)
88
+ >>> sent = 'Kim likes children'
89
+ >>> tokens = sent.split()
90
+ >>> tokens
91
+ ['Kim', 'likes', 'children']
92
+ >>> trees = cp.parse(tokens)
93
+ |.Kim .like.chil.|
94
+ |[----] . .| [0:1] 'Kim'
95
+ |. [----] .| [1:2] 'likes'
96
+ |. . [----]| [2:3] 'children'
97
+ |[----] . .| [0:1] PropN[NUM='sg'] -> 'Kim' *
98
+ |[----] . .| [0:1] NP[NUM='sg'] -> PropN[NUM='sg'] *
99
+ |[----> . .| [0:1] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'sg'}
100
+ |. [----] .| [1:2] TV[NUM='sg', TENSE='pres'] -> 'likes' *
101
+ |. [----> .| [1:2] VP[NUM=?n, TENSE=?t] -> TV[NUM=?n, TENSE=?t] * NP[] {?n: 'sg', ?t: 'pres'}
102
+ |. . [----]| [2:3] N[NUM='pl'] -> 'children' *
103
+ |. . [----]| [2:3] NP[NUM='pl'] -> N[NUM='pl'] *
104
+ |. . [---->| [2:3] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'pl'}
105
+ |. [---------]| [1:3] VP[NUM='sg', TENSE='pres'] -> TV[NUM='sg', TENSE='pres'] NP[] *
106
+ |[==============]| [0:3] S[] -> NP[NUM='sg'] VP[NUM='sg'] *
107
+ >>> for tree in trees: print(tree)
108
+ (S[]
109
+ (NP[NUM='sg'] (PropN[NUM='sg'] Kim))
110
+ (VP[NUM='sg', TENSE='pres']
111
+ (TV[NUM='sg', TENSE='pres'] likes)
112
+ (NP[NUM='pl'] (N[NUM='pl'] children))))
113
+
114
+ The parser works directly with
115
+ the underspecified productions given by the grammar. That is, the
116
+ Predictor rule does not attempt to compile out all admissible feature
117
+ combinations before trying to expand the non-terminals on the left hand
118
+ side of a production. However, when the Scanner matches an input word
119
+ against a lexical production that has been predicted, the new edge will
120
+ typically contain fully specified features; e.g., the edge
121
+ [PropN[`num`:feat: = `sg`:fval:] |rarr| 'Kim', (0, 1)]. Recall from
122
+ Chapter 8 that the Fundamental (or Completer) Rule in
123
+ standard CFGs is used to combine an incomplete edge that's expecting a
124
+ nonterminal *B* with a following, complete edge whose left hand side
125
+ matches *B*. In our current setting, rather than checking for a
126
+ complete match, we test whether the expected category *B* will
127
+ unify with the left hand side *B'* of a following complete
128
+ edge. We will explain in more detail in Section 9.2 how
129
+ unification works; for the moment, it is enough to know that as a
130
+ result of unification, any variable values of features in *B* will be
131
+ instantiated by constant values in the corresponding feature structure
132
+ in *B'*, and these instantiated values will be used in the new edge
133
+ added by the Completer. This instantiation can be seen, for example,
134
+ in the edge
135
+ [NP [`num`:feat:\ =\ `sg`:fval:] |rarr| PropN[`num`:feat:\ =\ `sg`:fval:] |dot|, (0, 1)]
136
+ in Example 9.2, where the feature `num`:feat: has been assigned the value `sg`:fval:.
137
+
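+ The unification step that the Completer relies on can be reproduced
+ directly on feature structures (a minimal sketch with made-up
+ categories, using the ``FeatStruct`` class introduced just below):
+
+ >>> expected = nltk.FeatStruct("[CAT=NP, NUM=?n]")
+ >>> found = nltk.FeatStruct("[CAT=NP, NUM=sg]")
+ >>> nltk.unify(expected, found) == nltk.FeatStruct("[CAT=NP, NUM=sg]")
+ True
+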
138
+ Feature structures in NLTK are declared with the ``FeatStruct()``
+ constructor. Atomic feature values can be strings or integers.
140
+
141
+ >>> fs1 = nltk.FeatStruct(TENSE='past', NUM='sg')
142
+ >>> print(fs1)
143
+ [ NUM = 'sg' ]
144
+ [ TENSE = 'past' ]
145
+
146
+ We can think of a feature structure as being like a Python dictionary,
147
+ and access its values by indexing in the usual way.
148
+
149
+ >>> fs1 = nltk.FeatStruct(PER=3, NUM='pl', GND='fem')
150
+ >>> print(fs1['GND'])
151
+ fem
152
+
153
+ We can also define feature structures which have complex values, as
154
+ discussed earlier.
155
+
156
+ >>> fs2 = nltk.FeatStruct(POS='N', AGR=fs1)
157
+ >>> print(fs2)
158
+ [ [ GND = 'fem' ] ]
159
+ [ AGR = [ NUM = 'pl' ] ]
160
+ [ [ PER = 3 ] ]
161
+ [ ]
162
+ [ POS = 'N' ]
163
+ >>> print(fs2['AGR'])
164
+ [ GND = 'fem' ]
165
+ [ NUM = 'pl' ]
166
+ [ PER = 3 ]
167
+ >>> print(fs2['AGR']['PER'])
168
+ 3
169
+
170
+ Feature structures can also be constructed by passing a bracketed string
+ representation directly to ``nltk.FeatStruct``. Note that in this case,
+ atomic feature values do not need to be enclosed in quotes.
173
+
174
+ >>> f1 = nltk.FeatStruct("[NUMBER = sg]")
175
+ >>> f2 = nltk.FeatStruct("[PERSON = 3]")
176
+ >>> print(nltk.unify(f1, f2))
177
+ [ NUMBER = 'sg' ]
178
+ [ PERSON = 3 ]
179
+
180
+ >>> f1 = nltk.FeatStruct("[A = [B = b, D = d]]")
181
+ >>> f2 = nltk.FeatStruct("[A = [C = c, D = d]]")
182
+ >>> print(nltk.unify(f1, f2))
183
+ [ [ B = 'b' ] ]
184
+ [ A = [ C = 'c' ] ]
185
+ [ [ D = 'd' ] ]
186
+
187
+
188
+ Feature Structures as Graphs
189
+ ----------------------------
190
+
191
+ Feature structures are not inherently tied to linguistic objects; they are
192
+ general purpose structures for representing knowledge. For example, we
193
+ could encode information about a person in a feature structure:
194
+
195
+ >>> person01 = nltk.FeatStruct("[NAME=Lee, TELNO='01 27 86 42 96',AGE=33]")
196
+ >>> print(person01)
197
+ [ AGE = 33 ]
198
+ [ NAME = 'Lee' ]
199
+ [ TELNO = '01 27 86 42 96' ]
200
+
201
+ There are a number of notations for representing reentrancy in
202
+ matrix-style representations of feature structures. In NLTK, we adopt
203
+ the following convention: the first occurrence of a shared feature structure
204
+ is prefixed with an integer in parentheses, such as ``(1)``, and any
205
+ subsequent reference to that structure uses the notation
206
+ ``->(1)``, as shown below.
207
+
208
+
209
+ >>> fs = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
210
+ ... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")
211
+ >>> print(fs)
212
+ [ ADDRESS = (1) [ NUMBER = 74 ] ]
213
+ [ [ STREET = 'rue Pascal' ] ]
214
+ [ ]
215
+ [ NAME = 'Lee' ]
216
+ [ ]
217
+ [ SPOUSE = [ ADDRESS -> (1) ] ]
218
+ [ [ NAME = 'Kim' ] ]
219
+
220
+ There can be any number of tags within a single feature structure.
221
+
222
+ >>> fs3 = nltk.FeatStruct("[A=(1)[B=b], C=(2)[], D->(1), E->(2)]")
223
+ >>> print(fs3)
224
+ [ A = (1) [ B = 'b' ] ]
225
+ [ ]
226
+ [ C = (2) [] ]
227
+ [ ]
228
+ [ D -> (1) ]
229
+ [ E -> (2) ]
230
+ >>> fs1 = nltk.FeatStruct(NUMBER=74, STREET='rue Pascal')
231
+ >>> fs2 = nltk.FeatStruct(CITY='Paris')
232
+ >>> print(nltk.unify(fs1, fs2))
233
+ [ CITY = 'Paris' ]
234
+ [ NUMBER = 74 ]
235
+ [ STREET = 'rue Pascal' ]
236
+
237
+ Unification is symmetric:
238
+
239
+ >>> nltk.unify(fs1, fs2) == nltk.unify(fs2, fs1)
240
+ True
241
+
242
+ Unification is associative:
243
+
244
+ >>> fs3 = nltk.FeatStruct(TELNO='01 27 86 42 96')
245
+ >>> nltk.unify(nltk.unify(fs1, fs2), fs3) == nltk.unify(fs1, nltk.unify(fs2, fs3))
246
+ True
247
+
248
+ Unification between *FS*:math:`_0` and *FS*:math:`_1` will fail if the
249
+ two feature structures share a path |pi|,
250
+ but the value of |pi| in *FS*:math:`_0` is a distinct
251
+ atom from the value of |pi| in *FS*:math:`_1`. In NLTK,
252
+ this is implemented by setting the result of unification to be
253
+ ``None``.
254
+
255
+ >>> fs0 = nltk.FeatStruct(A='a')
256
+ >>> fs1 = nltk.FeatStruct(A='b')
257
+ >>> print(nltk.unify(fs0, fs1))
258
+ None
259
+
260
+ Now, if we look at how unification interacts with structure-sharing,
261
+ things become really interesting.
262
+
263
+
264
+
265
+ >>> fs0 = nltk.FeatStruct("""[NAME=Lee,
266
+ ... ADDRESS=[NUMBER=74,
267
+ ... STREET='rue Pascal'],
268
+ ... SPOUSE= [NAME=Kim,
269
+ ... ADDRESS=[NUMBER=74,
270
+ ... STREET='rue Pascal']]]""")
271
+ >>> print(fs0)
272
+ [ ADDRESS = [ NUMBER = 74 ] ]
273
+ [ [ STREET = 'rue Pascal' ] ]
274
+ [ ]
275
+ [ NAME = 'Lee' ]
276
+ [ ]
277
+ [ [ ADDRESS = [ NUMBER = 74 ] ] ]
278
+ [ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ]
279
+ [ [ ] ]
280
+ [ [ NAME = 'Kim' ] ]
281
+
282
+
283
+ >>> fs1 = nltk.FeatStruct("[SPOUSE=[ADDRESS=[CITY=Paris]]]")
284
+ >>> print(nltk.unify(fs0, fs1))
285
+ [ ADDRESS = [ NUMBER = 74 ] ]
286
+ [ [ STREET = 'rue Pascal' ] ]
287
+ [ ]
288
+ [ NAME = 'Lee' ]
289
+ [ ]
290
+ [ [ [ CITY = 'Paris' ] ] ]
291
+ [ [ ADDRESS = [ NUMBER = 74 ] ] ]
292
+ [ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ]
293
+ [ [ ] ]
294
+ [ [ NAME = 'Kim' ] ]
295
+
296
+ >>> fs2 = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
297
+ ... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")
298
+
299
+
300
+ >>> print(fs2)
301
+ [ ADDRESS = (1) [ NUMBER = 74 ] ]
302
+ [ [ STREET = 'rue Pascal' ] ]
303
+ [ ]
304
+ [ NAME = 'Lee' ]
305
+ [ ]
306
+ [ SPOUSE = [ ADDRESS -> (1) ] ]
307
+ [ [ NAME = 'Kim' ] ]
308
+
309
+
310
+ >>> print(nltk.unify(fs2, fs1))
311
+ [ [ CITY = 'Paris' ] ]
312
+ [ ADDRESS = (1) [ NUMBER = 74 ] ]
313
+ [ [ STREET = 'rue Pascal' ] ]
314
+ [ ]
315
+ [ NAME = 'Lee' ]
316
+ [ ]
317
+ [ SPOUSE = [ ADDRESS -> (1) ] ]
318
+ [ [ NAME = 'Kim' ] ]
319
+
320
+
321
+ >>> fs1 = nltk.FeatStruct("[ADDRESS1=[NUMBER=74, STREET='rue Pascal']]")
322
+ >>> fs2 = nltk.FeatStruct("[ADDRESS1=?x, ADDRESS2=?x]")
323
+ >>> print(fs2)
324
+ [ ADDRESS1 = ?x ]
325
+ [ ADDRESS2 = ?x ]
326
+ >>> print(nltk.unify(fs1, fs2))
327
+ [ ADDRESS1 = (1) [ NUMBER = 74 ] ]
328
+ [ [ STREET = 'rue Pascal' ] ]
329
+ [ ]
330
+ [ ADDRESS2 -> (1) ]
331
+
332
+
333
+
334
+
335
+ >>> sent = 'who do you claim that you like'
336
+ >>> tokens = sent.split()
337
+ >>> cp = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1)
338
+ >>> trees = cp.parse(tokens)
339
+ |.w.d.y.c.t.y.l.|
340
+ |[-] . . . . . .| [0:1] 'who'
341
+ |. [-] . . . . .| [1:2] 'do'
342
+ |. . [-] . . . .| [2:3] 'you'
343
+ |. . . [-] . . .| [3:4] 'claim'
344
+ |. . . . [-] . .| [4:5] 'that'
345
+ |. . . . . [-] .| [5:6] 'you'
346
+ |. . . . . . [-]| [6:7] 'like'
347
+ |# . . . . . . .| [0:0] NP[]/NP[] -> *
348
+ |. # . . . . . .| [1:1] NP[]/NP[] -> *
349
+ |. . # . . . . .| [2:2] NP[]/NP[] -> *
350
+ |. . . # . . . .| [3:3] NP[]/NP[] -> *
351
+ |. . . . # . . .| [4:4] NP[]/NP[] -> *
352
+ |. . . . . # . .| [5:5] NP[]/NP[] -> *
353
+ |. . . . . . # .| [6:6] NP[]/NP[] -> *
354
+ |. . . . . . . #| [7:7] NP[]/NP[] -> *
355
+ |[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
356
+ |[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
357
+ |[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
358
+ |[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
359
+ |. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
360
+ |. [-> . . . . .| [1:2] S[+INV] -> V[+AUX] * NP[] VP[] {}
361
+ |. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
362
+ |. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
363
+ |. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
364
+ |. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
365
+ |. . [-> . . . .| [2:3] S[-INV] -> NP[] * VP[] {}
366
+ |. . [-> . . . .| [2:3] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
367
+ |. . [-> . . . .| [2:3] S[-INV] -> NP[] * S[]/NP[] {}
368
+ |. [---> . . . .| [1:3] S[+INV] -> V[+AUX] NP[] * VP[] {}
369
+ |. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
370
+ |. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
371
+ |. . . [-> . . .| [3:4] VP[] -> V[-AUX, SUBCAT='clause'] * SBar[] {}
372
+ |. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
373
+ |. . . . [-] . .| [4:5] Comp[] -> 'that' *
374
+ |. . . . [-> . .| [4:5] SBar[] -> Comp[] * S[-INV] {}
375
+ |. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
376
+ |. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
377
+ |. . . . . [-> .| [5:6] S[-INV] -> NP[] * VP[] {}
378
+ |. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
379
+ |. . . . . [-> .| [5:6] S[-INV] -> NP[] * S[]/NP[] {}
380
+ |. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
381
+ |. . . . . . [->| [6:7] VP[] -> V[-AUX, SUBCAT='trans'] * NP[] {}
382
+ |. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
383
+ |. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
384
+ |. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
385
+ |. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
386
+ |. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
387
+ |. . [---------]| [2:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
388
+ |. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
389
+ |[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *
390
+
391
+ >>> trees = list(trees)
392
+ >>> for tree in trees: print(tree)
393
+ (S[-INV]
394
+ (NP[+WH] who)
395
+ (S[+INV]/NP[]
396
+ (V[+AUX] do)
397
+ (NP[-WH] you)
398
+ (VP[]/NP[]
399
+ (V[-AUX, SUBCAT='clause'] claim)
400
+ (SBar[]/NP[]
401
+ (Comp[] that)
402
+ (S[-INV]/NP[]
403
+ (NP[-WH] you)
404
+ (VP[]/NP[] (V[-AUX, SUBCAT='trans'] like) (NP[]/NP[] )))))))
405
+
406
+ A different parser should give the same parse trees, but perhaps in a different order:
407
+
408
+ >>> cp2 = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1,
409
+ ... parser=parse.FeatureEarleyChartParser)
410
+ >>> trees2 = cp2.parse(tokens)
411
+ |.w.d.y.c.t.y.l.|
412
+ |[-] . . . . . .| [0:1] 'who'
413
+ |. [-] . . . . .| [1:2] 'do'
414
+ |. . [-] . . . .| [2:3] 'you'
415
+ |. . . [-] . . .| [3:4] 'claim'
416
+ |. . . . [-] . .| [4:5] 'that'
417
+ |. . . . . [-] .| [5:6] 'you'
418
+ |. . . . . . [-]| [6:7] 'like'
419
+ |> . . . . . . .| [0:0] S[-INV] -> * NP[] VP[] {}
420
+ |> . . . . . . .| [0:0] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
421
+ |> . . . . . . .| [0:0] S[-INV] -> * NP[] S[]/NP[] {}
422
+ |> . . . . . . .| [0:0] S[-INV] -> * Adv[+NEG] S[+INV] {}
423
+ |> . . . . . . .| [0:0] S[+INV] -> * V[+AUX] NP[] VP[] {}
424
+ |> . . . . . . .| [0:0] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
425
+ |> . . . . . . .| [0:0] NP[+WH] -> * 'who' {}
426
+ |[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
427
+ |[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
428
+ |[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
429
+ |[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
430
+ |. > . . . . . .| [1:1] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
431
+ |. > . . . . . .| [1:1] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
432
+ |. > . . . . . .| [1:1] V[+AUX] -> * 'do' {}
433
+ |. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
434
+ |. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
435
+ |. > . . . . . .| [1:1] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
436
+ |. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
437
+ |. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
438
+ |. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
439
+ |. > . . . . . .| [1:1] VP[] -> * V[+AUX] VP[] {}
440
+ |. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
441
+ |. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
442
+ |. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
443
+ |. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
444
+ |. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
445
+ |. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
446
+ |. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
447
+ |. . > . . . . .| [2:2] VP[] -> * V[+AUX] VP[] {}
448
+ |. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
449
+ |. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
450
+ |. . > . . . . .| [2:2] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
451
+ |. . > . . . . .| [2:2] NP[-WH] -> * 'you' {}
452
+ |. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
453
+ |. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
454
+ |. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
455
+ |. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
456
+ |. . . > . . . .| [3:3] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
457
+ |. . . > . . . .| [3:3] V[-AUX, SUBCAT='clause'] -> * 'claim' {}
458
+ |. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
459
+ |. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
460
+ |. . . . > . . .| [4:4] SBar[]/?x[] -> * Comp[] S[-INV]/?x[] {}
461
+ |. . . . > . . .| [4:4] Comp[] -> * 'that' {}
462
+ |. . . . [-] . .| [4:5] Comp[] -> 'that' *
463
+ |. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
464
+ |. . . . . > . .| [5:5] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
465
+ |. . . . . > . .| [5:5] NP[-WH] -> * 'you' {}
466
+ |. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
467
+ |. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
468
+ |. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
469
+ |. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
470
+ |. . . . . . > .| [6:6] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
471
+ |. . . . . . > .| [6:6] V[-AUX, SUBCAT='trans'] -> * 'like' {}
472
+ |. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
473
+ |. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
474
+ |. . . . . . . #| [7:7] NP[]/NP[] -> *
475
+ |. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
476
+ |. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
477
+ |. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
478
+ |. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
479
+ |. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
480
+ |[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *
481
+
482
+ >>> sorted(trees) == sorted(trees2)
483
+ True
484
+
485
+
486
+ Let's load a German grammar:
487
+
488
+ >>> cp = parse.load_parser('grammars/book_grammars/german.fcfg', trace=0)
489
+ >>> sent = 'die Katze sieht den Hund'
490
+ >>> tokens = sent.split()
491
+ >>> trees = cp.parse(tokens)
492
+ >>> for tree in trees: print(tree)
493
+ (S[]
494
+ (NP[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom']
495
+ (Det[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom'] die)
496
+ (N[AGR=[GND='fem', NUM='sg', PER=3]] Katze))
497
+ (VP[AGR=[NUM='sg', PER=3]]
498
+ (TV[AGR=[NUM='sg', PER=3], OBJCASE='acc'] sieht)
499
+ (NP[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc']
500
+ (Det[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc'] den)
501
+ (N[AGR=[GND='masc', NUM='sg', PER=3]] Hund))))
502
+
503
+ Grammar with Binding Operators
504
+ ------------------------------
505
+ The bindop.fcfg grammar is a semantic grammar that uses lambda
506
+ calculus. Each element has a core semantics, which is a single lambda
507
+ calculus expression; and a set of binding operators, which bind
508
+ variables.
509
+
510
+ In order to make the binding operators work right, they need to
511
+ instantiate their bound variable every time they are added to the
512
+ chart. To do this, we use a special subclass of `Chart`, called
513
+ `InstantiateVarsChart`.
514
+
515
+ >>> from nltk.parse.featurechart import InstantiateVarsChart
516
+ >>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=1,
517
+ ... chart_class=InstantiateVarsChart)
518
+ >>> print(cp.grammar())
519
+ Grammar with 15 productions (start state = S[])
520
+ S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] VP[SEM=[BO=?b2, CORE=?vp]]
521
+ VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] NP[SEM=[BO=?b2, CORE=?obj]]
522
+ VP[SEM=?s] -> IV[SEM=?s]
523
+ NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] N[SEM=[BO=?b2, CORE=?n]]
524
+ Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a'
525
+ N[SEM=[BO={/}, CORE=<dog>]] -> 'dog'
526
+ N[SEM=[BO={/}, CORE=<dog>]] -> 'cat'
527
+ N[SEM=[BO={/}, CORE=<dog>]] -> 'mouse'
528
+ IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks'
529
+ IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'eats'
530
+ IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'walks'
531
+ TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds'
532
+ TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'walks'
533
+ NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'john'
534
+ NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'alex'
535
+
536
+ A simple intransitive sentence:
537
+
538
+ >>> from nltk.sem import logic
539
+ >>> logic._counter._value = 100
540
+
541
+ >>> trees = cp.parse('john barks'.split())
542
+ |. john.barks.|
543
+ |[-----] .| [0:1] 'john'
544
+ |. [-----]| [1:2] 'barks'
545
+ |[-----] .| [0:1] NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] -> 'john' *
546
+ |[-----> .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
547
+ |. [-----]| [1:2] IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks' *
548
+ |. [-----]| [1:2] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
549
+ |[===========]| [0:2] S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
550
+ >>> for tree in trees: print(tree)
551
+ (S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]]
552
+ (NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] john)
553
+ (VP[SEM=[BO={/}, CORE=<\x.bark(x)>]]
554
+ (IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] barks)))
555
+
556
+ A transitive sentence:
557
+
558
+ >>> trees = cp.parse('john feeds a dog'.split())
559
+ |.joh.fee. a .dog.|
560
+ |[---] . . .| [0:1] 'john'
561
+ |. [---] . .| [1:2] 'feeds'
562
+ |. . [---] .| [2:3] 'a'
563
+ |. . . [---]| [3:4] 'dog'
564
+ |[---] . . .| [0:1] NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] -> 'john' *
565
+ |[---> . . .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
566
+ |. [---] . .| [1:2] TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds' *
567
+ |. [---> . .| [1:2] VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] * NP[SEM=[BO=?b2, CORE=?obj]] {?b1: {/}, ?v: <LambdaExpression \x y.feed(y,x)>}
568
+ |. . [---] .| [2:3] Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a' *
569
+ |. . [---> .| [2:3] NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] * N[SEM=[BO=?b2, CORE=?n]] {?b1: {/}, ?det: <LambdaExpression \Q P.exists x.(Q(x) & P(x))>}
570
+ |. . . [---]| [3:4] N[SEM=[BO={/}, CORE=<dog>]] -> 'dog' *
571
+ |. . [-------]| [2:4] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]] -> Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] N[SEM=[BO={/}, CORE=<dog>]] *
572
+ |. . [------->| [2:4] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.exists x.(dog(x) & P(x)),z2)}, ?subj: <IndividualVariableExpression z2>}
573
+ |. [-----------]| [1:4] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]] -> TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<z2>]] *
574
+ |[===============]| [0:4] S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<\y.feed(y,z3)>]] *
575
+
576
+ >>> for tree in trees: print(tree)
577
+ (S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
578
+ (NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] john)
579
+ (VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
580
+ (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
581
+ (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]]
582
+ (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
583
+ (N[SEM=[BO={/}, CORE=<dog>]] dog))))
584
+
585
+ Turn down the verbosity:
586
+
587
+ >>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=0,
588
+ ... chart_class=InstantiateVarsChart)
589
+
590
+ Reuse the same lexical item twice:
591
+
592
+ >>> trees = cp.parse('john feeds john'.split())
593
+ >>> for tree in trees: print(tree)
594
+ (S[SEM=[BO={bo(\P.P(John),z2), bo(\P.P(John),z3)}, CORE=<feed(z2,z3)>]]
595
+ (NP[SEM=[BO={bo(\P.P(John),z104)}, CORE=<z104>]] john)
596
+ (VP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<\y.feed(y,z2)>]]
597
+ (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
598
+ (NP[SEM=[BO={bo(\P.P(John),z105)}, CORE=<z105>]] john)))
599
+
600
+ >>> trees = cp.parse('a dog feeds a dog'.split())
601
+ >>> for tree in trees: print(tree)
602
+ (S[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
603
+ (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z106)}, CORE=<z106>]]
604
+ (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
605
+ (N[SEM=[BO={/}, CORE=<dog>]] dog))
606
+ (VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
607
+ (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
608
+ (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z107)}, CORE=<z107>]]
609
+ (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
610
+ (N[SEM=[BO={/}, CORE=<dog>]] dog))))
env-llmeval/lib/python3.10/site-packages/nltk/test/framenet.doctest ADDED
@@ -0,0 +1,288 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ========
5
+ FrameNet
6
+ ========
7
+
8
+ The FrameNet corpus is a lexical database of English that is both human-
9
+ and machine-readable, based on annotating examples of how words are used
10
+ in actual texts. FrameNet is based on a theory of meaning called Frame
11
+ Semantics, deriving from the work of Charles J. Fillmore and colleagues.
12
+ The basic idea is straightforward: that the meanings of most words can
13
+ best be understood on the basis of a semantic frame: a description of a
14
+ type of event, relation, or entity and the participants in it. For
15
+ example, the concept of cooking typically involves a person doing the
16
+ cooking (Cook), the food that is to be cooked (Food), something to hold
17
+ the food while cooking (Container) and a source of heat
18
+ (Heating_instrument). In the FrameNet project, this is represented as a
19
+ frame called Apply_heat, and the Cook, Food, Heating_instrument and
20
+ Container are called frame elements (FEs). Words that evoke this frame,
21
+ such as fry, bake, boil, and broil, are called lexical units (LUs) of
22
+ the Apply_heat frame. The job of FrameNet is to define the frames
23
+ and to annotate sentences to show how the FEs fit syntactically around
24
+ the word that evokes the frame.
25
+
26
+ ------
27
+ Frames
28
+ ------
29
+
30
+ A Frame is a script-like conceptual structure that describes a
31
+ particular type of situation, object, or event along with the
32
+ participants and props that are needed for that Frame. For
33
+ example, the "Apply_heat" frame describes a common situation
34
+ involving a Cook, some Food, and a Heating_Instrument, and is
35
+ evoked by words such as bake, blanch, boil, broil, brown,
36
+ simmer, steam, etc.
37
+
38
+ We call the roles of a Frame "frame elements" (FEs) and the
39
+ frame-evoking words are called "lexical units" (LUs).
40
+
41
+ FrameNet includes relations between Frames. Several types of
42
+ relations are defined, of which the most important are:
43
+
44
+ - Inheritance: An IS-A relation. The child frame is a subtype
45
+ of the parent frame, and each FE in the parent is bound to
46
+ a corresponding FE in the child. An example is the
47
+ "Revenge" frame which inherits from the
48
+ "Rewards_and_punishments" frame.
49
+
50
+ - Using: The child frame presupposes the parent frame as
51
+ background, e.g. the "Speed" frame "uses" (or presupposes)
52
+ the "Motion" frame; however, not all parent FEs need to be
53
+ bound to child FEs.
54
+
55
+ - Subframe: The child frame is a subevent of a complex event
56
+ represented by the parent, e.g. the "Criminal_process" frame
57
+ has subframes of "Arrest", "Arraignment", "Trial", and
58
+ "Sentencing".
59
+
60
+ - Perspective_on: The child frame provides a particular
61
+ perspective on an un-perspectivized parent frame. A pair of
62
+ examples consists of the "Hiring" and "Get_a_job" frames,
63
+ which perspectivize the "Employment_start" frame from the
64
+ Employer's and the Employee's point of view, respectively.
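+
+ These relations can also be explored programmatically. As a small sketch
+ (not executed as part of this doctest), the relations recorded for one of
+ the frames named above can be listed via its `frameRelations` attribute::
+
+     from nltk.corpus import framenet as fn
+     # frames can be fetched by name as well as by numeric ID
+     for rel in fn.frame('Revenge').frameRelations:
+         print(rel)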
65
+
66
+ To get a list of all of the Frames in FrameNet, you can use the
67
+ `frames()` function. If you supply a regular expression pattern to the
68
+ `frames()` function, you will get a list of all Frames whose names match
69
+ that pattern:
70
+
71
+ >>> from pprint import pprint
72
+ >>> from operator import itemgetter
73
+ >>> from nltk.corpus import framenet as fn
74
+ >>> from nltk.corpus.reader.framenet import PrettyList
75
+ >>> x = fn.frames(r'(?i)crim')
76
+ >>> x.sort(key=itemgetter('ID'))
77
+ >>> x
78
+ [<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
79
+ >>> PrettyList(sorted(x, key=itemgetter('ID')))
80
+ [<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
81
+
82
+ To get the details of a particular Frame, you can use the `frame()`
83
+ function passing in the frame number:
84
+
85
+ >>> from pprint import pprint
86
+ >>> from nltk.corpus import framenet as fn
87
+ >>> f = fn.frame(202)
88
+ >>> f.ID
89
+ 202
90
+ >>> f.name
91
+ 'Arrest'
92
+ >>> f.definition
93
+ "Authorities charge a Suspect, who is under suspicion of having committed a crime..."
94
+ >>> len(f.lexUnit)
95
+ 11
96
+ >>> pprint(sorted([x for x in f.FE]))
97
+ ['Authorities',
98
+ 'Charges',
99
+ 'Co-participant',
100
+ 'Manner',
101
+ 'Means',
102
+ 'Offense',
103
+ 'Place',
104
+ 'Purpose',
105
+ 'Source_of_legal_authority',
106
+ 'Suspect',
107
+ 'Time',
108
+ 'Type']
109
+ >>> pprint(f.frameRelations)
110
+ [<Parent=Intentionally_affect -- Inheritance -> Child=Arrest>, <Complex=Criminal_process -- Subframe -> Component=Arrest>, ...]
111
+
112
+ The `frame()` function shown above returns a dict object containing
113
+ detailed information about the Frame. See the documentation on the
114
+ `frame()` function for the specifics.
115
+
116
+ You can also search for Frames by their Lexical Units (LUs). The
117
+ `frames_by_lemma()` function returns a list of all frames that contain
118
+ LUs in which the 'name' attribute of the LU matches the given regular
119
+ expression. Note that LU names are composed of "lemma.POS", where the
120
+ "lemma" part can be made up of either a single lexeme (e.g. 'run') or
121
+ multiple lexemes (e.g. 'a little') (see below).
122
+
123
+ >>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID')))
124
+ [<frame ID=189 name=Quanti...>, <frame ID=2001 name=Degree>]
125
+
126
+ -------------
127
+ Lexical Units
128
+ -------------
129
+
130
+ A lexical unit (LU) is a pairing of a word with a meaning. For
131
+ example, the "Apply_heat" Frame describes a common situation
132
+ involving a Cook, some Food, and a Heating Instrument, and is
133
+ _evoked_ by words such as bake, blanch, boil, broil, brown,
134
+ simmer, steam, etc. These frame-evoking words are the LUs in the
135
+ Apply_heat frame. Each sense of a polysemous word is a different
136
+ LU.
137
+
138
+ We have used the word "word" in talking about LUs. The reality
139
+ is actually rather complex. When we say that the word "bake" is
140
+ polysemous, we mean that the lemma "bake.v" (which has the
141
+ word-forms "bake", "bakes", "baked", and "baking") is linked to
142
+ three different frames:
143
+
144
+ - Apply_heat: "Michelle baked the potatoes for 45 minutes."
145
+
146
+ - Cooking_creation: "Michelle baked her mother a cake for her birthday."
147
+
148
+ - Absorb_heat: "The potatoes have to bake for more than 30 minutes."
149
+
150
+ These constitute three different LUs, with different
151
+ definitions.
152
+
153
+ Multiword expressions such as "given name" and hyphenated words
154
+ like "shut-eye" can also be LUs. Idiomatic phrases such as
155
+ "middle of nowhere" and "give the slip (to)" are also defined as
156
+ LUs in the appropriate frames ("Isolated_places" and "Evading",
157
+ respectively), and their internal structure is not analyzed.
158
+
159
+ FrameNet provides multiple annotated examples of each sense of a
160
+ word (i.e. each LU). Moreover, the set of examples
161
+ (approximately 20 per LU) illustrates all of the combinatorial
162
+ possibilities of the lexical unit.
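+
+ As a small sketch of how these annotated examples can be reached (not
+ executed here, and assuming the corpus reader exposes them through an
+ `exemplars` attribute on the LU object)::
+
+     from nltk.corpus import framenet as fn
+     lu = fn.lu(256)             # 'foresee.v' in the Expectation frame, see below
+     print(len(lu.exemplars))    # annotated example sentences (assumed attribute)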
163
+
164
+ Each LU is linked to a Frame, and hence to the other words which
165
+ evoke that Frame. This makes the FrameNet database similar to a
166
+ thesaurus, grouping together semantically similar words.
167
+
168
+ In the simplest case, frame-evoking words are verbs such as
169
+ "fried" in:
170
+
171
+ "Matilde fried the catfish in a heavy iron skillet."
172
+
173
+ Sometimes event nouns may evoke a Frame. For example,
174
+ "reduction" evokes "Cause_change_of_scalar_position" in:
175
+
176
+ "...the reduction of debt levels to $665 million from $2.6 billion."
177
+
178
+ Adjectives may also evoke a Frame. For example, "asleep" may
179
+ evoke the "Sleep" frame as in:
180
+
181
+ "They were asleep for hours."
182
+
183
+ Many common nouns, such as artifacts like "hat" or "tower",
184
+ typically serve as dependents rather than clearly evoking their
185
+ own frames.
186
+
187
+ Details for a specific lexical unit can be obtained using the corpus reader's
188
+ `lus()` function, which takes an optional regular expression
189
+ pattern that will be matched against the name of the lexical unit:
190
+
191
+ >>> from pprint import pprint
192
+ >>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID')))
193
+ [<lu ID=14733 name=a little.n>, <lu ID=14743 name=a little.adv>, ...]
194
+
195
+ You can obtain detailed information on a particular LU by calling the
196
+ `lu()` function and passing in an LU's 'ID' number:
197
+
198
+ >>> from pprint import pprint
199
+ >>> from nltk.corpus import framenet as fn
200
+ >>> fn.lu(256).name
201
+ 'foresee.v'
202
+ >>> fn.lu(256).definition
203
+ 'COD: be aware of beforehand; predict.'
204
+ >>> fn.lu(256).frame.name
205
+ 'Expectation'
206
+ >>> fn.lu(256).lexemes[0].name
207
+ 'foresee'
208
+
209
+ Note that LU names take the form of a dotted string (e.g. "run.v" or "a
210
+ little.adv") in which a lemma precedes the "." and a part of speech
211
+ (POS) follows the dot. The lemma may be composed of a single lexeme
212
+ (e.g. "run") or of multiple lexemes (e.g. "a little"). The list of
213
+ POSs used in the LUs is:
214
+
215
+ v - verb
216
+ n - noun
217
+ a - adjective
218
+ adv - adverb
219
+ prep - preposition
220
+ num - numbers
221
+ intj - interjection
222
+ art - article
223
+ c - conjunction
224
+ scon - subordinating conjunction
225
+
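+ Because the POS is part of the LU name, it can be used directly when
+ searching. As a small sketch (not executed as part of this doctest), a
+ lookup restricted to verbal LUs simply anchors the pattern on the '.v'
+ suffix::
+
+     fn.lus(r'(?i)^run.*\.v$')    # LUs whose lemma starts with 'run' and whose POS is 'v'
+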
226
+ For more detail about the information contained in the
227
+ dict returned by the `lu()` function, see the documentation for
228
+ the `lu()` function.
229
+
230
+ -------------------
231
+ Annotated Documents
232
+ -------------------
233
+
234
+ The FrameNet corpus contains a small set of annotated documents. A list
235
+ of these documents can be obtained by calling the `docs()` function:
236
+
237
+ >>> from pprint import pprint
238
+ >>> from nltk.corpus import framenet as fn
239
+ >>> d = fn.docs('BellRinging')[0]
240
+ >>> d.corpname
241
+ 'PropBank'
242
+ >>> d.sentence[49]
243
+ full-text sentence (...) in BellRinging:
244
+ <BLANKLINE>
245
+ <BLANKLINE>
246
+ [POS] 17 tags
247
+ <BLANKLINE>
248
+ [POS_tagset] PENN
249
+ <BLANKLINE>
250
+ [text] + [annotationSet]
251
+ <BLANKLINE>
252
+ `` I live in hopes that the ringers themselves will be drawn into
253
+ ***** ******* *****
254
+ Desir Cause_t Cause
255
+ [1] [3] [2]
256
+ <BLANKLINE>
257
+ that fuller life .
258
+ ******
259
+ Comple
260
+ [4]
261
+ (Desir=Desiring, Cause_t=Cause_to_make_noise, Cause=Cause_motion, Comple=Completeness)
262
+ <BLANKLINE>
263
+
264
+ >>> d.sentence[49].annotationSet[1]
265
+ annotation set (...):
266
+ <BLANKLINE>
267
+ [status] MANUAL
268
+ <BLANKLINE>
269
+ [LU] (6605) hope.n in Desiring
270
+ <BLANKLINE>
271
+ [frame] (366) Desiring
272
+ <BLANKLINE>
273
+ [GF] 2 relations
274
+ <BLANKLINE>
275
+ [PT] 2 phrases
276
+ <BLANKLINE>
277
+ [text] + [Target] + [FE] + [Noun]
278
+ <BLANKLINE>
279
+ `` I live in hopes that the ringers themselves will be drawn into
280
+ - ^^^^ ^^ ***** ----------------------------------------------
281
+ E supp su Event
282
+ <BLANKLINE>
283
+ that fuller life .
284
+ -----------------
285
+ <BLANKLINE>
286
+ (E=Experiencer, su=supp)
287
+ <BLANKLINE>
288
+ <BLANKLINE>
env-llmeval/lib/python3.10/site-packages/nltk/test/generate.doctest ADDED
@@ -0,0 +1,78 @@
 
 
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ===============================================
5
+ Generating sentences from context-free grammars
6
+ ===============================================
7
+
8
+ An example grammar:
9
+
10
+ >>> from nltk.parse.generate import generate, demo_grammar
11
+ >>> from nltk import CFG
12
+ >>> grammar = CFG.fromstring(demo_grammar)
13
+ >>> print(grammar)
14
+ Grammar with 13 productions (start state = S)
15
+ S -> NP VP
16
+ NP -> Det N
17
+ PP -> P NP
18
+ VP -> 'slept'
19
+ VP -> 'saw' NP
20
+ VP -> 'walked' PP
21
+ Det -> 'the'
22
+ Det -> 'a'
23
+ N -> 'man'
24
+ N -> 'park'
25
+ N -> 'dog'
26
+ P -> 'in'
27
+ P -> 'with'
28
+
29
+ The first 10 generated sentences:
30
+
31
+ >>> for sentence in generate(grammar, n=10):
32
+ ... print(' '.join(sentence))
33
+ the man slept
34
+ the man saw the man
35
+ the man saw the park
36
+ the man saw the dog
37
+ the man saw a man
38
+ the man saw a park
39
+ the man saw a dog
40
+ the man walked in the man
41
+ the man walked in the park
42
+ the man walked in the dog
43
+
44
+ All sentences of max depth 4:
45
+
46
+ >>> for sentence in generate(grammar, depth=4):
47
+ ... print(' '.join(sentence))
48
+ the man slept
49
+ the park slept
50
+ the dog slept
51
+ a man slept
52
+ a park slept
53
+ a dog slept
54
+
55
+ The number of sentences of different max depths:
56
+
57
+ >>> len(list(generate(grammar, depth=3)))
58
+ 0
59
+ >>> len(list(generate(grammar, depth=4)))
60
+ 6
61
+ >>> len(list(generate(grammar, depth=5)))
62
+ 42
63
+ >>> len(list(generate(grammar, depth=6)))
64
+ 114
65
+ >>> len(list(generate(grammar)))
66
+ 114
67
+
68
+ Infinite grammars will raise a RuntimeError when not bounded by some ``depth``:
69
+
70
+ >>> grammar = CFG.fromstring("""
71
+ ... S -> A B
72
+ ... A -> B
73
+ ... B -> "b" | A
74
+ ... """)
75
+ >>> list(generate(grammar))
76
+ Traceback (most recent call last):
77
+ ...
78
+ RuntimeError: The grammar has rule(s) that yield infinite recursion!
env-llmeval/lib/python3.10/site-packages/nltk/test/gensim.doctest ADDED
@@ -0,0 +1,141 @@
 
 
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =======================================
5
+ Demonstrate word embedding using Gensim
6
+ =======================================
7
+
8
+ >>> from nltk.test.gensim_fixt import setup_module
9
+ >>> setup_module()
10
+
11
+ We demonstrate three functions:
12
+ - Train word embeddings on the Brown Corpus;
13
+ - Load the pre-trained model and perform simple tasks; and
14
+ - Prune the pre-trained binary model.
15
+
16
+ >>> import gensim
17
+
18
+ ---------------
19
+ Train the model
20
+ ---------------
21
+
22
+ Here we train a word embedding using the Brown Corpus:
23
+
24
+ >>> from nltk.corpus import brown
25
+ >>> train_set = brown.sents()[:10000]
26
+ >>> model = gensim.models.Word2Vec(train_set)
27
+
28
+ Training the model might take some time, so after it is trained it can be saved and reloaded as follows:
29
+
30
+ >>> model.save('brown.embedding')
31
+ >>> new_model = gensim.models.Word2Vec.load('brown.embedding')
32
+
33
+ The model maps each word in its vocabulary to an embedding vector. We can easily get the vector representation of a word.
34
+
35
+ >>> len(new_model.wv['university'])
36
+ 100
37
+
38
+ Gensim also provides a number of supporting functions for working with word embeddings.
39
+ For example, to compute the cosine similarity between two words:
40
+
41
+ >>> new_model.wv.similarity('university','school') > 0.3
42
+ True
43
+
44
+ ---------------------------
45
+ Using the pre-trained model
46
+ ---------------------------
47
+
48
+ NLTK includes a pruned version of a pre-trained model trained on 100 billion words from the Google News Dataset.
49
+ The full model is from https://code.google.com/p/word2vec/ (about 3 GB).
50
+
51
+ >>> from nltk.data import find
52
+ >>> word2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))
53
+ >>> model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_sample, binary=False)
54
+
55
+ We pruned the model to only include the most common words (~44k words).
56
+
57
+ >>> len(model)
58
+ 43981
59
+
60
+ Each word is represented in the space of 300 dimensions:
61
+
62
+ >>> len(model['university'])
63
+ 300
64
+
65
+ Finding the top n words most similar to a target word is simple. The result is a list of the n words together with their similarity scores.
66
+
67
+ >>> model.most_similar(positive=['university'], topn = 3)
68
+ [('universities', 0.70039...), ('faculty', 0.67809...), ('undergraduate', 0.65870...)]
69
+
70
+ Finding the word that does not belong in a list is also supported, although implementing this yourself would be straightforward.
71
+
72
+ >>> model.doesnt_match('breakfast cereal dinner lunch'.split())
73
+ 'cereal'
74
+
75
+ Mikolov et al. (2013) showed that word embeddings capture many syntactic and semantic regularities. For example,
76
+ the vector 'King - Man + Woman' is close to 'Queen' and 'Germany - Berlin + Paris' is close to 'France'.
77
+
78
+ >>> model.most_similar(positive=['woman','king'], negative=['man'], topn = 1)
79
+ [('queen', 0.71181...)]
80
+
81
+ >>> model.most_similar(positive=['Paris','Germany'], negative=['Berlin'], topn = 1)
82
+ [('France', 0.78840...)]
83
+
84
+ We can visualize the word embeddings using t-SNE (https://lvdmaaten.github.io/tsne/). For this demonstration, we visualize the first 1000 words.
85
+
86
+ | import numpy as np
87
+ | labels = []
88
+ | count = 0
89
+ | max_count = 1000
90
+ | X = np.zeros(shape=(max_count,len(model['university'])))
91
+ |
92
+ | for term in model.index_to_key:
93
+ | X[count] = model[term]
94
+ | labels.append(term)
95
+ | count+= 1
96
+ | if count >= max_count: break
97
+ |
98
+ | # It is recommended to use PCA first to reduce to ~50 dimensions
99
+ | from sklearn.decomposition import PCA
100
+ | pca = PCA(n_components=50)
101
+ | X_50 = pca.fit_transform(X)
102
+ |
103
+ | # Using TSNE to further reduce to 2 dimensions
104
+ | from sklearn.manifold import TSNE
105
+ | model_tsne = TSNE(n_components=2, random_state=0)
106
+ | Y = model_tsne.fit_transform(X_50)
107
+ |
108
+ | # Show the scatter plot
109
+ | import matplotlib.pyplot as plt
110
+ | plt.scatter(Y[:,0], Y[:,1], 20)
111
+ |
112
+ | # Add labels
113
+ | for label, x, y in zip(labels, Y[:, 0], Y[:, 1]):
114
+ | plt.annotate(label, xy = (x,y), xytext = (0, 0), textcoords = 'offset points', size = 10)
115
+ |
116
+ | plt.show()
117
+
118
+ ------------------------------
119
+ Prune the trained binary model
120
+ ------------------------------
121
+
122
+ Here is the supporting code to extract part of the binary model (GoogleNews-vectors-negative300.bin.gz) from https://code.google.com/p/word2vec/
123
+ We use this code to get the `word2vec_sample` model.
124
+
125
+ | import gensim
126
+ | # Load the binary model
127
+ | model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary = True)
128
+ |
129
+ | # Only output words that appear in the Brown corpus
130
+ | from nltk.corpus import brown
131
+ | words = set(brown.words())
132
+ | print(len(words))
133
+ |
134
+ | # Write the retained words to a temporary file
135
+ | out_file = 'pruned.word2vec.txt'
136
+ | with open(out_file,'w') as f:
137
+ | word_presented = words.intersection(model.index_to_key)
138
+ | f.write('{} {}\n'.format(len(word_presented),len(model['word'])))
139
+ |
140
+ | for word in word_presented:
141
+ | f.write('{} {}\n'.format(word, ' '.join(str(value) for value in model[word])))
env-llmeval/lib/python3.10/site-packages/nltk/test/gluesemantics.doctest ADDED
@@ -0,0 +1,383 @@
 
 
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==============================================================================
5
+ Glue Semantics
6
+ ==============================================================================
7
+
8
+
9
+
10
+ ======================
11
+ Linear logic
12
+ ======================
13
+
14
+ >>> from nltk.sem import logic
15
+ >>> from nltk.sem.glue import *
16
+ >>> from nltk.sem.linearlogic import *
17
+
18
+ >>> from nltk.sem.linearlogic import Expression
19
+ >>> read_expr = Expression.fromstring
20
+
21
+ Parser
22
+
23
+ >>> print(read_expr(r'f'))
24
+ f
25
+ >>> print(read_expr(r'(g -o f)'))
26
+ (g -o f)
27
+ >>> print(read_expr(r'(g -o (h -o f))'))
28
+ (g -o (h -o f))
29
+ >>> print(read_expr(r'((g -o G) -o G)'))
30
+ ((g -o G) -o G)
31
+ >>> print(read_expr(r'(g -o f)(g)'))
32
+ (g -o f)(g)
33
+ >>> print(read_expr(r'((g -o G) -o G)((g -o f))'))
34
+ ((g -o G) -o G)((g -o f))
35
+
36
+ Simplify
37
+
38
+ >>> print(read_expr(r'f').simplify())
39
+ f
40
+ >>> print(read_expr(r'(g -o f)').simplify())
41
+ (g -o f)
42
+ >>> print(read_expr(r'((g -o G) -o G)').simplify())
43
+ ((g -o G) -o G)
44
+ >>> print(read_expr(r'(g -o f)(g)').simplify())
45
+ f
46
+ >>> try: read_expr(r'(g -o f)(f)').simplify()
47
+ ... except LinearLogicApplicationException as e: print(e)
48
+ ...
49
+ Cannot apply (g -o f) to f. Cannot unify g with f given {}
50
+ >>> print(read_expr(r'(G -o f)(g)').simplify())
51
+ f
52
+ >>> print(read_expr(r'((g -o G) -o G)((g -o f))').simplify())
53
+ f
54
+
55
+ Test BindingDict
56
+
57
+ >>> h = ConstantExpression('h')
58
+ >>> g = ConstantExpression('g')
59
+ >>> f = ConstantExpression('f')
60
+
61
+ >>> H = VariableExpression('H')
62
+ >>> G = VariableExpression('G')
63
+ >>> F = VariableExpression('F')
64
+
65
+ >>> d1 = BindingDict({H: h})
66
+ >>> d2 = BindingDict({F: f, G: F})
67
+ >>> d12 = d1 + d2
68
+ >>> all12 = ['%s: %s' % (v, d12[v]) for v in d12.d]
69
+ >>> all12.sort()
70
+ >>> print(all12)
71
+ ['F: f', 'G: f', 'H: h']
72
+
73
+ >>> BindingDict([(F,f),(G,g),(H,h)]) == BindingDict({F:f, G:g, H:h})
74
+ True
75
+
76
+ >>> d4 = BindingDict({F: f})
77
+ >>> try: d4[F] = g
78
+ ... except VariableBindingException as e: print(e)
79
+ Variable F already bound to another value
80
+
81
+ Test Unify
82
+
83
+ >>> try: f.unify(g, BindingDict())
84
+ ... except UnificationException as e: print(e)
85
+ ...
86
+ Cannot unify f with g given {}
87
+
88
+ >>> f.unify(G, BindingDict()) == BindingDict({G: f})
89
+ True
90
+ >>> try: f.unify(G, BindingDict({G: h}))
91
+ ... except UnificationException as e: print(e)
92
+ ...
93
+ Cannot unify f with G given {G: h}
94
+ >>> f.unify(G, BindingDict({G: f})) == BindingDict({G: f})
95
+ True
96
+ >>> f.unify(G, BindingDict({H: f})) == BindingDict({G: f, H: f})
97
+ True
98
+
99
+ >>> G.unify(f, BindingDict()) == BindingDict({G: f})
100
+ True
101
+ >>> try: G.unify(f, BindingDict({G: h}))
102
+ ... except UnificationException as e: print(e)
103
+ ...
104
+ Cannot unify G with f given {G: h}
105
+ >>> G.unify(f, BindingDict({G: f})) == BindingDict({G: f})
106
+ True
107
+ >>> G.unify(f, BindingDict({H: f})) == BindingDict({G: f, H: f})
108
+ True
109
+
110
+ >>> G.unify(F, BindingDict()) == BindingDict({G: F})
111
+ True
112
+ >>> try: G.unify(F, BindingDict({G: H}))
113
+ ... except UnificationException as e: print(e)
114
+ ...
115
+ Cannot unify G with F given {G: H}
116
+ >>> G.unify(F, BindingDict({G: F})) == BindingDict({G: F})
117
+ True
118
+ >>> G.unify(F, BindingDict({H: F})) == BindingDict({G: F, H: F})
119
+ True
120
+
121
+ Test Compile
122
+
123
+ >>> print(read_expr('g').compile_pos(Counter(), GlueFormula))
124
+ (<ConstantExpression g>, [])
125
+ >>> print(read_expr('(g -o f)').compile_pos(Counter(), GlueFormula))
126
+ (<ImpExpression (g -o f)>, [])
127
+ >>> print(read_expr('(g -o (h -o f))').compile_pos(Counter(), GlueFormula))
128
+ (<ImpExpression (g -o (h -o f))>, [])
129
+
130
+
131
+ ======================
132
+ Glue
133
+ ======================
134
+
135
+ Demo of "John walks"
136
+ --------------------
137
+
138
+ >>> john = GlueFormula("John", "g")
139
+ >>> print(john)
140
+ John : g
141
+ >>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
142
+ >>> print(walks)
143
+ \x.walks(x) : (g -o f)
144
+ >>> print(walks.applyto(john))
145
+ \x.walks(x)(John) : (g -o f)(g)
146
+ >>> print(walks.applyto(john).simplify())
147
+ walks(John) : f
148
+
149
+
150
+ Demo of "A dog walks"
151
+ ---------------------
152
+
153
+ >>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
154
+ >>> print(a)
155
+ \P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
156
+ >>> man = GlueFormula(r"\x.man(x)", "(gv -o gr)")
157
+ >>> print(man)
158
+ \x.man(x) : (gv -o gr)
159
+ >>> walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
160
+ >>> print(walks)
161
+ \x.walks(x) : (g -o f)
162
+ >>> a_man = a.applyto(man)
163
+ >>> print(a_man.simplify())
164
+ \Q.exists x.(man(x) & Q(x)) : ((g -o G) -o G)
165
+ >>> a_man_walks = a_man.applyto(walks)
166
+ >>> print(a_man_walks.simplify())
167
+ exists x.(man(x) & walks(x)) : f
168
+
169
+
170
+ Demo of 'every girl chases a dog'
171
+ ---------------------------------
172
+
173
+ Individual words:
174
+
175
+ >>> every = GlueFormula("\\P Q.all x.(P(x) -> Q(x))", "((gv -o gr) -o ((g -o G) -o G))")
176
+ >>> print(every)
177
+ \P Q.all x.(P(x) -> Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
178
+ >>> girl = GlueFormula(r"\x.girl(x)", "(gv -o gr)")
179
+ >>> print(girl)
180
+ \x.girl(x) : (gv -o gr)
181
+ >>> chases = GlueFormula(r"\x y.chases(x,y)", "(g -o (h -o f))")
182
+ >>> print(chases)
183
+ \x y.chases(x,y) : (g -o (h -o f))
184
+ >>> a = GlueFormula("\\P Q.some x.(P(x) and Q(x))", "((hv -o hr) -o ((h -o H) -o H))")
185
+ >>> print(a)
186
+ \P Q.exists x.(P(x) & Q(x)) : ((hv -o hr) -o ((h -o H) -o H))
187
+ >>> dog = GlueFormula(r"\x.dog(x)", "(hv -o hr)")
188
+ >>> print(dog)
189
+ \x.dog(x) : (hv -o hr)
190
+
191
+ Noun Quantification can only be done one way:
192
+
193
+ >>> every_girl = every.applyto(girl)
194
+ >>> print(every_girl.simplify())
195
+ \Q.all x.(girl(x) -> Q(x)) : ((g -o G) -o G)
196
+ >>> a_dog = a.applyto(dog)
197
+ >>> print(a_dog.simplify())
198
+ \Q.exists x.(dog(x) & Q(x)) : ((h -o H) -o H)
199
+
200
+ The first reading is achieved by combining 'chases' with 'a dog' first.
201
+ Since 'a dog' requires something of the form '(h -o H)' we must
202
+ get rid of the 'g' in the glue of 'chases'. We will do this with
203
+ the '-o elimination' rule. So, x1 will be our subject placeholder.
204
+
205
+ >>> xPrime = GlueFormula("x1", "g")
206
+ >>> print(xPrime)
207
+ x1 : g
208
+ >>> xPrime_chases = chases.applyto(xPrime)
209
+ >>> print(xPrime_chases.simplify())
210
+ \y.chases(x1,y) : (h -o f)
211
+ >>> xPrime_chases_a_dog = a_dog.applyto(xPrime_chases)
212
+ >>> print(xPrime_chases_a_dog.simplify())
213
+ exists x.(dog(x) & chases(x1,x)) : f
214
+
215
+ Now we can retract our subject placeholder using lambda-abstraction and
216
+ combine with the true subject.
217
+
218
+ >>> chases_a_dog = xPrime_chases_a_dog.lambda_abstract(xPrime)
219
+ >>> print(chases_a_dog.simplify())
220
+ \x1.exists x.(dog(x) & chases(x1,x)) : (g -o f)
221
+ >>> every_girl_chases_a_dog = every_girl.applyto(chases_a_dog)
222
+ >>> r1 = every_girl_chases_a_dog.simplify()
223
+ >>> r2 = GlueFormula(r'all x.(girl(x) -> exists z1.(dog(z1) & chases(x,z1)))', 'f')
224
+ >>> r1 == r2
225
+ True
226
+
227
+ The second reading is achieved by combining 'every girl' with 'chases' first.
228
+
229
+ >>> xPrime = GlueFormula("x1", "g")
230
+ >>> print(xPrime)
231
+ x1 : g
232
+ >>> xPrime_chases = chases.applyto(xPrime)
233
+ >>> print(xPrime_chases.simplify())
234
+ \y.chases(x1,y) : (h -o f)
235
+ >>> yPrime = GlueFormula("x2", "h")
236
+ >>> print(yPrime)
237
+ x2 : h
238
+ >>> xPrime_chases_yPrime = xPrime_chases.applyto(yPrime)
239
+ >>> print(xPrime_chases_yPrime.simplify())
240
+ chases(x1,x2) : f
241
+ >>> chases_yPrime = xPrime_chases_yPrime.lambda_abstract(xPrime)
242
+ >>> print(chases_yPrime.simplify())
243
+ \x1.chases(x1,x2) : (g -o f)
244
+ >>> every_girl_chases_yPrime = every_girl.applyto(chases_yPrime)
245
+ >>> print(every_girl_chases_yPrime.simplify())
246
+ all x.(girl(x) -> chases(x,x2)) : f
247
+ >>> every_girl_chases = every_girl_chases_yPrime.lambda_abstract(yPrime)
248
+ >>> print(every_girl_chases.simplify())
249
+ \x2.all x.(girl(x) -> chases(x,x2)) : (h -o f)
250
+ >>> every_girl_chases_a_dog = a_dog.applyto(every_girl_chases)
251
+ >>> r1 = every_girl_chases_a_dog.simplify()
252
+ >>> r2 = GlueFormula(r'exists x.(dog(x) & all z2.(girl(z2) -> chases(z2,x)))', 'f')
253
+ >>> r1 == r2
254
+ True
255
+
256
+
257
+ Compilation
258
+ -----------
259
+
260
+ >>> for cp in GlueFormula('m', '(b -o a)').compile(Counter()): print(cp)
261
+ m : (b -o a) : {1}
262
+ >>> for cp in GlueFormula('m', '((c -o b) -o a)').compile(Counter()): print(cp)
263
+ v1 : c : {1}
264
+ m : (b[1] -o a) : {2}
265
+ >>> for cp in GlueFormula('m', '((d -o (c -o b)) -o a)').compile(Counter()): print(cp)
266
+ v1 : c : {1}
267
+ v2 : d : {2}
268
+ m : (b[1, 2] -o a) : {3}
269
+ >>> for cp in GlueFormula('m', '((d -o e) -o ((c -o b) -o a))').compile(Counter()): print(cp)
270
+ v1 : d : {1}
271
+ v2 : c : {2}
272
+ m : (e[1] -o (b[2] -o a)) : {3}
273
+ >>> for cp in GlueFormula('m', '(((d -o c) -o b) -o a)').compile(Counter()): print(cp)
274
+ v1 : (d -o c) : {1}
275
+ m : (b[1] -o a) : {2}
276
+ >>> for cp in GlueFormula('m', '((((e -o d) -o c) -o b) -o a)').compile(Counter()): print(cp)
277
+ v1 : e : {1}
278
+ v2 : (d[1] -o c) : {2}
279
+ m : (b[2] -o a) : {3}
280
+
281
+
282
+ Demo of 'a man walks' using Compilation
283
+ ---------------------------------------
284
+
285
+ Premises
286
+
287
+ >>> a = GlueFormula('\\P Q.some x.(P(x) and Q(x))', '((gv -o gr) -o ((g -o G) -o G))')
288
+ >>> print(a)
289
+ \P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G) -o G))
290
+
291
+ >>> man = GlueFormula('\\x.man(x)', '(gv -o gr)')
292
+ >>> print(man)
293
+ \x.man(x) : (gv -o gr)
294
+
295
+ >>> walks = GlueFormula('\\x.walks(x)', '(g -o f)')
296
+ >>> print(walks)
297
+ \x.walks(x) : (g -o f)
298
+
299
+ Compiled Premises:
300
+
301
+ >>> counter = Counter()
302
+ >>> ahc = a.compile(counter)
303
+ >>> g1 = ahc[0]
304
+ >>> print(g1)
305
+ v1 : gv : {1}
306
+ >>> g2 = ahc[1]
307
+ >>> print(g2)
308
+ v2 : g : {2}
309
+ >>> g3 = ahc[2]
310
+ >>> print(g3)
311
+ \P Q.exists x.(P(x) & Q(x)) : (gr[1] -o (G[2] -o G)) : {3}
312
+ >>> g4 = man.compile(counter)[0]
313
+ >>> print(g4)
314
+ \x.man(x) : (gv -o gr) : {4}
315
+ >>> g5 = walks.compile(counter)[0]
316
+ >>> print(g5)
317
+ \x.walks(x) : (g -o f) : {5}
318
+
319
+ Derivation:
320
+
321
+ >>> g14 = g4.applyto(g1)
322
+ >>> print(g14.simplify())
323
+ man(v1) : gr : {1, 4}
324
+ >>> g134 = g3.applyto(g14)
325
+ >>> print(g134.simplify())
326
+ \Q.exists x.(man(x) & Q(x)) : (G[2] -o G) : {1, 3, 4}
327
+ >>> g25 = g5.applyto(g2)
328
+ >>> print(g25.simplify())
329
+ walks(v2) : f : {2, 5}
330
+ >>> g12345 = g134.applyto(g25)
331
+ >>> print(g12345.simplify())
332
+ exists x.(man(x) & walks(x)) : f : {1, 2, 3, 4, 5}
333
+
334
+ ---------------------------------
335
+ Dependency Graph to Glue Formulas
336
+ ---------------------------------
337
+ >>> from nltk.corpus.reader.dependency import DependencyGraph
338
+
339
+ >>> depgraph = DependencyGraph("""1 John _ NNP NNP _ 2 SUBJ _ _
340
+ ... 2 sees _ VB VB _ 0 ROOT _ _
341
+ ... 3 a _ ex_quant ex_quant _ 4 SPEC _ _
342
+ ... 4 dog _ NN NN _ 2 OBJ _ _
343
+ ... """)
344
+ >>> gfl = GlueDict('nltk:grammars/sample_grammars/glue.semtype').to_glueformula_list(depgraph)
345
+ >>> print(gfl) # doctest: +SKIP
346
+ [\x y.sees(x,y) : (f -o (i -o g)),
347
+ \x.dog(x) : (iv -o ir),
348
+ \P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I3) -o I3)),
349
+ \P Q.exists x.(P(x) & Q(x)) : ((fv -o fr) -o ((f -o F4) -o F4)),
350
+ \x.John(x) : (fv -o fr)]
351
+ >>> glue = Glue()
352
+ >>> for r in sorted([r.simplify().normalize() for r in glue.get_readings(glue.gfl_to_compiled(gfl))], key=str):
353
+ ... print(r)
354
+ exists z1.(John(z1) & exists z2.(dog(z2) & sees(z1,z2)))
355
+ exists z1.(dog(z1) & exists z2.(John(z2) & sees(z2,z1)))
356
+
357
+ -----------------------------------
358
+ Dependency Graph to LFG f-structure
359
+ -----------------------------------
360
+ >>> from nltk.sem.lfg import FStructure
361
+
362
+ >>> fstruct = FStructure.read_depgraph(depgraph)
363
+
364
+ >>> print(fstruct) # doctest: +SKIP
365
+ f:[pred 'sees'
366
+ obj h:[pred 'dog'
367
+ spec 'a']
368
+ subj g:[pred 'John']]
369
+
370
+ >>> fstruct.to_depgraph().tree().pprint()
371
+ (sees (dog a) John)
372
+
373
+ ---------------------------------
374
+ LFG f-structure to Glue
375
+ ---------------------------------
376
+ >>> fstruct.to_glueformula_list(GlueDict('nltk:grammars/sample_grammars/glue.semtype')) # doctest: +SKIP
377
+ [\x y.sees(x,y) : (i -o (g -o f)),
378
+ \x.dog(x) : (gv -o gr),
379
+ \P Q.exists x.(P(x) & Q(x)) : ((gv -o gr) -o ((g -o G3) -o G3)),
380
+ \P Q.exists x.(P(x) & Q(x)) : ((iv -o ir) -o ((i -o I4) -o I4)),
381
+ \x.John(x) : (iv -o ir)]
382
+
383
+ .. see gluesemantics_malt.doctest for more
env-llmeval/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py ADDED
@@ -0,0 +1,9 @@
 
 
1
+ def setup_module():
2
+ import pytest
3
+
4
+ from nltk.parse.malt import MaltParser
5
+
6
+ try:
7
+ depparser = MaltParser()
8
+ except (AssertionError, LookupError) as e:
9
+ pytest.skip("MaltParser is not available")
env-llmeval/lib/python3.10/site-packages/nltk/test/index.doctest ADDED
@@ -0,0 +1,100 @@
 
 
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ .. _align howto: align.html
5
+ .. _ccg howto: ccg.html
6
+ .. _chat80 howto: chat80.html
7
+ .. _childes howto: childes.html
8
+ .. _chunk howto: chunk.html
9
+ .. _classify howto: classify.html
10
+ .. _collocations howto: collocations.html
11
+ .. _compat howto: compat.html
12
+ .. _corpus howto: corpus.html
13
+ .. _data howto: data.html
14
+ .. _dependency howto: dependency.html
15
+ .. _discourse howto: discourse.html
16
+ .. _drt howto: drt.html
17
+ .. _featgram howto: featgram.html
18
+ .. _featstruct howto: featstruct.html
19
+ .. _framenet howto: framenet.html
20
+ .. _generate howto: generate.html
21
+ .. _gluesemantics howto: gluesemantics.html
22
+ .. _gluesemantics_malt howto: gluesemantics_malt.html
23
+ .. _grammar howto: grammar.html
24
+ .. _grammartestsuites howto: grammartestsuites.html
25
+ .. _index howto: index.html
26
+ .. _inference howto: inference.html
27
+ .. _internals howto: internals.html
28
+ .. _japanese howto: japanese.html
29
+ .. _logic howto: logic.html
30
+ .. _metrics howto: metrics.html
31
+ .. _misc howto: misc.html
32
+ .. _nonmonotonic howto: nonmonotonic.html
33
+ .. _parse howto: parse.html
34
+ .. _portuguese_en howto: portuguese_en.html
35
+ .. _probability howto: probability.html
36
+ .. _propbank howto: propbank.html
37
+ .. _relextract howto: relextract.html
38
+ .. _resolution howto: resolution.html
39
+ .. _semantics howto: semantics.html
40
+ .. _simple howto: simple.html
41
+ .. _stem howto: stem.html
42
+ .. _tag howto: tag.html
43
+ .. _tokenize howto: tokenize.html
44
+ .. _toolbox howto: toolbox.html
45
+ .. _tree howto: tree.html
46
+ .. _treetransforms howto: treetransforms.html
47
+ .. _util howto: util.html
48
+ .. _wordnet howto: wordnet.html
49
+ .. _wordnet_lch howto: wordnet_lch.html
50
+
51
+ ===========
52
+ NLTK HOWTOs
53
+ ===========
54
+
55
+ * `align HOWTO`_
56
+ * `ccg HOWTO`_
57
+ * `chat80 HOWTO`_
58
+ * `childes HOWTO`_
59
+ * `chunk HOWTO`_
60
+ * `classify HOWTO`_
61
+ * `collocations HOWTO`_
62
+ * `compat HOWTO`_
63
+ * `corpus HOWTO`_
64
+ * `data HOWTO`_
65
+ * `dependency HOWTO`_
66
+ * `discourse HOWTO`_
67
+ * `drt HOWTO`_
68
+ * `featgram HOWTO`_
69
+ * `featstruct HOWTO`_
70
+ * `framenet HOWTO`_
71
+ * `generate HOWTO`_
72
+ * `gluesemantics HOWTO`_
73
+ * `gluesemantics_malt HOWTO`_
74
+ * `grammar HOWTO`_
75
+ * `grammartestsuites HOWTO`_
76
+ * `index HOWTO`_
77
+ * `inference HOWTO`_
78
+ * `internals HOWTO`_
79
+ * `japanese HOWTO`_
80
+ * `logic HOWTO`_
81
+ * `metrics HOWTO`_
82
+ * `misc HOWTO`_
83
+ * `nonmonotonic HOWTO`_
84
+ * `parse HOWTO`_
85
+ * `portuguese_en HOWTO`_
86
+ * `probability HOWTO`_
87
+ * `propbank HOWTO`_
88
+ * `relextract HOWTO`_
89
+ * `resolution HOWTO`_
90
+ * `semantics HOWTO`_
91
+ * `simple HOWTO`_
92
+ * `stem HOWTO`_
93
+ * `tag HOWTO`_
94
+ * `tokenize HOWTO`_
95
+ * `toolbox HOWTO`_
96
+ * `tree HOWTO`_
97
+ * `treetransforms HOWTO`_
98
+ * `util HOWTO`_
99
+ * `wordnet HOWTO`_
100
+ * `wordnet_lch HOWTO`_
env-llmeval/lib/python3.10/site-packages/nltk/test/internals.doctest ADDED
@@ -0,0 +1,161 @@
 
 
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==========================================
5
+ Unit tests for the nltk.internals module
6
+ ==========================================
7
+
8
+ overridden()
9
+ ~~~~~~~~~~~~
10
+ >>> from nltk.internals import overridden
11
+
12
+ The typical use case is in defining methods for an interface or
13
+ abstract base class, in such a way that subclasses don't have to
14
+ implement all of the methods:
15
+
16
+ >>> class EaterI(object):
17
+ ... '''Subclass must define eat() or batch_eat().'''
18
+ ... def eat(self, food):
19
+ ... if overridden(self.batch_eat):
20
+ ... return self.batch_eat([food])[0]
21
+ ... else:
22
+ ... raise NotImplementedError()
23
+ ... def batch_eat(self, foods):
24
+ ... return [self.eat(food) for food in foods]
25
+
26
+ As long as a subclass implements one method, it will be used to
27
+ perform the other method:
28
+
29
+ >>> class GoodEater1(EaterI):
30
+ ... def eat(self, food):
31
+ ... return 'yum'
32
+ >>> GoodEater1().eat('steak')
33
+ 'yum'
34
+ >>> GoodEater1().batch_eat(['steak', 'peas'])
35
+ ['yum', 'yum']
36
+
37
+ >>> class GoodEater2(EaterI):
38
+ ... def batch_eat(self, foods):
39
+ ... return ['yum' for food in foods]
40
+ >>> GoodEater2().eat('steak')
41
+ 'yum'
42
+ >>> GoodEater2().batch_eat(['steak', 'peas'])
43
+ ['yum', 'yum']
44
+
45
+ But if a subclass doesn't implement either one, then it will get an
46
+ error when either method is called. (Note that this is better than infinite
47
+ recursion):
48
+
49
+ >>> class BadEater1(EaterI):
50
+ ... pass
51
+ >>> BadEater1().eat('steak')
52
+ Traceback (most recent call last):
53
+ . . .
54
+ NotImplementedError
55
+ >>> BadEater1().batch_eat(['steak', 'peas'])
56
+ Traceback (most recent call last):
57
+ . . .
58
+ NotImplementedError
59
+
60
+ Trying to use the abstract base class itself will also result in an
61
+ error:
62
+
63
+ >>> class EaterI(EaterI):
64
+ ... pass
65
+ >>> EaterI().eat('steak')
66
+ Traceback (most recent call last):
67
+ . . .
68
+ NotImplementedError
69
+ >>> EaterI().batch_eat(['steak', 'peas'])
70
+ Traceback (most recent call last):
71
+ . . .
72
+ NotImplementedError
73
+
74
+ It's ok to use intermediate abstract classes:
75
+
76
+ >>> class AbstractEater(EaterI):
77
+ ... pass
78
+
79
+ >>> class GoodEater3(AbstractEater):
80
+ ... def eat(self, food):
81
+ ... return 'yum'
82
+ ...
83
+ >>> GoodEater3().eat('steak')
84
+ 'yum'
85
+ >>> GoodEater3().batch_eat(['steak', 'peas'])
86
+ ['yum', 'yum']
87
+
88
+ >>> class GoodEater4(AbstractEater):
89
+ ... def batch_eat(self, foods):
90
+ ... return ['yum' for food in foods]
91
+ >>> GoodEater4().eat('steak')
92
+ 'yum'
93
+ >>> GoodEater4().batch_eat(['steak', 'peas'])
94
+ ['yum', 'yum']
95
+
96
+ >>> class BadEater2(AbstractEater):
97
+ ... pass
98
+ >>> BadEater2().eat('steak')
99
+ Traceback (most recent call last):
100
+ . . .
101
+ NotImplementedError
102
+ >>> BadEater2().batch_eat(['steak', 'peas'])
103
+ Traceback (most recent call last):
104
+ . . .
105
+ NotImplementedError
106
+
107
+ Here are some extra tests:
108
+
109
+ >>> class A(object):
110
+ ... def f(x): pass
111
+ >>> class B(A):
112
+ ... def f(x): pass
113
+ >>> class C(A): pass
114
+ >>> class D(B): pass
115
+
116
+ >>> overridden(A().f)
117
+ False
118
+ >>> overridden(B().f)
119
+ True
120
+ >>> overridden(C().f)
121
+ False
122
+ >>> overridden(D().f)
123
+ True
124
+
125
+ It works for classic classes, too:
126
+
127
+ >>> class A:
128
+ ... def f(x): pass
129
+ >>> class B(A):
130
+ ... def f(x): pass
131
+ >>> class C(A): pass
132
+ >>> class D(B): pass
133
+ >>> overridden(A().f)
134
+ False
135
+ >>> overridden(B().f)
136
+ True
137
+ >>> overridden(C().f)
138
+ False
139
+ >>> overridden(D().f)
140
+ True
141
+
142
+
143
+ read_str()
144
+ ~~~~~~~~~~~~
145
+ >>> from nltk.internals import read_str
146
+
147
+ Test valid scenarios
148
+
149
+ >>> read_str("'valid string'", 0)
150
+ ('valid string', 14)
151
+
152
+ Now test invalid scenarios
153
+
154
+ >>> read_str("should error", 0)
155
+ Traceback (most recent call last):
156
+ ...
157
+ nltk.internals.ReadError: Expected open quote at 0
158
+ >>> read_str("'should error", 0)
159
+ Traceback (most recent call last):
160
+ ...
161
+ nltk.internals.ReadError: Expected close quote at 1
env-llmeval/lib/python3.10/site-packages/nltk/test/lm.doctest ADDED
@@ -0,0 +1,135 @@
 
 
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ .. -*- coding: utf-8 -*-
5
+
6
+
7
+ Regression Tests
8
+ ================
9
+
10
+
11
+ Issue 167
12
+ ---------
13
+ https://github.com/nltk/nltk/issues/167
14
+
15
+ >>> from nltk.corpus import brown
16
+ >>> from nltk.lm.preprocessing import padded_everygram_pipeline
17
+ >>> ngram_order = 3
18
+ >>> train_data, vocab_data = padded_everygram_pipeline(
19
+ ... ngram_order,
20
+ ... brown.sents(categories="news")
21
+ ... )
22
+
23
+ >>> from nltk.lm import WittenBellInterpolated
24
+ >>> lm = WittenBellInterpolated(ngram_order)
25
+ >>> lm.fit(train_data, vocab_data)
26
+
27
+
28
+
29
+
30
+ A sentence containing an unseen word should result in infinite entropy because
31
+ Witten-Bell is based ultimately on MLE, which cannot handle unseen ngrams.
32
+ Crucially, it shouldn't raise any exceptions for unseen words.
33
+
34
+ >>> from nltk.util import ngrams
35
+ >>> sent = ngrams("This is a sentence with the word aaddvark".split(), 3)
36
+ >>> lm.entropy(sent)
37
+ inf
38
+
39
+ If we remove all unseen ngrams from the sentence, we'll get a non-infinite value
40
+ for the entropy.
41
+
42
+ >>> sent = ngrams("This is a sentence".split(), 3)
43
+ >>> round(lm.entropy(sent), 14)
44
+ 10.23701322869105
45
+
46
+
47
+ Issue 367
48
+ ---------
49
+ https://github.com/nltk/nltk/issues/367
50
+
51
+ Reproducing Dan Blanchard's example:
52
+ https://github.com/nltk/nltk/issues/367#issuecomment-14646110
53
+
54
+ >>> from nltk.lm import Lidstone, Vocabulary
55
+ >>> word_seq = list('aaaababaaccbacb')
56
+ >>> ngram_order = 2
57
+ >>> from nltk.util import everygrams
58
+ >>> train_data = [everygrams(word_seq, max_len=ngram_order)]
59
+ >>> V = Vocabulary(['a', 'b', 'c', ''])
60
+ >>> lm = Lidstone(0.2, ngram_order, vocabulary=V)
61
+ >>> lm.fit(train_data)
62
+
63
+ For the doctest to work, we have to sort the vocabulary keys.
64
+
65
+ >>> V_keys = sorted(V)
66
+ >>> round(sum(lm.score(w, ("b",)) for w in V_keys), 6)
67
+ 1.0
68
+ >>> round(sum(lm.score(w, ("a",)) for w in V_keys), 6)
69
+ 1.0
70
+
71
+ >>> [lm.score(w, ("b",)) for w in V_keys]
72
+ [0.05, 0.05, 0.8, 0.05, 0.05]
73
+ >>> [round(lm.score(w, ("a",)), 4) for w in V_keys]
74
+ [0.0222, 0.0222, 0.4667, 0.2444, 0.2444]
75
+
76
+
77
+ Here's reproducing @afourney's comment:
78
+ https://github.com/nltk/nltk/issues/367#issuecomment-15686289
79
+
80
+ >>> sent = ['foo', 'foo', 'foo', 'foo', 'bar', 'baz']
81
+ >>> ngram_order = 3
82
+ >>> from nltk.lm.preprocessing import padded_everygram_pipeline
83
+ >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, [sent])
84
+ >>> from nltk.lm import Lidstone
85
+ >>> lm = Lidstone(0.2, ngram_order)
86
+ >>> lm.fit(train_data, vocab_data)
87
+
88
+ The vocabulary includes the "UNK" symbol as well as two padding symbols.
89
+
90
+ >>> len(lm.vocab)
91
+ 6
92
+ >>> word = "foo"
93
+ >>> context = ("bar", "baz")
94
+
95
+ The raw counts.
96
+
97
+ >>> lm.context_counts(context)[word]
98
+ 0
99
+ >>> lm.context_counts(context).N()
100
+ 1
101
+
102
+ Counts with Lidstone smoothing.
103
+
104
+ >>> lm.context_counts(context)[word] + lm.gamma
105
+ 0.2
106
+ >>> lm.context_counts(context).N() + len(lm.vocab) * lm.gamma
107
+ 2.2
108
+
109
+ Without any backoff, just using Lidstone smoothing, P("foo" | "bar", "baz") should be:
110
+ 0.2 / 2.2 ~= 0.090909
111
+
112
+ >>> round(lm.score(word, context), 6)
113
+ 0.090909
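+
+ The same value can be reproduced by hand from the counts shown above
+ (a small sketch, not part of the original doctest)::
+
+     # Lidstone: P(w | ctx) = (count(ctx, w) + gamma) / (count(ctx) + gamma * |V|)
+     num = lm.context_counts(context)[word] + lm.gamma                 # 0 + 0.2
+     den = lm.context_counts(context).N() + len(lm.vocab) * lm.gamma   # 1 + 6 * 0.2
+     assert round(num / den, 6) == round(lm.score(word, context), 6)   # both 0.090909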
114
+
115
+
116
+ Issue 380
117
+ ---------
118
+ https://github.com/nltk/nltk/issues/380
119
+
120
+ Reproducing setup akin to this comment:
121
+ https://github.com/nltk/nltk/issues/380#issue-12879030
122
+
123
+ For speed, take only the first 100 sentences of the Reuters corpus. This shouldn't affect the test.
124
+
125
+ >>> from nltk.corpus import reuters
126
+ >>> sents = reuters.sents()[:100]
127
+ >>> ngram_order = 3
128
+ >>> from nltk.lm.preprocessing import padded_everygram_pipeline
129
+ >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, sents)
130
+
131
+ >>> from nltk.lm import Lidstone
132
+ >>> lm = Lidstone(0.2, ngram_order)
133
+ >>> lm.fit(train_data, vocab_data)
134
+ >>> lm.score("said", ("",)) < 1
135
+ True
env-llmeval/lib/python3.10/site-packages/nltk/test/meteor.doctest ADDED
@@ -0,0 +1,54 @@
 
 
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ .. -*- coding: utf-8 -*-
5
+
6
+ =============
7
+ METEOR tests
8
+ =============
9
+
10
+ No Alignment test
11
+ ------------------
12
+
13
+ >>> from nltk.translate import meteor
14
+ >>> from nltk import word_tokenize
15
+
16
+ If the candidate has no alignment to any of the references, the METEOR score is 0.
17
+
18
+ >>> round(meteor(
19
+ ... [word_tokenize('The candidate has no alignment to any of the references')],
20
+ ... word_tokenize('John loves Mary')
21
+ ... ), 4)
22
+ 0.0
23
+
24
+ Tests based on wikipedia examples
25
+ ---------------------------------
26
+
27
+ Testing on `wikipedia examples <https://en.wikipedia.org/wiki/METEOR#Examples>`_
28
+
29
+ >>> same_res = round(meteor(
30
+ ... [word_tokenize('The cat sat on the mat')],
31
+ ... word_tokenize('The cat sat on the mat')
32
+ ... ), 4)
33
+ >>> abs(same_res - 0.9977) < 1e-2
34
+ True
35
+
36
+ >>> meteor(
37
+ ... [word_tokenize('The cat sat on the mat')],
38
+ ... word_tokenize('on the mat sat the cat')
39
+ ... )
40
+ 0.5
41
+
42
+ >>> round(meteor(
43
+ ... [word_tokenize('The cat sat on the mat')],
44
+ ... word_tokenize('The cat was sat on the mat')
45
+ ... ), 4)
46
+ 0.9654
47
+
48
+ Test corresponding to issue #2751, where the METEOR score exceeded 1
49
+
50
+ >>> round(meteor(
51
+ ... [word_tokenize('create or update a vm set')],
52
+ ... word_tokenize('creates or updates a virtual machine scale set')
53
+ ... ), 4)
54
+ 0.7806
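+
+ When several references are supplied, the hypothesis is scored against each
+ reference and the best score is returned (a minimal sketch that reuses the
+ sentences above; the best-of-references behaviour is assumed, as in recent
+ NLTK releases):
+
+ >>> best = round(meteor(
+ ...     [word_tokenize('The cat sat on the mat'),
+ ...      word_tokenize('on the mat sat the cat')],
+ ...     word_tokenize('The cat sat on the mat')
+ ... ), 4)
+ >>> best >= 0.5
+ True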
env-llmeval/lib/python3.10/site-packages/nltk/test/metrics.doctest ADDED
@@ -0,0 +1,321 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =======
5
+ Metrics
6
+ =======
7
+
8
+ -----
9
+ Setup
10
+ -----
11
+
12
+ >>> import pytest
13
+ >>> _ = pytest.importorskip("numpy")
14
+
15
+
16
+ The `nltk.metrics` package provides a variety of *evaluation measures*
17
+ which can be used for a wide range of NLP tasks.
18
+
19
+ >>> from nltk.metrics import *
20
+
21
+ ------------------
22
+ Standard IR Scores
23
+ ------------------
24
+
25
+ We can use standard scores from information retrieval to test the
26
+ performance of taggers, chunkers, etc.
27
+
28
+ >>> reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
29
+ >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
30
+ >>> print(accuracy(reference, test))
31
+ 0.8
32
+
33
+
34
+ The following measures apply to sets:
35
+
36
+ >>> reference_set = set(reference)
37
+ >>> test_set = set(test)
38
+ >>> precision(reference_set, test_set)
39
+ 1.0
40
+ >>> print(recall(reference_set, test_set))
41
+ 0.8
42
+ >>> print(f_measure(reference_set, test_set))
43
+ 0.88888888888...
44
+
45
+ Measuring the likelihood of the data, given probability distributions:
46
+
47
+ >>> from nltk import FreqDist, MLEProbDist
48
+ >>> pdist1 = MLEProbDist(FreqDist("aldjfalskfjaldsf"))
49
+ >>> pdist2 = MLEProbDist(FreqDist("aldjfalssjjlldss"))
50
+ >>> print(log_likelihood(['a', 'd'], [pdist1, pdist2]))
51
+ -2.7075187496...
52
+
53
+
54
+ ----------------
55
+ Distance Metrics
56
+ ----------------
57
+
58
+ String edit distance (Levenshtein):
59
+
60
+ >>> edit_distance("rain", "shine")
61
+ 3
62
+ >>> edit_distance_align("shine", "shine")
63
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]
64
+ >>> edit_distance_align("rain", "brainy")
65
+ [(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (4, 6)]
66
+ >>> edit_distance_align("", "brainy")
67
+ [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6)]
68
+ >>> edit_distance_align("", "")
69
+ [(0, 0)]
70
+
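+ ``edit_distance`` also supports Damerau-style transpositions of adjacent
+ characters via an optional keyword argument (a minimal sketch; only the
+ ``transpositions`` flag of ``nltk.metrics.distance.edit_distance`` is used):
+
+ >>> edit_distance("ab", "ba")
+ 2
+ >>> edit_distance("ab", "ba", transpositions=True)
+ 1
+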
71
+ Other distance measures:
72
+
73
+ >>> s1 = set([1,2,3,4])
74
+ >>> s2 = set([3,4,5])
75
+ >>> binary_distance(s1, s2)
76
+ 1.0
77
+ >>> print(jaccard_distance(s1, s2))
78
+ 0.6
79
+ >>> print(masi_distance(s1, s2))
80
+ 0.868
81
+
82
+ ----------------------
83
+ Miscellaneous Measures
84
+ ----------------------
85
+
86
+ Rank Correlation works with two dictionaries mapping keys to ranks.
87
+ The dictionaries should have the same set of keys.
88
+
89
+ >>> spearman_correlation({'e':1, 't':2, 'a':3}, {'e':1, 'a':2, 't':3})
90
+ 0.5
91
+
92
+ Windowdiff uses a sliding window in comparing two segmentations of the same input (e.g. tokenizations, chunkings).
93
+ Segmentations are represented using strings of zeros and ones.
94
+
95
+ >>> s1 = "000100000010"
96
+ >>> s2 = "000010000100"
97
+ >>> s3 = "100000010000"
98
+ >>> s4 = "000000000000"
99
+ >>> s5 = "111111111111"
100
+ >>> windowdiff(s1, s1, 3)
101
+ 0.0
102
+ >>> abs(windowdiff(s1, s2, 3) - 0.3) < 1e-6 # windowdiff(s1, s2, 3) == 0.3
103
+ True
104
+ >>> abs(windowdiff(s2, s3, 3) - 0.8) < 1e-6 # windowdiff(s2, s3, 3) == 0.8
105
+ True
106
+ >>> windowdiff(s1, s4, 3)
107
+ 0.5
108
+ >>> windowdiff(s1, s5, 3)
109
+ 1.0
110
+
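+ A related segmentation metric, Beeferman's Pk, lives in
+ ``nltk.metrics.segmentation`` (a minimal sketch; identical segmentations
+ should give an error of zero):
+
+ >>> from nltk.metrics.segmentation import pk
+ >>> pk(s1, s1, k=3) == 0.0
+ True
+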
111
+ ----------------
112
+ Confusion Matrix
113
+ ----------------
114
+
115
+ >>> reference = 'This is the reference data. Testing 123. aoaeoeoe'
116
+ >>> test = 'Thos iz_the rifirenci data. Testeng 123. aoaeoeoe'
117
+ >>> print(ConfusionMatrix(reference, test))
118
+ | . 1 2 3 T _ a c d e f g h i n o r s t z |
119
+ --+-------------------------------------------+
120
+ |<8>. . . . . 1 . . . . . . . . . . . . . . |
121
+ . | .<2>. . . . . . . . . . . . . . . . . . . |
122
+ 1 | . .<1>. . . . . . . . . . . . . . . . . . |
123
+ 2 | . . .<1>. . . . . . . . . . . . . . . . . |
124
+ 3 | . . . .<1>. . . . . . . . . . . . . . . . |
125
+ T | . . . . .<2>. . . . . . . . . . . . . . . |
126
+ _ | . . . . . .<.>. . . . . . . . . . . . . . |
127
+ a | . . . . . . .<4>. . . . . . . . . . . . . |
128
+ c | . . . . . . . .<1>. . . . . . . . . . . . |
129
+ d | . . . . . . . . .<1>. . . . . . . . . . . |
130
+ e | . . . . . . . . . .<6>. . . 3 . . . . . . |
131
+ f | . . . . . . . . . . .<1>. . . . . . . . . |
132
+ g | . . . . . . . . . . . .<1>. . . . . . . . |
133
+ h | . . . . . . . . . . . . .<2>. . . . . . . |
134
+ i | . . . . . . . . . . 1 . . .<1>. 1 . . . . |
135
+ n | . . . . . . . . . . . . . . .<2>. . . . . |
136
+ o | . . . . . . . . . . . . . . . .<3>. . . . |
137
+ r | . . . . . . . . . . . . . . . . .<2>. . . |
138
+ s | . . . . . . . . . . . . . . . . . .<2>. 1 |
139
+ t | . . . . . . . . . . . . . . . . . . .<3>. |
140
+ z | . . . . . . . . . . . . . . . . . . . .<.>|
141
+ --+-------------------------------------------+
142
+ (row = reference; col = test)
143
+ <BLANKLINE>
144
+
145
+ >>> cm = ConfusionMatrix(reference, test)
146
+ >>> print(cm.pretty_format(sort_by_count=True))
147
+ | e a i o s t . T h n r 1 2 3 c d f g _ z |
148
+ --+-------------------------------------------+
149
+ |<8>. . . . . . . . . . . . . . . . . . 1 . |
150
+ e | .<6>. 3 . . . . . . . . . . . . . . . . . |
151
+ a | . .<4>. . . . . . . . . . . . . . . . . . |
152
+ i | . 1 .<1>1 . . . . . . . . . . . . . . . . |
153
+ o | . . . .<3>. . . . . . . . . . . . . . . . |
154
+ s | . . . . .<2>. . . . . . . . . . . . . . 1 |
155
+ t | . . . . . .<3>. . . . . . . . . . . . . . |
156
+ . | . . . . . . .<2>. . . . . . . . . . . . . |
157
+ T | . . . . . . . .<2>. . . . . . . . . . . . |
158
+ h | . . . . . . . . .<2>. . . . . . . . . . . |
159
+ n | . . . . . . . . . .<2>. . . . . . . . . . |
160
+ r | . . . . . . . . . . .<2>. . . . . . . . . |
161
+ 1 | . . . . . . . . . . . .<1>. . . . . . . . |
162
+ 2 | . . . . . . . . . . . . .<1>. . . . . . . |
163
+ 3 | . . . . . . . . . . . . . .<1>. . . . . . |
164
+ c | . . . . . . . . . . . . . . .<1>. . . . . |
165
+ d | . . . . . . . . . . . . . . . .<1>. . . . |
166
+ f | . . . . . . . . . . . . . . . . .<1>. . . |
167
+ g | . . . . . . . . . . . . . . . . . .<1>. . |
168
+ _ | . . . . . . . . . . . . . . . . . . .<.>. |
169
+ z | . . . . . . . . . . . . . . . . . . . .<.>|
170
+ --+-------------------------------------------+
171
+ (row = reference; col = test)
172
+ <BLANKLINE>
173
+
174
+ >>> print(cm.pretty_format(sort_by_count=True, truncate=10))
175
+ | e a i o s t . T h |
176
+ --+---------------------+
177
+ |<8>. . . . . . . . . |
178
+ e | .<6>. 3 . . . . . . |
179
+ a | . .<4>. . . . . . . |
180
+ i | . 1 .<1>1 . . . . . |
181
+ o | . . . .<3>. . . . . |
182
+ s | . . . . .<2>. . . . |
183
+ t | . . . . . .<3>. . . |
184
+ . | . . . . . . .<2>. . |
185
+ T | . . . . . . . .<2>. |
186
+ h | . . . . . . . . .<2>|
187
+ --+---------------------+
188
+ (row = reference; col = test)
189
+ <BLANKLINE>
190
+
191
+ >>> print(cm.pretty_format(sort_by_count=True, truncate=10, values_in_chart=False))
192
+ | 1 |
193
+ | 1 2 3 4 5 6 7 8 9 0 |
194
+ ---+---------------------+
195
+ 1 |<8>. . . . . . . . . |
196
+ 2 | .<6>. 3 . . . . . . |
197
+ 3 | . .<4>. . . . . . . |
198
+ 4 | . 1 .<1>1 . . . . . |
199
+ 5 | . . . .<3>. . . . . |
200
+ 6 | . . . . .<2>. . . . |
201
+ 7 | . . . . . .<3>. . . |
202
+ 8 | . . . . . . .<2>. . |
203
+ 9 | . . . . . . . .<2>. |
204
+ 10 | . . . . . . . . .<2>|
205
+ ---+---------------------+
206
+ (row = reference; col = test)
207
+ Value key:
208
+ 1:
209
+ 2: e
210
+ 3: a
211
+ 4: i
212
+ 5: o
213
+ 6: s
214
+ 7: t
215
+ 8: .
216
+ 9: T
217
+ 10: h
218
+ <BLANKLINE>
219
+
220
+ For "e", the number of true positives should be 6, while the number of false negatives is 3.
221
+ So, the recall ought to be 6 / (6 + 3):
222
+
223
+ >>> cm.recall("e") # doctest: +ELLIPSIS
224
+ 0.666666...
225
+
226
+ For "e", the false positive is just 1, so the precision should be 6 / (6 + 1):
227
+
228
+ >>> cm.precision("e") # doctest: +ELLIPSIS
229
+ 0.857142...
230
+
231
+ The f-measure with default value of ``alpha = 0.5`` should then be:
232
+
233
+ * *1/(alpha/p + (1-alpha)/r) =*
234
+ * *1/(0.5/p + 0.5/r) =*
235
+ * *2pr / (p + r) =*
236
+ * *2 * 0.857142... * 0.666666... / (0.857142... + 0.666666...) =*
237
+ * *0.749999...*
238
+
239
+ >>> cm.f_measure("e") # doctest: +ELLIPSIS
240
+ 0.749999...
241
+
242
+ --------------------
243
+ Association measures
244
+ --------------------
245
+
246
+ These measures are useful to determine whether the coocurrence of two random
247
+ events is meaningful. They are used, for instance, to distinguish collocations
248
+ from other pairs of adjacent words.
249
+
250
+ We bring some examples of bigram association calculations from Manning and
251
+ Schutze's SNLP, 2nd Ed. chapter 5.
252
+
253
+ >>> n_new_companies, n_new, n_companies, N = 8, 15828, 4675, 14307668
254
+ >>> bam = BigramAssocMeasures
255
+ >>> bam.raw_freq(20, (42, 20), N) == 20. / N
256
+ True
257
+ >>> bam.student_t(n_new_companies, (n_new, n_companies), N)
258
+ 0.999...
259
+ >>> bam.chi_sq(n_new_companies, (n_new, n_companies), N)
260
+ 1.54...
261
+ >>> bam.likelihood_ratio(150, (12593, 932), N)
262
+ 1291...
263
+
264
+ For other associations, we ensure the ordering of the measures:
265
+
266
+ >>> bam.mi_like(20, (42, 20), N) > bam.mi_like(20, (41, 27), N)
267
+ True
268
+ >>> bam.pmi(20, (42, 20), N) > bam.pmi(20, (41, 27), N)
269
+ True
270
+ >>> bam.phi_sq(20, (42, 20), N) > bam.phi_sq(20, (41, 27), N)
271
+ True
272
+ >>> bam.poisson_stirling(20, (42, 20), N) > bam.poisson_stirling(20, (41, 27), N)
273
+ True
274
+ >>> bam.jaccard(20, (42, 20), N) > bam.jaccard(20, (41, 27), N)
275
+ True
276
+ >>> bam.dice(20, (42, 20), N) > bam.dice(20, (41, 27), N)
277
+ True
278
+ >>> bam.fisher(20, (42, 20), N) > bam.fisher(20, (41, 27), N) # doctest: +SKIP
279
+ False
280
+
281
+ For trigrams, we have to provide more count information:
282
+
283
+ >>> n_w1_w2_w3 = 20
284
+ >>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
285
+ >>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
286
+ >>> n_w1, n_w2, n_w3 = 100, 200, 300
287
+ >>> uni_counts = (n_w1, n_w2, n_w3)
288
+ >>> N = 14307668
289
+ >>> tam = TrigramAssocMeasures
290
+ >>> tam.raw_freq(n_w1_w2_w3, pair_counts, uni_counts, N) == 1. * n_w1_w2_w3 / N
291
+ True
292
+ >>> uni_counts2 = (n_w1, n_w2, 100)
293
+ >>> tam.student_t(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.student_t(n_w1_w2_w3, pair_counts, uni_counts, N)
294
+ True
295
+ >>> tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts, N)
296
+ True
297
+ >>> tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts, N)
298
+ True
299
+ >>> tam.pmi(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.pmi(n_w1_w2_w3, pair_counts, uni_counts, N)
300
+ True
301
+ >>> tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts, N)
302
+ True
303
+ >>> tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts, N)
304
+ True
305
+ >>> tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts, N)
306
+ True
307
+
308
+
309
+ For fourgrams, we have to provide more count information:
310
+
311
+ >>> n_w1_w2_w3_w4 = 5
312
+ >>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
313
+ >>> n_w1_w2_w3, n_w2_w3_w4 = 20, 10
314
+ >>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
315
+ >>> triplet_counts = (n_w1_w2_w3, n_w2_w3_w4)
316
+ >>> n_w1, n_w2, n_w3, n_w4 = 100, 200, 300, 400
317
+ >>> uni_counts = (n_w1, n_w2, n_w3, n_w4)
318
+ >>> N = 14307668
319
+ >>> qam = QuadgramAssocMeasures
320
+ >>> qam.raw_freq(n_w1_w2_w3_w4, pair_counts, triplet_counts, uni_counts, N) == 1. * n_w1_w2_w3_w4 / N
321
+ True
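+
+ In practice these association measures are usually combined with a
+ collocation finder rather than called on hand-tallied counts (a minimal
+ sketch; the toy text and the cut-off of 5 are arbitrary choices):
+
+ >>> from nltk.collocations import BigramCollocationFinder
+ >>> words = ('the quick brown fox jumped over the lazy dog '
+ ...          'the quick brown fox').split()
+ >>> finder = BigramCollocationFinder.from_words(words)
+ >>> ('quick', 'brown') in finder.nbest(BigramAssocMeasures.likelihood_ratio, 5)
+ True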
env-llmeval/lib/python3.10/site-packages/nltk/test/misc.doctest ADDED
@@ -0,0 +1,118 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ --------------------------------------------------------------------------------
5
+ Unit tests for the miscellaneous sort functions.
6
+ --------------------------------------------------------------------------------
7
+
8
+ >>> from copy import deepcopy
9
+ >>> from nltk.misc.sort import *
10
+
11
+ A (very) small list of unsorted integers.
12
+
13
+ >>> test_data = [12, 67, 7, 28, 92, 56, 53, 720, 91, 57, 20, 20]
14
+
15
+ Test each sorting method - each method returns the number of operations
16
+ required to sort the data, and sorts in-place (destructively - hence the need
17
+ for multiple copies).
18
+
19
+ >>> sorted_data = deepcopy(test_data)
20
+ >>> selection(sorted_data)
21
+ 66
22
+
23
+ >>> sorted_data
24
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
25
+
26
+ >>> sorted_data = deepcopy(test_data)
27
+ >>> bubble(sorted_data)
28
+ 30
29
+
30
+ >>> sorted_data
31
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
32
+
33
+ >>> sorted_data = deepcopy(test_data)
34
+ >>> merge(sorted_data)
35
+ 30
36
+
37
+ >>> sorted_data
38
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
39
+
40
+ >>> sorted_data = deepcopy(test_data)
41
+ >>> quick(sorted_data)
42
+ 13
43
+
44
+ >>> sorted_data
45
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
46
+
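+ As a sanity check, every method should agree with Python's built-in
+ ``sorted`` (a minimal sketch that reuses the functions exercised above):
+
+ >>> expected = sorted(test_data)
+ >>> for sort in (selection, bubble, merge, quick):
+ ...     data = deepcopy(test_data)
+ ...     _ = sort(data)
+ ...     assert data == expected
+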
47
+ --------------------------------------------------------------------------------
48
+ Unit tests for Wordfinder class
49
+ --------------------------------------------------------------------------------
50
+
51
+ >>> import random
52
+
53
+ >>> # The following is not enough for reproducibility under Python 2/3
54
+ >>> # (see https://bugs.python.org/issue9025) so this test is skipped.
55
+ >>> random.seed(12345)
56
+
57
+ >>> from nltk.misc import wordfinder
58
+ >>> wordfinder.word_finder() # doctest: +SKIP
59
+ Word Finder
60
+ <BLANKLINE>
61
+ J V L A I R O T A T I S I V O D E R E T
62
+ H U U B E A R O E P O C S O R E T N E P
63
+ A D A U Z E E S R A P P A L L M E N T R
64
+ C X A D Q S Z T P E O R S N G P J A D E
65
+ I G Y K K T I A A R G F I D T E L C N S
66
+ R E C N B H T R L T N N B W N T A O A I
67
+ A Y I L O E I A M E I A A Y U R P L L D
68
+ G L T V S T S F E A D I P H D O O H N I
69
+ R L S E C I N I L R N N M E C G R U E A
70
+ A A Y G I C E N L L E O I G Q R T A E L
71
+ M R C E T I S T A E T L L E U A E N R L
72
+ O U O T A S E E C S O O N H Y P A T G Y
73
+ E M H O M M D R E S F P U L T H C F N V
74
+ L A C A I M A M A N L B R U T E D O M I
75
+ O R I L N E E E E E U A R S C R Y L I P
76
+ H T R K E S N N M S I L A S R E V I N U
77
+ T X T A A O U T K S E T A R R E S I B J
78
+ A E D L E L J I F O O R P E L K N I R W
79
+ K H A I D E Q O P R I C K T I M B E R P
80
+ Z K D O O H G N I H T U R V E Y D R O P
81
+ <BLANKLINE>
82
+ 1: INTERCHANGER
83
+ 2: TEARLESSNESS
84
+ 3: UNIVERSALISM
85
+ 4: DESENSITIZER
86
+ 5: INTERMENTION
87
+ 6: TRICHOCYSTIC
88
+ 7: EXTRAMURALLY
89
+ 8: VEGETOALKALI
90
+ 9: PALMELLACEAE
91
+ 10: AESTHETICISM
92
+ 11: PETROGRAPHER
93
+ 12: VISITATORIAL
94
+ 13: OLEOMARGARIC
95
+ 14: WRINKLEPROOF
96
+ 15: PRICKTIMBER
97
+ 16: PRESIDIALLY
98
+ 17: SCITAMINEAE
99
+ 18: ENTEROSCOPE
100
+ 19: APPALLMENT
101
+ 20: TURVEYDROP
102
+ 21: THINGHOOD
103
+ 22: BISERRATE
104
+ 23: GREENLAND
105
+ 24: BRUTEDOM
106
+ 25: POLONIAN
107
+ 26: ACOLHUAN
108
+ 27: LAPORTEA
109
+ 28: TENDING
110
+ 29: TEREDO
111
+ 30: MESOLE
112
+ 31: UNLIMP
113
+ 32: OSTARA
114
+ 33: PILY
115
+ 34: DUNT
116
+ 35: ONYX
117
+ 36: KATH
118
+ 37: JUNE
env-llmeval/lib/python3.10/site-packages/nltk/test/parse.doctest ADDED
@@ -0,0 +1,933 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =========
5
+ Parsing
6
+ =========
7
+
8
+ Unit tests for the Context Free Grammar class
9
+ ---------------------------------------------
10
+
11
+ >>> import pickle
12
+ >>> import subprocess
13
+ >>> import sys
14
+ >>> from nltk import Nonterminal, nonterminals, Production, CFG
15
+
16
+ >>> nt1 = Nonterminal('NP')
17
+ >>> nt2 = Nonterminal('VP')
18
+
19
+ >>> nt1.symbol()
20
+ 'NP'
21
+
22
+ >>> nt1 == Nonterminal('NP')
23
+ True
24
+
25
+ >>> nt1 == nt2
26
+ False
27
+
28
+ >>> S, NP, VP, PP = nonterminals('S, NP, VP, PP')
29
+ >>> N, V, P, DT = nonterminals('N, V, P, DT')
30
+
31
+ >>> prod1 = Production(S, [NP, VP])
32
+ >>> prod2 = Production(NP, [DT, NP])
33
+
34
+ >>> prod1.lhs()
35
+ S
36
+
37
+ >>> prod1.rhs()
38
+ (NP, VP)
39
+
40
+ >>> prod1 == Production(S, [NP, VP])
41
+ True
42
+
43
+ >>> prod1 == prod2
44
+ False
45
+
46
+ >>> grammar = CFG.fromstring("""
47
+ ... S -> NP VP
48
+ ... PP -> P NP
49
+ ... NP -> 'the' N | N PP | 'the' N PP
50
+ ... VP -> V NP | V PP | V NP PP
51
+ ... N -> 'cat'
52
+ ... N -> 'dog'
53
+ ... N -> 'rug'
54
+ ... V -> 'chased'
55
+ ... V -> 'sat'
56
+ ... P -> 'in'
57
+ ... P -> 'on'
58
+ ... """)
59
+
60
+ >>> cmd = """import pickle
61
+ ... from nltk import Production
62
+ ... p = Production('S', ['NP', 'VP'])
63
+ ... print(pickle.dumps(p))
64
+ ... """
65
+
66
+ >>> # Start a subprocess to simulate pickling in another process
67
+ >>> proc = subprocess.run([sys.executable, '-c', cmd], stdout=subprocess.PIPE)
68
+ >>> p1 = pickle.loads(eval(proc.stdout))
69
+ >>> p2 = Production('S', ['NP', 'VP'])
70
+ >>> print(hash(p1) == hash(p2))
71
+ True
72
+
73
+ Unit tests for the rd (Recursive Descent Parser) class
74
+ ------------------------------------------------------
75
+
76
+ Create and run a recursive descent parser over both a syntactically ambiguous
77
+ and unambiguous sentence.
78
+
79
+ >>> from nltk.parse import RecursiveDescentParser
80
+ >>> rd = RecursiveDescentParser(grammar)
81
+
82
+ >>> sentence1 = 'the cat chased the dog'.split()
83
+ >>> sentence2 = 'the cat chased the dog on the rug'.split()
84
+
85
+ >>> for t in rd.parse(sentence1):
86
+ ... print(t)
87
+ (S (NP the (N cat)) (VP (V chased) (NP the (N dog))))
88
+
89
+ >>> for t in rd.parse(sentence2):
90
+ ... print(t)
91
+ (S
92
+ (NP the (N cat))
93
+ (VP (V chased) (NP the (N dog) (PP (P on) (NP the (N rug))))))
94
+ (S
95
+ (NP the (N cat))
96
+ (VP (V chased) (NP the (N dog)) (PP (P on) (NP the (N rug)))))
97
+
98
+
99
106
+
107
+ Unit tests for the sr (Shift Reduce Parser) class
108
+ -------------------------------------------------
109
+
110
+ Create and run a shift reduce parser over both a syntactically ambiguous
111
+ and unambiguous sentence. Note that unlike the recursive descent parser, one
112
+ and only one parse is ever returned.
113
+
114
+ >>> from nltk.parse import ShiftReduceParser
115
+ >>> sr = ShiftReduceParser(grammar)
116
+
117
+ >>> sentence1 = 'the cat chased the dog'.split()
118
+ >>> sentence2 = 'the cat chased the dog on the rug'.split()
119
+
120
+ >>> for t in sr.parse(sentence1):
121
+ ... print(t)
122
+ (S (NP the (N cat)) (VP (V chased) (NP the (N dog))))
123
+
124
+
125
+ The shift reduce parser uses heuristics to decide what to do when there are
126
+ multiple possible shift or reduce operations available - for the supplied
127
+ grammar, the wrong operation is clearly selected, so no parse is found.
128
+
129
+ >>> for t in sr.parse(sentence2):
130
+ ... print(t)
131
+
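+ To inspect which shift and reduce operations are chosen, the parser can be
+ constructed with a trace level; the trace output is verbose, so it is
+ skipped here (a minimal sketch; ``trace`` is the constructor argument of
+ ``ShiftReduceParser``):
+
+ >>> sr_traced = ShiftReduceParser(grammar, trace=2)
+ >>> for t in sr_traced.parse(sentence1):  # doctest: +SKIP
+ ...     print(t)
+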
132
+
133
+ Unit tests for the Chart Parser class
134
+ -------------------------------------
135
+
136
+ We use the demo() function for testing.
137
+ We must turn off showing of times.
138
+
139
+ >>> import nltk
140
+
141
+ First we test tracing with a short sentence
142
+
143
+ >>> nltk.parse.chart.demo(2, print_times=False, trace=1,
144
+ ... sent='I saw a dog', numparses=1)
145
+ * Sentence:
146
+ I saw a dog
147
+ ['I', 'saw', 'a', 'dog']
148
+ <BLANKLINE>
149
+ * Strategy: Bottom-up
150
+ <BLANKLINE>
151
+ |. I . saw . a . dog .|
152
+ |[---------] . . .| [0:1] 'I'
153
+ |. [---------] . .| [1:2] 'saw'
154
+ |. . [---------] .| [2:3] 'a'
155
+ |. . . [---------]| [3:4] 'dog'
156
+ |> . . . .| [0:0] NP -> * 'I'
157
+ |[---------] . . .| [0:1] NP -> 'I' *
158
+ |> . . . .| [0:0] S -> * NP VP
159
+ |> . . . .| [0:0] NP -> * NP PP
160
+ |[---------> . . .| [0:1] S -> NP * VP
161
+ |[---------> . . .| [0:1] NP -> NP * PP
162
+ |. > . . .| [1:1] Verb -> * 'saw'
163
+ |. [---------] . .| [1:2] Verb -> 'saw' *
164
+ |. > . . .| [1:1] VP -> * Verb NP
165
+ |. > . . .| [1:1] VP -> * Verb
166
+ |. [---------> . .| [1:2] VP -> Verb * NP
167
+ |. [---------] . .| [1:2] VP -> Verb *
168
+ |. > . . .| [1:1] VP -> * VP PP
169
+ |[-------------------] . .| [0:2] S -> NP VP *
170
+ |. [---------> . .| [1:2] VP -> VP * PP
171
+ |. . > . .| [2:2] Det -> * 'a'
172
+ |. . [---------] .| [2:3] Det -> 'a' *
173
+ |. . > . .| [2:2] NP -> * Det Noun
174
+ |. . [---------> .| [2:3] NP -> Det * Noun
175
+ |. . . > .| [3:3] Noun -> * 'dog'
176
+ |. . . [---------]| [3:4] Noun -> 'dog' *
177
+ |. . [-------------------]| [2:4] NP -> Det Noun *
178
+ |. . > . .| [2:2] S -> * NP VP
179
+ |. . > . .| [2:2] NP -> * NP PP
180
+ |. [-----------------------------]| [1:4] VP -> Verb NP *
181
+ |. . [------------------->| [2:4] S -> NP * VP
182
+ |. . [------------------->| [2:4] NP -> NP * PP
183
+ |[=======================================]| [0:4] S -> NP VP *
184
+ |. [----------------------------->| [1:4] VP -> VP * PP
185
+ Nr edges in chart: 33
186
+ (S (NP I) (VP (Verb saw) (NP (Det a) (Noun dog))))
187
+ <BLANKLINE>
188
+
189
+ Then we test the different parsing Strategies.
190
+ Note that the number of edges differs between the strategies.
191
+
192
+ Top-down
193
+
194
+ >>> nltk.parse.chart.demo(1, print_times=False, trace=0,
195
+ ... sent='I saw John with a dog', numparses=2)
196
+ * Sentence:
197
+ I saw John with a dog
198
+ ['I', 'saw', 'John', 'with', 'a', 'dog']
199
+ <BLANKLINE>
200
+ * Strategy: Top-down
201
+ <BLANKLINE>
202
+ Nr edges in chart: 48
203
+ (S
204
+ (NP I)
205
+ (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
206
+ (S
207
+ (NP I)
208
+ (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
209
+ <BLANKLINE>
210
+
211
+ Bottom-up
212
+
213
+ >>> nltk.parse.chart.demo(2, print_times=False, trace=0,
214
+ ... sent='I saw John with a dog', numparses=2)
215
+ * Sentence:
216
+ I saw John with a dog
217
+ ['I', 'saw', 'John', 'with', 'a', 'dog']
218
+ <BLANKLINE>
219
+ * Strategy: Bottom-up
220
+ <BLANKLINE>
221
+ Nr edges in chart: 53
222
+ (S
223
+ (NP I)
224
+ (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
225
+ (S
226
+ (NP I)
227
+ (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
228
+ <BLANKLINE>
229
+
230
+ Bottom-up Left-Corner
231
+
232
+ >>> nltk.parse.chart.demo(3, print_times=False, trace=0,
233
+ ... sent='I saw John with a dog', numparses=2)
234
+ * Sentence:
235
+ I saw John with a dog
236
+ ['I', 'saw', 'John', 'with', 'a', 'dog']
237
+ <BLANKLINE>
238
+ * Strategy: Bottom-up left-corner
239
+ <BLANKLINE>
240
+ Nr edges in chart: 36
241
+ (S
242
+ (NP I)
243
+ (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
244
+ (S
245
+ (NP I)
246
+ (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
247
+ <BLANKLINE>
248
+
249
+ Left-Corner with Bottom-Up Filter
250
+
251
+ >>> nltk.parse.chart.demo(4, print_times=False, trace=0,
252
+ ... sent='I saw John with a dog', numparses=2)
253
+ * Sentence:
254
+ I saw John with a dog
255
+ ['I', 'saw', 'John', 'with', 'a', 'dog']
256
+ <BLANKLINE>
257
+ * Strategy: Filtered left-corner
258
+ <BLANKLINE>
259
+ Nr edges in chart: 28
260
+ (S
261
+ (NP I)
262
+ (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
263
+ (S
264
+ (NP I)
265
+ (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
266
+ <BLANKLINE>
267
+
268
+ The stepping chart parser
269
+
270
+ >>> nltk.parse.chart.demo(5, print_times=False, trace=1,
271
+ ... sent='I saw John with a dog', numparses=2)
272
+ * Sentence:
273
+ I saw John with a dog
274
+ ['I', 'saw', 'John', 'with', 'a', 'dog']
275
+ <BLANKLINE>
276
+ * Strategy: Stepping (top-down vs bottom-up)
277
+ <BLANKLINE>
278
+ *** SWITCH TO TOP DOWN
279
+ |[------] . . . . .| [0:1] 'I'
280
+ |. [------] . . . .| [1:2] 'saw'
281
+ |. . [------] . . .| [2:3] 'John'
282
+ |. . . [------] . .| [3:4] 'with'
283
+ |. . . . [------] .| [4:5] 'a'
284
+ |. . . . . [------]| [5:6] 'dog'
285
+ |> . . . . . .| [0:0] S -> * NP VP
286
+ |> . . . . . .| [0:0] NP -> * NP PP
287
+ |> . . . . . .| [0:0] NP -> * Det Noun
288
+ |> . . . . . .| [0:0] NP -> * 'I'
289
+ |[------] . . . . .| [0:1] NP -> 'I' *
290
+ |[------> . . . . .| [0:1] S -> NP * VP
291
+ |[------> . . . . .| [0:1] NP -> NP * PP
292
+ |. > . . . . .| [1:1] VP -> * VP PP
293
+ |. > . . . . .| [1:1] VP -> * Verb NP
294
+ |. > . . . . .| [1:1] VP -> * Verb
295
+ |. > . . . . .| [1:1] Verb -> * 'saw'
296
+ |. [------] . . . .| [1:2] Verb -> 'saw' *
297
+ |. [------> . . . .| [1:2] VP -> Verb * NP
298
+ |. [------] . . . .| [1:2] VP -> Verb *
299
+ |[-------------] . . . .| [0:2] S -> NP VP *
300
+ |. [------> . . . .| [1:2] VP -> VP * PP
301
+ *** SWITCH TO BOTTOM UP
302
+ |. . > . . . .| [2:2] NP -> * 'John'
303
+ |. . . > . . .| [3:3] PP -> * 'with' NP
304
+ |. . . > . . .| [3:3] Prep -> * 'with'
305
+ |. . . . > . .| [4:4] Det -> * 'a'
306
+ |. . . . . > .| [5:5] Noun -> * 'dog'
307
+ |. . [------] . . .| [2:3] NP -> 'John' *
308
+ |. . . [------> . .| [3:4] PP -> 'with' * NP
309
+ |. . . [------] . .| [3:4] Prep -> 'with' *
310
+ |. . . . [------] .| [4:5] Det -> 'a' *
311
+ |. . . . . [------]| [5:6] Noun -> 'dog' *
312
+ |. [-------------] . . .| [1:3] VP -> Verb NP *
313
+ |[--------------------] . . .| [0:3] S -> NP VP *
314
+ |. [-------------> . . .| [1:3] VP -> VP * PP
315
+ |. . > . . . .| [2:2] S -> * NP VP
316
+ |. . > . . . .| [2:2] NP -> * NP PP
317
+ |. . . . > . .| [4:4] NP -> * Det Noun
318
+ |. . [------> . . .| [2:3] S -> NP * VP
319
+ |. . [------> . . .| [2:3] NP -> NP * PP
320
+ |. . . . [------> .| [4:5] NP -> Det * Noun
321
+ |. . . . [-------------]| [4:6] NP -> Det Noun *
322
+ |. . . [--------------------]| [3:6] PP -> 'with' NP *
323
+ |. [----------------------------------]| [1:6] VP -> VP PP *
324
+ *** SWITCH TO TOP DOWN
325
+ |. . > . . . .| [2:2] NP -> * Det Noun
326
+ |. . . . > . .| [4:4] NP -> * NP PP
327
+ |. . . > . . .| [3:3] VP -> * VP PP
328
+ |. . . > . . .| [3:3] VP -> * Verb NP
329
+ |. . . > . . .| [3:3] VP -> * Verb
330
+ |[=========================================]| [0:6] S -> NP VP *
331
+ |. [---------------------------------->| [1:6] VP -> VP * PP
332
+ |. . [---------------------------]| [2:6] NP -> NP PP *
333
+ |. . . . [------------->| [4:6] NP -> NP * PP
334
+ |. [----------------------------------]| [1:6] VP -> Verb NP *
335
+ |. . [--------------------------->| [2:6] S -> NP * VP
336
+ |. . [--------------------------->| [2:6] NP -> NP * PP
337
+ |[=========================================]| [0:6] S -> NP VP *
338
+ |. [---------------------------------->| [1:6] VP -> VP * PP
339
+ |. . . . . . >| [6:6] VP -> * VP PP
340
+ |. . . . . . >| [6:6] VP -> * Verb NP
341
+ |. . . . . . >| [6:6] VP -> * Verb
342
+ *** SWITCH TO BOTTOM UP
343
+ |. . . . > . .| [4:4] S -> * NP VP
344
+ |. . . . [------------->| [4:6] S -> NP * VP
345
+ *** SWITCH TO TOP DOWN
346
+ *** SWITCH TO BOTTOM UP
347
+ *** SWITCH TO TOP DOWN
348
+ *** SWITCH TO BOTTOM UP
349
+ *** SWITCH TO TOP DOWN
350
+ *** SWITCH TO BOTTOM UP
351
+ Nr edges in chart: 61
352
+ (S
353
+ (NP I)
354
+ (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
355
+ (S
356
+ (NP I)
357
+ (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
358
+ <BLANKLINE>
359
+
360
+
361
+ Unit tests for the Incremental Chart Parser class
362
+ -------------------------------------------------
363
+
364
+ The incremental chart parsers are defined in earleychart.py.
365
+ We use the demo() function for testing. We must turn off showing of times.
366
+
367
+ >>> import nltk
368
+
369
+ Earley Chart Parser
370
+
371
+ >>> nltk.parse.earleychart.demo(print_times=False, trace=1,
372
+ ... sent='I saw John with a dog', numparses=2)
373
+ * Sentence:
374
+ I saw John with a dog
375
+ ['I', 'saw', 'John', 'with', 'a', 'dog']
376
+ <BLANKLINE>
377
+ |. I . saw . John . with . a . dog .|
378
+ |[------] . . . . .| [0:1] 'I'
379
+ |. [------] . . . .| [1:2] 'saw'
380
+ |. . [------] . . .| [2:3] 'John'
381
+ |. . . [------] . .| [3:4] 'with'
382
+ |. . . . [------] .| [4:5] 'a'
383
+ |. . . . . [------]| [5:6] 'dog'
384
+ |> . . . . . .| [0:0] S -> * NP VP
385
+ |> . . . . . .| [0:0] NP -> * NP PP
386
+ |> . . . . . .| [0:0] NP -> * Det Noun
387
+ |> . . . . . .| [0:0] NP -> * 'I'
388
+ |[------] . . . . .| [0:1] NP -> 'I' *
389
+ |[------> . . . . .| [0:1] S -> NP * VP
390
+ |[------> . . . . .| [0:1] NP -> NP * PP
391
+ |. > . . . . .| [1:1] VP -> * VP PP
392
+ |. > . . . . .| [1:1] VP -> * Verb NP
393
+ |. > . . . . .| [1:1] VP -> * Verb
394
+ |. > . . . . .| [1:1] Verb -> * 'saw'
395
+ |. [------] . . . .| [1:2] Verb -> 'saw' *
396
+ |. [------> . . . .| [1:2] VP -> Verb * NP
397
+ |. [------] . . . .| [1:2] VP -> Verb *
398
+ |[-------------] . . . .| [0:2] S -> NP VP *
399
+ |. [------> . . . .| [1:2] VP -> VP * PP
400
+ |. . > . . . .| [2:2] NP -> * NP PP
401
+ |. . > . . . .| [2:2] NP -> * Det Noun
402
+ |. . > . . . .| [2:2] NP -> * 'John'
403
+ |. . [------] . . .| [2:3] NP -> 'John' *
404
+ |. [-------------] . . .| [1:3] VP -> Verb NP *
405
+ |. . [------> . . .| [2:3] NP -> NP * PP
406
+ |. . . > . . .| [3:3] PP -> * 'with' NP
407
+ |[--------------------] . . .| [0:3] S -> NP VP *
408
+ |. [-------------> . . .| [1:3] VP -> VP * PP
409
+ |. . . [------> . .| [3:4] PP -> 'with' * NP
410
+ |. . . . > . .| [4:4] NP -> * NP PP
411
+ |. . . . > . .| [4:4] NP -> * Det Noun
412
+ |. . . . > . .| [4:4] Det -> * 'a'
413
+ |. . . . [------] .| [4:5] Det -> 'a' *
414
+ |. . . . [------> .| [4:5] NP -> Det * Noun
415
+ |. . . . . > .| [5:5] Noun -> * 'dog'
416
+ |. . . . . [------]| [5:6] Noun -> 'dog' *
417
+ |. . . . [-------------]| [4:6] NP -> Det Noun *
418
+ |. . . [--------------------]| [3:6] PP -> 'with' NP *
419
+ |. . . . [------------->| [4:6] NP -> NP * PP
420
+ |. . [---------------------------]| [2:6] NP -> NP PP *
421
+ |. [----------------------------------]| [1:6] VP -> VP PP *
422
+ |[=========================================]| [0:6] S -> NP VP *
423
+ |. [---------------------------------->| [1:6] VP -> VP * PP
424
+ |. [----------------------------------]| [1:6] VP -> Verb NP *
425
+ |. . [--------------------------->| [2:6] NP -> NP * PP
426
+ |[=========================================]| [0:6] S -> NP VP *
427
+ |. [---------------------------------->| [1:6] VP -> VP * PP
428
+ (S
429
+ (NP I)
430
+ (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog)))))
431
+ (S
432
+ (NP I)
433
+ (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog))))))
434
+
435
+
436
+ Unit tests for LARGE context-free grammars
437
+ ------------------------------------------
438
+
439
+ Reading the ATIS grammar.
440
+
441
+ >>> grammar = nltk.data.load('grammars/large_grammars/atis.cfg')
442
+ >>> grammar
443
+ <Grammar with 5517 productions>
444
+
445
+ Reading the test sentences.
446
+
447
+ >>> sentences = nltk.data.load('grammars/large_grammars/atis_sentences.txt')
448
+ >>> sentences = nltk.parse.util.extract_test_sentences(sentences)
449
+ >>> len(sentences)
450
+ 98
451
+ >>> testsentence = sentences[22]
452
+ >>> testsentence[0]
453
+ ['show', 'me', 'northwest', 'flights', 'to', 'detroit', '.']
454
+ >>> testsentence[1]
455
+ 17
456
+ >>> sentence = testsentence[0]
457
+
458
+ Now we test all different parsing strategies.
459
+ Note that the number of edges differs between the strategies.
460
+
461
+ Bottom-up parsing.
462
+
463
+ >>> parser = nltk.parse.BottomUpChartParser(grammar)
464
+ >>> chart = parser.chart_parse(sentence)
465
+ >>> print((chart.num_edges()))
466
+ 7661
467
+ >>> print((len(list(chart.parses(grammar.start())))))
468
+ 17
469
+
470
+ Bottom-up Left-corner parsing.
471
+
472
+ >>> parser = nltk.parse.BottomUpLeftCornerChartParser(grammar)
473
+ >>> chart = parser.chart_parse(sentence)
474
+ >>> print((chart.num_edges()))
475
+ 4986
476
+ >>> print((len(list(chart.parses(grammar.start())))))
477
+ 17
478
+
479
+ Left-corner parsing with bottom-up filter.
480
+
481
+ >>> parser = nltk.parse.LeftCornerChartParser(grammar)
482
+ >>> chart = parser.chart_parse(sentence)
483
+ >>> print((chart.num_edges()))
484
+ 1342
485
+ >>> print((len(list(chart.parses(grammar.start())))))
486
+ 17
487
+
488
+ Top-down parsing.
489
+
490
+ >>> parser = nltk.parse.TopDownChartParser(grammar)
491
+ >>> chart = parser.chart_parse(sentence)
492
+ >>> print((chart.num_edges()))
493
+ 28352
494
+ >>> print((len(list(chart.parses(grammar.start())))))
495
+ 17
496
+
497
+ Incremental Bottom-up parsing.
498
+
499
+ >>> parser = nltk.parse.IncrementalBottomUpChartParser(grammar)
500
+ >>> chart = parser.chart_parse(sentence)
501
+ >>> print((chart.num_edges()))
502
+ 7661
503
+ >>> print((len(list(chart.parses(grammar.start())))))
504
+ 17
505
+
506
+ Incremental Bottom-up Left-corner parsing.
507
+
508
+ >>> parser = nltk.parse.IncrementalBottomUpLeftCornerChartParser(grammar)
509
+ >>> chart = parser.chart_parse(sentence)
510
+ >>> print((chart.num_edges()))
511
+ 4986
512
+ >>> print((len(list(chart.parses(grammar.start())))))
513
+ 17
514
+
515
+ Incremental Left-corner parsing with bottom-up filter.
516
+
517
+ >>> parser = nltk.parse.IncrementalLeftCornerChartParser(grammar)
518
+ >>> chart = parser.chart_parse(sentence)
519
+ >>> print((chart.num_edges()))
520
+ 1342
521
+ >>> print((len(list(chart.parses(grammar.start())))))
522
+ 17
523
+
524
+ Incremental Top-down parsing.
525
+
526
+ >>> parser = nltk.parse.IncrementalTopDownChartParser(grammar)
527
+ >>> chart = parser.chart_parse(sentence)
528
+ >>> print((chart.num_edges()))
529
+ 28352
530
+ >>> print((len(list(chart.parses(grammar.start())))))
531
+ 17
532
+
533
+ Earley parsing. This is similar to the incremental top-down algorithm.
534
+
535
+ >>> parser = nltk.parse.EarleyChartParser(grammar)
536
+ >>> chart = parser.chart_parse(sentence)
537
+ >>> print((chart.num_edges()))
538
+ 28352
539
+ >>> print((len(list(chart.parses(grammar.start())))))
540
+ 17
541
+
542
+
543
+ Unit tests for the Probabilistic CFG class
544
+ ------------------------------------------
545
+
546
+ >>> from nltk.corpus import treebank
547
+ >>> from itertools import islice
548
+ >>> from nltk.grammar import PCFG, induce_pcfg
549
+ >>> toy_pcfg1 = PCFG.fromstring("""
550
+ ... S -> NP VP [1.0]
551
+ ... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
552
+ ... Det -> 'the' [0.8] | 'my' [0.2]
553
+ ... N -> 'man' [0.5] | 'telescope' [0.5]
554
+ ... VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
555
+ ... V -> 'ate' [0.35] | 'saw' [0.65]
556
+ ... PP -> P NP [1.0]
557
+ ... P -> 'with' [0.61] | 'under' [0.39]
558
+ ... """)
559
+
560
+ >>> toy_pcfg2 = PCFG.fromstring("""
561
+ ... S -> NP VP [1.0]
562
+ ... VP -> V NP [.59]
563
+ ... VP -> V [.40]
564
+ ... VP -> VP PP [.01]
565
+ ... NP -> Det N [.41]
566
+ ... NP -> Name [.28]
567
+ ... NP -> NP PP [.31]
568
+ ... PP -> P NP [1.0]
569
+ ... V -> 'saw' [.21]
570
+ ... V -> 'ate' [.51]
571
+ ... V -> 'ran' [.28]
572
+ ... N -> 'boy' [.11]
573
+ ... N -> 'cookie' [.12]
574
+ ... N -> 'table' [.13]
575
+ ... N -> 'telescope' [.14]
576
+ ... N -> 'hill' [.5]
577
+ ... Name -> 'Jack' [.52]
578
+ ... Name -> 'Bob' [.48]
579
+ ... P -> 'with' [.61]
580
+ ... P -> 'under' [.39]
581
+ ... Det -> 'the' [.41]
582
+ ... Det -> 'a' [.31]
583
+ ... Det -> 'my' [.28]
584
+ ... """)
585
+
586
+ Create a set of PCFG productions.
587
+
588
+ >>> grammar = PCFG.fromstring("""
589
+ ... A -> B B [.3] | C B C [.7]
590
+ ... B -> B D [.5] | C [.5]
591
+ ... C -> 'a' [.1] | 'b' [0.9]
592
+ ... D -> 'b' [1.0]
593
+ ... """)
594
+ >>> prod = grammar.productions()[0]
595
+ >>> prod
596
+ A -> B B [0.3]
597
+
598
+ >>> prod.lhs()
599
+ A
600
+
601
+ >>> prod.rhs()
602
+ (B, B)
603
+
604
+ >>> print((prod.prob()))
605
+ 0.3
606
+
607
+ >>> grammar.start()
608
+ A
609
+
610
+ >>> grammar.productions()
611
+ [A -> B B [0.3], A -> C B C [0.7], B -> B D [0.5], B -> C [0.5], C -> 'a' [0.1], C -> 'b' [0.9], D -> 'b' [1.0]]
612
+
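+ In a PCFG the probabilities of all productions sharing a left-hand side must
+ sum to one, which we can verify for this toy grammar (a minimal sketch using
+ only the accessors shown above):
+
+ >>> sums = {}
+ >>> for p in grammar.productions():
+ ...     sums[p.lhs()] = sums.get(p.lhs(), 0) + p.prob()
+ >>> all(abs(total - 1.0) < 1e-9 for total in sums.values())
+ True
+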
613
+ Induce some productions using parsed Treebank data.
614
+
615
+ >>> productions = []
616
+ >>> for fileid in treebank.fileids()[:2]:
617
+ ... for t in treebank.parsed_sents(fileid):
618
+ ... productions += t.productions()
619
+
620
+ >>> grammar = induce_pcfg(S, productions)
621
+ >>> grammar
622
+ <Grammar with 71 productions>
623
+
624
+ >>> sorted(grammar.productions(lhs=Nonterminal('PP')))[:2]
625
+ [PP -> IN NP [1.0]]
626
+ >>> sorted(grammar.productions(lhs=Nonterminal('NNP')))[:2]
627
+ [NNP -> 'Agnew' [0.0714286], NNP -> 'Consolidated' [0.0714286]]
628
+ >>> sorted(grammar.productions(lhs=Nonterminal('JJ')))[:2]
629
+ [JJ -> 'British' [0.142857], JJ -> 'former' [0.142857]]
630
+ >>> sorted(grammar.productions(lhs=Nonterminal('NP')))[:2]
631
+ [NP -> CD NNS [0.133333], NP -> DT JJ JJ NN [0.0666667]]
632
+
633
+ Unit tests for the Probabilistic Chart Parse classes
634
+ ----------------------------------------------------
635
+
636
+ >>> tokens = "Jack saw Bob with my cookie".split()
637
+ >>> grammar = toy_pcfg2
638
+ >>> print(grammar)
639
+ Grammar with 23 productions (start state = S)
640
+ S -> NP VP [1.0]
641
+ VP -> V NP [0.59]
642
+ VP -> V [0.4]
643
+ VP -> VP PP [0.01]
644
+ NP -> Det N [0.41]
645
+ NP -> Name [0.28]
646
+ NP -> NP PP [0.31]
647
+ PP -> P NP [1.0]
648
+ V -> 'saw' [0.21]
649
+ V -> 'ate' [0.51]
650
+ V -> 'ran' [0.28]
651
+ N -> 'boy' [0.11]
652
+ N -> 'cookie' [0.12]
653
+ N -> 'table' [0.13]
654
+ N -> 'telescope' [0.14]
655
+ N -> 'hill' [0.5]
656
+ Name -> 'Jack' [0.52]
657
+ Name -> 'Bob' [0.48]
658
+ P -> 'with' [0.61]
659
+ P -> 'under' [0.39]
660
+ Det -> 'the' [0.41]
661
+ Det -> 'a' [0.31]
662
+ Det -> 'my' [0.28]
663
+
664
+ Create several parsers using different queuing strategies and show the
665
+ resulting parses.
666
+
667
+ >>> from nltk.parse import pchart
668
+
669
+ >>> parser = pchart.InsideChartParser(grammar)
670
+ >>> for t in parser.parse(tokens):
671
+ ... print(t)
672
+ (S
673
+ (NP (Name Jack))
674
+ (VP
675
+ (V saw)
676
+ (NP
677
+ (NP (Name Bob))
678
+ (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
679
+ (S
680
+ (NP (Name Jack))
681
+ (VP
682
+ (VP (V saw) (NP (Name Bob)))
683
+ (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
684
+
685
+ >>> parser = pchart.RandomChartParser(grammar)
686
+ >>> for t in parser.parse(tokens):
687
+ ... print(t)
688
+ (S
689
+ (NP (Name Jack))
690
+ (VP
691
+ (V saw)
692
+ (NP
693
+ (NP (Name Bob))
694
+ (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
695
+ (S
696
+ (NP (Name Jack))
697
+ (VP
698
+ (VP (V saw) (NP (Name Bob)))
699
+ (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
700
+
701
+ >>> parser = pchart.UnsortedChartParser(grammar)
702
+ >>> for t in parser.parse(tokens):
703
+ ... print(t)
704
+ (S
705
+ (NP (Name Jack))
706
+ (VP
707
+ (V saw)
708
+ (NP
709
+ (NP (Name Bob))
710
+ (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
711
+ (S
712
+ (NP (Name Jack))
713
+ (VP
714
+ (VP (V saw) (NP (Name Bob)))
715
+ (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
716
+
717
+ >>> parser = pchart.LongestChartParser(grammar)
718
+ >>> for t in parser.parse(tokens):
719
+ ... print(t)
720
+ (S
721
+ (NP (Name Jack))
722
+ (VP
723
+ (V saw)
724
+ (NP
725
+ (NP (Name Bob))
726
+ (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
727
+ (S
728
+ (NP (Name Jack))
729
+ (VP
730
+ (VP (V saw) (NP (Name Bob)))
731
+ (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07)
732
+
733
+ >>> parser = pchart.InsideChartParser(grammar, beam_size = len(tokens)+1)
734
+ >>> for t in parser.parse(tokens):
735
+ ... print(t)
736
+
737
+
738
+ Unit tests for the Viterbi Parse classes
739
+ ----------------------------------------
740
+
741
+ >>> from nltk.parse import ViterbiParser
742
+ >>> tokens = "Jack saw Bob with my cookie".split()
743
+ >>> grammar = toy_pcfg2
744
+
745
+ Parse the tokenized sentence.
746
+
747
+ >>> parser = ViterbiParser(grammar)
748
+ >>> for t in parser.parse(tokens):
749
+ ... print(t)
750
+ (S
751
+ (NP (Name Jack))
752
+ (VP
753
+ (V saw)
754
+ (NP
755
+ (NP (Name Bob))
756
+ (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06)
757
+
758
+
759
+ Unit tests for the FeatStructNonterminal class
760
+ ----------------------------------------------
761
+
762
+ >>> from nltk.grammar import FeatStructNonterminal
763
+ >>> FeatStructNonterminal(
764
+ ... pos='n', agr=FeatStructNonterminal(number='pl', gender='f'))
765
+ [agr=[gender='f', number='pl'], pos='n']
766
+
767
+ >>> FeatStructNonterminal('VP[+fin]/NP[+pl]')
768
+ VP[+fin]/NP[+pl]
769
+
770
+
771
+ Tracing the Feature Chart Parser
772
+ --------------------------------
773
+
774
+ We use the featurechart.demo() function for tracing the Feature Chart Parser.
775
+
776
+ >>> nltk.parse.featurechart.demo(print_times=False,
777
+ ... print_grammar=True,
778
+ ... parser=nltk.parse.featurechart.FeatureChartParser,
779
+ ... sent='I saw John with a dog')
780
+ <BLANKLINE>
781
+ Grammar with 18 productions (start state = S[])
782
+ S[] -> NP[] VP[]
783
+ PP[] -> Prep[] NP[]
784
+ NP[] -> NP[] PP[]
785
+ VP[] -> VP[] PP[]
786
+ VP[] -> Verb[] NP[]
787
+ VP[] -> Verb[]
788
+ NP[] -> Det[pl=?x] Noun[pl=?x]
789
+ NP[] -> 'John'
790
+ NP[] -> 'I'
791
+ Det[] -> 'the'
792
+ Det[] -> 'my'
793
+ Det[-pl] -> 'a'
794
+ Noun[-pl] -> 'dog'
795
+ Noun[-pl] -> 'cookie'
796
+ Verb[] -> 'ate'
797
+ Verb[] -> 'saw'
798
+ Prep[] -> 'with'
799
+ Prep[] -> 'under'
800
+ <BLANKLINE>
801
+ * FeatureChartParser
802
+ Sentence: I saw John with a dog
803
+ |.I.s.J.w.a.d.|
804
+ |[-] . . . . .| [0:1] 'I'
805
+ |. [-] . . . .| [1:2] 'saw'
806
+ |. . [-] . . .| [2:3] 'John'
807
+ |. . . [-] . .| [3:4] 'with'
808
+ |. . . . [-] .| [4:5] 'a'
809
+ |. . . . . [-]| [5:6] 'dog'
810
+ |[-] . . . . .| [0:1] NP[] -> 'I' *
811
+ |[-> . . . . .| [0:1] S[] -> NP[] * VP[] {}
812
+ |[-> . . . . .| [0:1] NP[] -> NP[] * PP[] {}
813
+ |. [-] . . . .| [1:2] Verb[] -> 'saw' *
814
+ |. [-> . . . .| [1:2] VP[] -> Verb[] * NP[] {}
815
+ |. [-] . . . .| [1:2] VP[] -> Verb[] *
816
+ |. [-> . . . .| [1:2] VP[] -> VP[] * PP[] {}
817
+ |[---] . . . .| [0:2] S[] -> NP[] VP[] *
818
+ |. . [-] . . .| [2:3] NP[] -> 'John' *
819
+ |. . [-> . . .| [2:3] S[] -> NP[] * VP[] {}
820
+ |. . [-> . . .| [2:3] NP[] -> NP[] * PP[] {}
821
+ |. [---] . . .| [1:3] VP[] -> Verb[] NP[] *
822
+ |. [---> . . .| [1:3] VP[] -> VP[] * PP[] {}
823
+ |[-----] . . .| [0:3] S[] -> NP[] VP[] *
824
+ |. . . [-] . .| [3:4] Prep[] -> 'with' *
825
+ |. . . [-> . .| [3:4] PP[] -> Prep[] * NP[] {}
826
+ |. . . . [-] .| [4:5] Det[-pl] -> 'a' *
827
+ |. . . . [-> .| [4:5] NP[] -> Det[pl=?x] * Noun[pl=?x] {?x: False}
828
+ |. . . . . [-]| [5:6] Noun[-pl] -> 'dog' *
829
+ |. . . . [---]| [4:6] NP[] -> Det[-pl] Noun[-pl] *
830
+ |. . . . [--->| [4:6] S[] -> NP[] * VP[] {}
831
+ |. . . . [--->| [4:6] NP[] -> NP[] * PP[] {}
832
+ |. . . [-----]| [3:6] PP[] -> Prep[] NP[] *
833
+ |. . [-------]| [2:6] NP[] -> NP[] PP[] *
834
+ |. [---------]| [1:6] VP[] -> VP[] PP[] *
835
+ |. [--------->| [1:6] VP[] -> VP[] * PP[] {}
836
+ |[===========]| [0:6] S[] -> NP[] VP[] *
837
+ |. . [------->| [2:6] S[] -> NP[] * VP[] {}
838
+ |. . [------->| [2:6] NP[] -> NP[] * PP[] {}
839
+ |. [---------]| [1:6] VP[] -> Verb[] NP[] *
840
+ |. [--------->| [1:6] VP[] -> VP[] * PP[] {}
841
+ |[===========]| [0:6] S[] -> NP[] VP[] *
842
+ (S[]
843
+ (NP[] I)
844
+ (VP[]
845
+ (VP[] (Verb[] saw) (NP[] John))
846
+ (PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog)))))
847
+ (S[]
848
+ (NP[] I)
849
+ (VP[]
850
+ (Verb[] saw)
851
+ (NP[]
852
+ (NP[] John)
853
+ (PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog))))))
854
+
855
+
856
+ Unit tests for the Feature Chart Parser classes
857
+ -----------------------------------------------
858
+
859
+ The list of parsers we want to test.
860
+
861
+ >>> parsers = [nltk.parse.featurechart.FeatureChartParser,
862
+ ... nltk.parse.featurechart.FeatureTopDownChartParser,
863
+ ... nltk.parse.featurechart.FeatureBottomUpChartParser,
864
+ ... nltk.parse.featurechart.FeatureBottomUpLeftCornerChartParser,
865
+ ... nltk.parse.earleychart.FeatureIncrementalChartParser,
866
+ ... nltk.parse.earleychart.FeatureEarleyChartParser,
867
+ ... nltk.parse.earleychart.FeatureIncrementalTopDownChartParser,
868
+ ... nltk.parse.earleychart.FeatureIncrementalBottomUpChartParser,
869
+ ... nltk.parse.earleychart.FeatureIncrementalBottomUpLeftCornerChartParser,
870
+ ... ]
871
+
872
+ A helper function that tests each parser on the given grammar and sentence.
873
+ We check that the number of trees is correct, and that all parsers
874
+ return the same trees. Otherwise an error is printed.
875
+
876
+ >>> def unittest(grammar, sentence, nr_trees):
877
+ ... sentence = sentence.split()
878
+ ... trees = None
879
+ ... for P in parsers:
880
+ ... result = P(grammar).parse(sentence)
881
+ ... result = set(tree.freeze() for tree in result)
882
+ ... if len(result) != nr_trees:
883
+ ... print("Wrong nr of trees:", len(result))
884
+ ... elif trees is None:
885
+ ... trees = result
886
+ ... elif result != trees:
887
+ ... print("Trees differ for parser:", P.__name__)
888
+
889
+ The demo grammar from before, with an ambiguous sentence.
890
+
891
+ >>> isawjohn = nltk.parse.featurechart.demo_grammar()
892
+ >>> unittest(isawjohn, "I saw John with a dog with my cookie", 5)
893
+
894
+ This grammar tests that variables in different grammar rules are renamed
895
+ before unification. (The problematic variable in this case is ?X.)
896
+
897
+ >>> whatwasthat = nltk.grammar.FeatureGrammar.fromstring('''
898
+ ... S[] -> NP[num=?N] VP[num=?N, slash=?X]
899
+ ... NP[num=?X] -> "what"
900
+ ... NP[num=?X] -> "that"
901
+ ... VP[num=?P, slash=none] -> V[num=?P] NP[]
902
+ ... V[num=sg] -> "was"
903
+ ... ''')
904
+ >>> unittest(whatwasthat, "what was that", 1)
905
+
906
+ This grammar tests that the same rule can be used in different places
907
+ in another rule, and that the variables are properly renamed.
908
+
909
+ >>> thislovesthat = nltk.grammar.FeatureGrammar.fromstring('''
910
+ ... S[] -> NP[case=nom] V[] NP[case=acc]
911
+ ... NP[case=?X] -> Pron[case=?X]
912
+ ... Pron[] -> "this"
913
+ ... Pron[] -> "that"
914
+ ... V[] -> "loves"
915
+ ... ''')
916
+ >>> unittest(thislovesthat, "this loves that", 1)
917
+
918
+
919
+ Tests for loading feature grammar files
920
+ ---------------------------------------
921
+
922
+ Alternative 1: first load the grammar, then create the parser.
923
+
924
+ >>> fcfg = nltk.data.load('grammars/book_grammars/feat0.fcfg')
925
+ >>> fcp1 = nltk.parse.FeatureChartParser(fcfg)
926
+ >>> print((type(fcp1)))
927
+ <class 'nltk.parse.featurechart.FeatureChartParser'>
928
+
929
+ Alternative 2: directly load the parser.
930
+
931
+ >>> fcp2 = nltk.parse.load_parser('grammars/book_grammars/feat0.fcfg')
932
+ >>> print((type(fcp2)))
933
+ <class 'nltk.parse.featurechart.FeatureChartParser'>
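+
+ Either parser can then be used as usual (a minimal sketch; the example
+ sentence is assumed to be covered by ``feat0.fcfg``, the feature grammar used
+ in the NLTK book):
+
+ >>> trees = list(fcp2.parse('Kim likes children'.split()))
+ >>> len(trees) > 0
+ True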
env-llmeval/lib/python3.10/site-packages/nltk/test/portuguese_en.doctest ADDED
@@ -0,0 +1,568 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==================================
5
+ Examples for Portuguese Processing
6
+ ==================================
7
+
8
+ This HOWTO contains a variety of examples relating to the Portuguese language.
9
+ It is intended to be read in conjunction with the NLTK book
10
+ (``https://www.nltk.org/book/``). For instructions on running the Python
11
+ interpreter, please see the section *Getting Started with Python*, in Chapter 1.
12
+
13
+ --------------------------------------------
14
+ Python Programming, with Portuguese Examples
15
+ --------------------------------------------
16
+
17
+ Chapter 1 of the NLTK book contains many elementary programming examples, all
18
+ with English texts. In this section, we'll see some corresponding examples
19
+ using Portuguese. Please refer to the chapter for full discussion. *Vamos!*
20
+
21
+ >>> from nltk.test.portuguese_en_fixt import setup_module
22
+ >>> setup_module()
23
+
24
+ >>> from nltk.examples.pt import *
25
+ *** Introductory Examples for the NLTK Book ***
26
+ Loading ptext1, ... and psent1, ...
27
+ Type the name of the text or sentence to view it.
28
+ Type: 'texts()' or 'sents()' to list the materials.
29
+ ptext1: Memórias Póstumas de Brás Cubas (1881)
30
+ ptext2: Dom Casmurro (1899)
31
+ ptext3: Gênesis
32
+ ptext4: Folha de Sao Paulo (1994)
33
+
34
+
35
+ Any time we want to find out about these texts, we just have
36
+ to enter their names at the Python prompt:
37
+
38
+ >>> ptext2
39
+ <Text: Dom Casmurro (1899)>
40
+
41
+ Searching Text
42
+ --------------
43
+
44
+ A concordance permits us to see words in context.
45
+
46
+ >>> ptext1.concordance('olhos')
47
+ Building index...
48
+ Displaying 25 of 138 matches:
49
+ De pé , à cabeceira da cama , com os olhos estúpidos , a boca entreaberta , a t
50
+ orelhas . Pela minha parte fechei os olhos e deixei - me ir à ventura . Já agor
51
+ xões de cérebro enfermo . Como ia de olhos fechados , não via o caminho ; lembr
52
+ gelos eternos . Com efeito , abri os olhos e vi que o meu animal galopava numa
53
+ me apareceu então , fitando - me uns olhos rutilantes como o sol . Tudo nessa f
54
+ mim mesmo . Então , encarei - a com olhos súplices , e pedi mais alguns anos .
55
+ ...
56
+
57
+ For a given word, we can find words with a similar text distribution:
58
+
59
+ >>> ptext1.similar('chegar')
60
+ Building word-context index...
61
+ acabada acudir aludir avistar bramanismo casamento cheguei com contar
62
+ contrário corpo dali deixei desferirem dizer fazer filhos já leitor lhe
63
+ >>> ptext3.similar('chegar')
64
+ Building word-context index...
65
+ achar alumiar arrombar destruir governar guardar ir lavrar passar que
66
+ toda tomar ver vir
67
+
68
+ We can search for the statistically significant collocations in a text:
69
+
70
+ >>> ptext1.collocations()
71
+ Building collocations list
72
+ Quincas Borba; Lobo Neves; alguma coisa; Brás Cubas; meu pai; dia
73
+ seguinte; não sei; Meu pai; alguns instantes; outra vez; outra coisa;
74
+ por exemplo; mim mesmo; coisa nenhuma; mesma coisa; não era; dias
75
+ depois; Passeio Público; olhar para; das coisas
76
+
77
+ We can search for words in context, with the help of *regular expressions*, e.g.:
78
+
79
+ >>> ptext1.findall("<olhos> (<.*>)")
80
+ estúpidos; e; fechados; rutilantes; súplices; a; do; babavam;
81
+ na; moles; se; da; umas; espraiavam; chamejantes; espetados;
82
+ ...
83
+
84
+ We can automatically generate random text based on a given text, e.g.:
85
+
86
+ >>> ptext3.generate() # doctest: +SKIP
87
+ No princípio , criou Deus os abençoou , dizendo : Onde { estão } e até
88
+ à ave dos céus , { que } será . Disse mais Abrão : Dá - me a mulher
89
+ que tomaste ; porque daquele poço Eseque , { tinha .} E disse : Não
90
+ poderemos descer ; mas , do campo ainda não estava na casa do teu
91
+ pescoço . E viveu Serugue , depois Simeão e Levi { são } estes ? E o
92
+ varão , porque habitava na terra de Node , da mão de Esaú : Jeús ,
93
+ Jalão e Corá
94
+
95
+ Texts as List of Words
96
+ ----------------------
97
+
98
+ A few sentences have been defined for you.
99
+
100
+ >>> psent1
101
+ ['o', 'amor', 'da', 'gl\xf3ria', 'era', 'a', 'coisa', 'mais',
102
+ 'verdadeiramente', 'humana', 'que', 'h\xe1', 'no', 'homem', ',',
103
+ 'e', ',', 'conseq\xfcentemente', ',', 'a', 'sua', 'mais',
104
+ 'genu\xedna', 'fei\xe7\xe3o', '.']
105
+ >>>
106
+
107
+ Notice that the sentence has been *tokenized*. Each token is
108
+ represented as a string, represented using quotes, e.g. ``'coisa'``.
109
+ Some strings contain special characters, e.g. ``\xf3``,
110
+ the internal representation for ó.
111
+ The tokens are combined in the form of a *list*. How long is this list?
112
+
113
+ >>> len(psent1)
114
+ 25
115
+ >>>
116
+
117
+ What is the vocabulary of this sentence?
118
+
119
+ >>> sorted(set(psent1))
120
+ [',', '.', 'a', 'amor', 'coisa', 'conseqüentemente', 'da', 'e', 'era',
121
+ 'feição', 'genuína', 'glória', 'homem', 'humana', 'há', 'mais', 'no',
122
+ 'o', 'que', 'sua', 'verdadeiramente']
123
+ >>>
124
+
125
+ Let's iterate over each item in ``psent2``, and print information for each:
126
+
127
+ >>> for w in psent2:
128
+ ... print(w, len(w), w[-1])
129
+ ...
130
+ Não 3 o
131
+ consultes 9 s
132
+ dicionários 11 s
133
+ . 1 .
134
+
135
+ Observe that the strings print in a human-readable form; in Python 3 no explicit ``decode()`` call is needed.
136
+ Also notice that we accessed the last character of a string ``w`` using ``w[-1]``.
137
+
138
+ We just saw a ``for`` loop above. Another useful control structure is a
139
+ *list comprehension*.
140
+
141
+ >>> [w.upper() for w in psent2]
142
+ ['N\xc3O', 'CONSULTES', 'DICION\xc1RIOS', '.']
143
+ >>> [w for w in psent1 if w.endswith('a')]
144
+ ['da', 'gl\xf3ria', 'era', 'a', 'coisa', 'humana', 'a', 'sua', 'genu\xedna']
145
+ >>> [w for w in ptext4 if len(w) > 15]
146
+ ['norte-irlandeses', 'pan-nacionalismo', 'predominatemente', 'primeiro-ministro',
147
+ 'primeiro-ministro', 'irlandesa-americana', 'responsabilidades', 'significativamente']
148
+
149
+ We can examine the relative frequency of words in a text, using ``FreqDist``:
150
+
151
+ >>> fd1 = FreqDist(ptext1)
152
+ >>> fd1
153
+ <FreqDist with 10848 samples and 77098 outcomes>
154
+ >>> fd1['olhos']
155
+ 137
156
+ >>> fd1.max()
157
+ ','
158
+ >>> fd1.samples()[:100]
159
+ [',', '.', 'a', 'que', 'de', 'e', '-', 'o', ';', 'me', 'um', 'n\xe3o',
160
+ '\x97', 'se', 'do', 'da', 'uma', 'com', 'os', '\xe9', 'era', 'as', 'eu',
161
+ 'lhe', 'ao', 'em', 'para', 'mas', '...', '!', '\xe0', 'na', 'mais', '?',
162
+ 'no', 'como', 'por', 'N\xe3o', 'dos', 'o', 'ele', ':', 'Virg\xedlia',
163
+ 'me', 'disse', 'minha', 'das', 'O', '/', 'A', 'CAP\xcdTULO', 'muito',
164
+ 'depois', 'coisa', 'foi', 'sem', 'olhos', 'ela', 'nos', 'tinha', 'nem',
165
+ 'E', 'outro', 'vida', 'nada', 'tempo', 'menos', 'outra', 'casa', 'homem',
166
+ 'porque', 'quando', 'mim', 'mesmo', 'ser', 'pouco', 'estava', 'dia',
167
+ 't\xe3o', 'tudo', 'Mas', 'at\xe9', 'D', 'ainda', 's\xf3', 'alguma',
168
+ 'la', 'vez', 'anos', 'h\xe1', 'Era', 'pai', 'esse', 'lo', 'dizer', 'assim',
169
+ 'ent\xe3o', 'dizia', 'aos', 'Borba']
170
+
171
+ ---------------
172
+ Reading Corpora
173
+ ---------------
174
+
175
+ Accessing the Machado Text Corpus
176
+ ---------------------------------
177
+
178
+ NLTK includes the complete works of Machado de Assis.
179
+
180
+ >>> from nltk.corpus import machado
181
+ >>> machado.fileids()
182
+ ['contos/macn001.txt', 'contos/macn002.txt', 'contos/macn003.txt', ...]
183
+
184
+ Each file corresponds to one of the works of Machado de Assis. To see a complete
185
+ list of works, you can look at the corpus README file: ``print(machado.readme())``.
186
+ Let's access the text of the *Posthumous Memories of Brás Cubas*.
187
+
188
+ We can access the raw text as a single string, and inspect 200 characters starting
189
+ from position 10,000.
190
+
191
+ >>> raw_text = machado.raw('romance/marm05.txt')
192
+ >>> raw_text[10000:10200]
193
+ u', primou no\nEstado, e foi um dos amigos particulares do vice-rei Conde
194
+ da Cunha.\n\nComo este apelido de Cubas lhe\ncheirasse excessivamente a
195
+ tanoaria, alegava meu pai, bisneto de Dami\xe3o, que o\ndito ape'
196
+
197
+ However, this is not a very useful way to work with a text. We generally think
198
+ of a text as a sequence of words and punctuation, not characters:
199
+
200
+ >>> text1 = machado.words('romance/marm05.txt')
201
+ >>> text1
202
+ ['Romance', ',', 'Mem\xf3rias', 'P\xf3stumas', 'de', ...]
203
+ >>> len(text1)
204
+ 77098
205
+ >>> len(set(text1))
206
+ 10848
207
+
208
+ Here's a program that finds the most common ngrams that contain a
209
+ particular target word.
210
+
211
+ >>> from nltk import ngrams, FreqDist
212
+ >>> target_word = 'olhos'
213
+ >>> fd = FreqDist(ng
214
+ ... for ng in ngrams(text1, 5)
215
+ ... if target_word in ng)
216
+ >>> for hit in fd.samples():
217
+ ... print(' '.join(hit))
218
+ ...
219
+ , com os olhos no
220
+ com os olhos no ar
221
+ com os olhos no chão
222
+ e todos com os olhos
223
+ me estar com os olhos
224
+ os olhos estúpidos , a
225
+ os olhos na costura ,
226
+ os olhos no ar ,
227
+ , com os olhos espetados
228
+ , com os olhos estúpidos
229
+ , com os olhos fitos
230
+ , com os olhos naquele
231
+ , com os olhos para
232
+
233
+
234
+ Accessing the MacMorpho Tagged Corpus
235
+ -------------------------------------
236
+
237
+ NLTK includes the MAC-MORPHO Brazilian Portuguese POS-tagged news text,
238
+ with over a million words of
239
+ journalistic texts extracted from ten sections of
240
+ the daily newspaper *Folha de São Paulo*, 1994.
241
+
242
+ We can access this corpus as a sequence of words or tagged words as follows:
243
+
244
+ >>> import nltk.corpus
245
+ >>> nltk.corpus.mac_morpho.words()
246
+ ['Jersei', 'atinge', 'm\xe9dia', 'de', 'Cr$', '1,4', ...]
247
+ >>> nltk.corpus.mac_morpho.sents()
248
+ [['Jersei', 'atinge', 'm\xe9dia', 'de', 'Cr$', '1,4', 'milh\xe3o',
249
+ 'em', 'a', 'venda', 'de', 'a', 'Pinhal', 'em', 'S\xe3o', 'Paulo'],
250
+ ['Programe', 'sua', 'viagem', 'a', 'a', 'Exposi\xe7\xe3o', 'Nacional',
251
+ 'do', 'Zeb', ',', 'que', 'come\xe7a', 'dia', '25'], ...]
252
+ >>> nltk.corpus.mac_morpho.tagged_words()
253
+ [('Jersei', 'N'), ('atinge', 'V'), ('m\xe9dia', 'N'), ...]
254
+
255
+ We can also access the corpus as a sequence of tagged sentences.
256
+
257
+ >>> nltk.corpus.mac_morpho.tagged_sents()
258
+ [[('Jersei', 'N'), ('atinge', 'V'), ('m\xe9dia', 'N'), ('de', 'PREP'),
259
+ ('Cr$', 'CUR'), ('1,4', 'NUM'), ('milh\xe3o', 'N'), ('em', 'PREP|+'),
260
+ ('a', 'ART'), ('venda', 'N'), ('de', 'PREP|+'), ('a', 'ART'),
261
+ ('Pinhal', 'NPROP'), ('em', 'PREP'), ('S\xe3o', 'NPROP'),
262
+ ('Paulo', 'NPROP')],
263
+ [('Programe', 'V'), ('sua', 'PROADJ'), ('viagem', 'N'), ('a', 'PREP|+'),
264
+ ('a', 'ART'), ('Exposi\xe7\xe3o', 'NPROP'), ('Nacional', 'NPROP'),
265
+ ('do', 'NPROP'), ('Zeb', 'NPROP'), (',', ','), ('que', 'PRO-KS-REL'),
266
+ ('come\xe7a', 'V'), ('dia', 'N'), ('25', 'N|AP')], ...]
267
+
268
+ This data can be used to train taggers (examples below for the Floresta treebank).
269
+
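+ As a quick illustrative sketch (not part of the original tutorial), a unigram
+ tagger could be trained directly on this corpus, mirroring the Floresta
+ examples below; the sentence is invented and the tags are not asserted here:
+
+ >>> mac_train = nltk.corpus.mac_morpho.tagged_sents()       # doctest: +SKIP
+ >>> mac_tagger = nltk.UnigramTagger(mac_train)               # doctest: +SKIP
+ >>> mac_tagger.tag('o resultado foi bom'.split())            # doctest: +SKIP
+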
270
+ Accessing the Floresta Portuguese Treebank
271
+ ------------------------------------------
272
+
273
+ The NLTK data distribution includes the
274
+ "Floresta Sinta(c)tica Corpus" version 7.4, available from
275
+ ``https://www.linguateca.pt/Floresta/``.
276
+
277
+ We can access this corpus as a sequence of words or tagged words as follows:
278
+
279
+ >>> from nltk.corpus import floresta
280
+ >>> floresta.words()
281
+ ['Um', 'revivalismo', 'refrescante', 'O', '7_e_Meio', ...]
282
+ >>> floresta.tagged_words()
283
+ [('Um', '>N+art'), ('revivalismo', 'H+n'), ...]
284
+
285
+ The tags consist of some syntactic information, followed by a plus sign,
286
+ followed by a conventional part-of-speech tag. Let's strip off the material before
287
+ the plus sign:
288
+
289
+ >>> def simplify_tag(t):
290
+ ... if "+" in t:
291
+ ... return t[t.index("+")+1:]
292
+ ... else:
293
+ ... return t
294
+ >>> twords = floresta.tagged_words()
295
+ >>> twords = [(w.lower(), simplify_tag(t)) for (w,t) in twords]
296
+ >>> twords[:10]
297
+ [('um', 'art'), ('revivalismo', 'n'), ('refrescante', 'adj'), ('o', 'art'), ('7_e_meio', 'prop'),
298
+ ('\xe9', 'v-fin'), ('um', 'art'), ('ex-libris', 'n'), ('de', 'prp'), ('a', 'art')]
299
+
300
+ Pretty printing the tagged words:
301
+
302
+ >>> print(' '.join(word + '/' + tag for (word, tag) in twords[:10]))
303
+ um/art revivalismo/n refrescante/adj o/art 7_e_meio/prop é/v-fin um/art ex-libris/n de/prp a/art
304
+
305
+ Count the word tokens and types, and determine the most common word:
306
+
307
+ >>> words = floresta.words()
308
+ >>> len(words)
309
+ 211852
310
+ >>> fd = nltk.FreqDist(words)
311
+ >>> len(fd)
312
+ 29421
313
+ >>> fd.max()
314
+ 'de'
315
+
316
+ List the 20 most frequent tags, in order of decreasing frequency:
317
+
318
+ >>> tags = [simplify_tag(tag) for (word,tag) in floresta.tagged_words()]
319
+ >>> fd = nltk.FreqDist(tags)
320
+ >>> fd.keys()[:20]
321
+ ['n', 'prp', 'art', 'v-fin', ',', 'prop', 'adj', 'adv', '.',
322
+ 'conj-c', 'v-inf', 'pron-det', 'v-pcp', 'num', 'pron-indp',
323
+ 'pron-pers', '\xab', '\xbb', 'conj-s', '}']
324
+
325
+ We can also access the corpus grouped by sentence:
326
+
327
+ >>> floresta.sents()
328
+ [['Um', 'revivalismo', 'refrescante'],
329
+ ['O', '7_e_Meio', '\xe9', 'um', 'ex-libris', 'de', 'a', 'noite',
330
+ 'algarvia', '.'], ...]
331
+ >>> floresta.tagged_sents()
332
+ [[('Um', '>N+art'), ('revivalismo', 'H+n'), ('refrescante', 'N<+adj')],
333
+ [('O', '>N+art'), ('7_e_Meio', 'H+prop'), ('\xe9', 'P+v-fin'),
334
+ ('um', '>N+art'), ('ex-libris', 'H+n'), ('de', 'H+prp'),
335
+ ('a', '>N+art'), ('noite', 'H+n'), ('algarvia', 'N<+adj'), ('.', '.')],
336
+ ...]
337
+ >>> floresta.parsed_sents()
338
+ [Tree('UTT+np', [Tree('>N+art', ['Um']), Tree('H+n', ['revivalismo']),
339
+ Tree('N<+adj', ['refrescante'])]),
340
+ Tree('STA+fcl',
341
+ [Tree('SUBJ+np', [Tree('>N+art', ['O']),
342
+ Tree('H+prop', ['7_e_Meio'])]),
343
+ Tree('P+v-fin', ['\xe9']),
344
+ Tree('SC+np',
345
+ [Tree('>N+art', ['um']),
346
+ Tree('H+n', ['ex-libris']),
347
+ Tree('N<+pp', [Tree('H+prp', ['de']),
348
+ Tree('P<+np', [Tree('>N+art', ['a']),
349
+ Tree('H+n', ['noite']),
350
+ Tree('N<+adj', ['algarvia'])])])]),
351
+ Tree('.', ['.'])]), ...]
352
+
353
+ To view a parse tree, use the ``draw()`` method, e.g.:
354
+
355
+ >>> psents = floresta.parsed_sents()
356
+ >>> psents[5].draw() # doctest: +SKIP
357
+
358
+ Character Encodings
359
+ -------------------
360
+
361
+ Python understands the common character encoding used for Portuguese, ISO 8859-1 (ISO Latin 1).
362
+
363
+ >>> import os, nltk.test
364
+ >>> testdir = os.path.split(nltk.test.__file__)[0]
365
+ >>> text = open(os.path.join(testdir, 'floresta.txt'), 'rb').read().decode('ISO 8859-1')
366
+ >>> text[:60]
367
+ 'O 7 e Meio \xe9 um ex-libris da noite algarvia.\n\xc9 uma das mais '
368
+ >>> print(text[:60])
369
+ O 7 e Meio é um ex-libris da noite algarvia.
370
+ É uma das mais
371
+
372
+ For more information about character encodings and Python, please see section 3.3 of the book.
373
+
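+ As a small extra illustration (not from the original tutorial), an accented
+ string can be encoded to ISO 8859-1 bytes and decoded back without loss:
+
+ >>> 'São Paulo é ótimo'.encode('ISO-8859-1')              # doctest: +SKIP
+ b'S\xe3o Paulo \xe9 \xf3timo'
+ >>> b'S\xe3o Paulo \xe9 \xf3timo'.decode('ISO-8859-1')     # doctest: +SKIP
+ 'São Paulo é ótimo'
+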
374
+ ----------------
375
+ Processing Tasks
376
+ ----------------
377
+
378
+
379
+ Simple Concordancing
380
+ --------------------
381
+
382
+ Here's a function that takes a word and a specified amount of context (measured
383
+ in characters), and generates a concordance for that word.
384
+
385
+ >>> def concordance(word, context=30):
386
+ ... for sent in floresta.sents():
387
+ ... if word in sent:
388
+ ... pos = sent.index(word)
389
+ ... left = ' '.join(sent[:pos])
390
+ ... right = ' '.join(sent[pos+1:])
391
+ ... print('%*s %s %-*s' %
392
+ ... (context, left[-context:], word, context, right[:context]))
393
+
394
+ >>> concordance("dar") # doctest: +SKIP
395
+ anduru , foi o suficiente para dar a volta a o resultado .
396
+ 1. O PÚBLICO veio dar a a imprensa diária portuguesa
397
+ A fartura de pensamento pode dar maus resultados e nós não quer
398
+ Começa a dar resultados a política de a Uni
399
+ ial começar a incorporar- lo e dar forma a um ' site ' que tem se
400
+ r com Constantino para ele lhe dar também os papéis assinados .
401
+ va a brincar , pois não lhe ia dar procuração nenhuma enquanto nã
402
+ érica como o antídoto capaz de dar sentido a o seu enorme poder .
403
+ . . .
404
+ >>> concordance("vender") # doctest: +SKIP
405
+ er recebido uma encomenda para vender 4000 blindados a o Iraque .
406
+ mérico_Amorim caso conseguisse vender o lote de acções de o empresár
407
+ mpre ter jovens simpáticos a ? vender ? chega ! }
408
+ Disse que o governo vai vender ? desde automóvel até particip
409
+ ndiciou ontem duas pessoas por vender carro com ágio .
410
+ A intenção de Fleury é vender as ações para equilibrar as fi
411
+
412
+ Part-of-Speech Tagging
413
+ ----------------------
414
+
415
+ Let's begin by getting the tagged sentence data, and simplifying the tags
416
+ as described earlier.
417
+
418
+ >>> from nltk.corpus import floresta
419
+ >>> tsents = floresta.tagged_sents()
420
+ >>> tsents = [[(w.lower(),simplify_tag(t)) for (w,t) in sent] for sent in tsents if sent]
421
+ >>> train = tsents[100:]
422
+ >>> test = tsents[:100]
423
+
424
+ We already know that ``n`` is the most common tag, so we can set up a
425
+ default tagger that tags every word as a noun, and see how well it does:
426
+
427
+ >>> tagger0 = nltk.DefaultTagger('n')
428
+ >>> nltk.tag.accuracy(tagger0, test)
429
+ 0.17697228144989338
430
+
431
+ Evidently, about one in every six words is a noun. Let's improve on this by
432
+ training a unigram tagger:
433
+
434
+ >>> tagger1 = nltk.UnigramTagger(train, backoff=tagger0)
435
+ >>> nltk.tag.accuracy(tagger1, test)
436
+ 0.87029140014214645
437
+
438
+ Next a bigram tagger:
439
+
440
+ >>> tagger2 = nltk.BigramTagger(train, backoff=tagger1)
441
+ >>> nltk.tag.accuracy(tagger2, test)
442
+ 0.89019189765458417
443
+
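+ Once trained, the tagger can be applied to new tokenized text. The sentence
+ below is invented and the resulting tags depend on the training data, so the
+ output is not asserted here:
+
+ >>> tagger2.tag('a casa era bonita'.split())   # doctest: +SKIP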
444
+
445
+ Sentence Segmentation
446
+ ---------------------
447
+
448
+ Punkt is a language-neutral sentence segmentation tool. We load the pre-trained Portuguese model and apply it to the raw text of a novel:
449
+
450
+ >>> sent_tokenizer=nltk.data.load('tokenizers/punkt/portuguese.pickle')
451
+ >>> raw_text = machado.raw('romance/marm05.txt')
452
+ >>> sentences = sent_tokenizer.tokenize(raw_text)
453
+ >>> for sent in sentences[1000:1005]:
454
+ ... print("<<", sent, ">>")
455
+ ...
456
+ << Em verdade, parecia ainda mais mulher do que era;
457
+ seria criança nos seus folgares de moça; mas assim quieta, impassível, tinha a
458
+ compostura da mulher casada. >>
459
+ << Talvez essa circunstância lhe diminuía um pouco da
460
+ graça virginal. >>
461
+ << Depressa nos familiarizamos; a mãe fazia-lhe grandes elogios, eu
462
+ escutava-os de boa sombra, e ela sorria com os olhos fúlgidos, como se lá dentro
463
+ do cérebro lhe estivesse a voar uma borboletinha de asas de ouro e olhos de
464
+ diamante... >>
465
+ << Digo lá dentro, porque cá fora o
466
+ que esvoaçou foi uma borboleta preta, que subitamente penetrou na varanda, e
467
+ começou a bater as asas em derredor de D. Eusébia. >>
468
+ << D. Eusébia deu um grito,
469
+ levantou-se, praguejou umas palavras soltas: - T'esconjuro!... >>
470
+
471
+ The sentence tokenizer can be trained and evaluated on other text.
472
+ The source text (from the Floresta Portuguese Treebank) contains one sentence per line.
473
+ We read the text, split it into its lines, and then join these lines together using
474
+ spaces. Now the information about sentence breaks has been discarded. We split this
475
+ material into training and testing data:
476
+
477
+ >>> import os, nltk.test
478
+ >>> testdir = os.path.split(nltk.test.__file__)[0]
479
+ >>> text = open(os.path.join(testdir, 'floresta.txt'), 'rb').read().decode('ISO-8859-1')
480
+ >>> lines = text.split('\n')
481
+ >>> train = ' '.join(lines[10:])
482
+ >>> test = ' '.join(lines[:10])
483
+
484
+ Now we train the sentence segmenter (or sentence tokenizer) and use it on our test sentences:
485
+
486
+ >>> stok = nltk.PunktSentenceTokenizer(train)
487
+ >>> print(stok.tokenize(test))
488
+ ['O 7 e Meio \xe9 um ex-libris da noite algarvia.',
489
+ '\xc9 uma das mais antigas discotecas do Algarve, situada em Albufeira,
490
+ que continua a manter os tra\xe7os decorativos e as clientelas de sempre.',
491
+ '\xc9 um pouco a vers\xe3o de uma esp\xe9cie de \xaboutro lado\xbb da noite,
492
+ a meio caminho entre os devaneios de uma fauna perif\xe9rica, seja de Lisboa,
493
+ Londres, Dublin ou Faro e Portim\xe3o, e a postura circunspecta dos fi\xe9is da casa,
494
+ que dela esperam a m\xfasica \xabgeracionista\xbb dos 60 ou dos 70.',
495
+ 'N\xe3o deixa de ser, nos tempos que correm, um certo \xabvery typical\xbb algarvio,
496
+ cabe\xe7a de cartaz para os que querem fugir a algumas movimenta\xe7\xf5es nocturnas
497
+ j\xe1 a caminho da ritualiza\xe7\xe3o de massas, do g\xe9nero \xabvamos todos ao
498
+ Calypso e encontramo-nos na Locomia\xbb.',
499
+ 'E assim, aos 2,5 milh\xf5es que o Minist\xe9rio do Planeamento e Administra\xe7\xe3o
500
+ do Territ\xf3rio j\xe1 gasta no pagamento do pessoal afecto a estes organismos,
501
+ v\xeam juntar-se os montantes das obras propriamente ditas, que os munic\xedpios,
502
+ j\xe1 com projectos na m\xe3o, v\xeam reivindicar junto do Executivo, como salienta
503
+ aquele membro do Governo.',
504
+ 'E o dinheiro \xabn\xe3o falta s\xf3 \xe0s c\xe2maras\xbb, lembra o secret\xe1rio de Estado,
505
+ que considera que a solu\xe7\xe3o para as autarquias \xe9 \xabespecializarem-se em
506
+ fundos comunit\xe1rios\xbb.',
507
+ 'Mas como, se muitas n\xe3o disp\xf5em, nos seus quadros, dos t\xe9cnicos necess\xe1rios?',
508
+ '\xabEncomendem-nos a projectistas de fora\xbb porque, se as obras vierem a ser financiadas,
509
+ eles at\xe9 saem de gra\xe7a, j\xe1 que, nesse caso, \xabos fundos comunit\xe1rios pagam
510
+ os projectos, o mesmo n\xe3o acontecendo quando eles s\xe3o feitos pelos GAT\xbb,
511
+ dado serem organismos do Estado.',
512
+ 'Essa poder\xe1 vir a ser uma hip\xf3tese, at\xe9 porque, no terreno, a capacidade dos GAT
513
+ est\xe1 cada vez mais enfraquecida.',
514
+ 'Alguns at\xe9 j\xe1 desapareceram, como o de Castro Verde, e outros t\xeam vindo a perder quadros.']
515
+
516
+ NLTK's data collection includes a trained model for Portuguese sentence
517
+ segmentation, which can be loaded as follows. It is faster to load a trained model than
518
+ to retrain it.
519
+
520
+ >>> stok = nltk.data.load('tokenizers/punkt/portuguese.pickle')
521
+
522
+ Stemming
523
+ --------
524
+
525
+ NLTK includes the RSLP Portuguese stemmer. Here we use it to stem some Portuguese text:
526
+
527
+ >>> stemmer = nltk.stem.RSLPStemmer()
528
+ >>> stemmer.stem("copiar")
529
+ 'copi'
530
+ >>> stemmer.stem("paisagem")
531
+ 'pais'
532
+
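+ A stemmer is usually applied token by token, for example with a list
+ comprehension (an illustrative sketch; the exact stems follow the RSLP rules
+ and are not asserted here):
+
+ >>> [stemmer.stem(t) for t in 'as paisagens mais bonitas'.split()]   # doctest: +SKIP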
533
+
534
+ Stopwords
535
+ ---------
536
+
537
+ NLTK includes Portuguese stopwords:
538
+
539
+ >>> stopwords = nltk.corpus.stopwords.words('portuguese')
540
+ >>> stopwords[:10]
541
+ ['a', 'ao', 'aos', 'aquela', 'aquelas', 'aquele', 'aqueles', 'aquilo', 'as', 'at\xe9']
542
+
543
+ Now we can use these to filter text. Let's find the most frequent words (other than stopwords)
544
+ and print them in descending order of frequency:
545
+
546
+ >>> fd = nltk.FreqDist(w.lower() for w in floresta.words() if w not in stopwords)
547
+ >>> for word in list(fd.keys())[:20]:
548
+ ... print(word, fd[word])
549
+ , 13444
550
+ . 7725
551
+ « 2369
552
+ » 2310
553
+ é 1305
554
+ o 1086
555
+ } 1047
556
+ { 1044
557
+ a 897
558
+ ; 633
559
+ em 516
560
+ ser 466
561
+ sobre 349
562
+ os 313
563
+ anos 301
564
+ ontem 292
565
+ ainda 279
566
+ segundo 256
567
+ ter 249
568
+ dois 231
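+
+ The list above is dominated by punctuation and by capitalised stopwords that
+ slip through the filter. As a further illustrative refinement (not part of the
+ original tutorial), we can keep only alphabetic tokens, compare the lowercased
+ form against the stopword list, and ask ``FreqDist`` for the most common items:
+
+ >>> fd2 = nltk.FreqDist(w.lower() for w in floresta.words()
+ ...                     if w.isalpha() and w.lower() not in stopwords)
+ >>> fd2.most_common(10)   # doctest: +SKIP
+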
env-llmeval/lib/python3.10/site-packages/nltk/test/semantics.doctest ADDED
@@ -0,0 +1,667 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =========
5
+ Semantics
6
+ =========
7
+
8
+ >>> # Setup tests by setting the counter to 0
9
+ >>> from nltk.sem import logic
10
+ >>> logic._counter._value = 0
11
+
12
+ >>> import nltk
13
+ >>> from nltk.sem import Valuation, Model
14
+ >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
15
+ ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
16
+ ... ('dog', set(['d1'])),
17
+ ... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
18
+ >>> val = Valuation(v)
19
+ >>> dom = val.domain
20
+ >>> m = Model(dom, val)
21
+
22
+ Evaluation
23
+ ----------
24
+
25
+ The top-level method of a ``Model`` instance is ``evaluate()``, which
26
+ assigns a semantic value to expressions of the ``logic`` module, under
27
+ an assignment ``g``:
28
+
29
+ >>> dom = val.domain
30
+ >>> g = nltk.sem.Assignment(dom)
31
+ >>> m.evaluate('all x.(boy(x) -> - girl(x))', g)
32
+ True
33
+
34
+
35
+ ``evaluate()`` calls a recursive function ``satisfy()``, which in turn
36
+ calls a function ``i()`` to interpret non-logical constants and
37
+ individual variables. ``i()`` delegates the interpretation of these to
38
+ the model's ``Valuation`` and the variable assignment ``g``
39
+ respectively. Any atomic expression which cannot be assigned a value
40
+ by ``i`` raises an ``Undefined`` exception; this is caught by
41
+ ``evaluate``, which returns the string ``'Undefined'``.
42
+
43
+ >>> m.evaluate('walk(adam)', g, trace=2)
44
+ <BLANKLINE>
45
+ 'walk(adam)' is undefined under M, g
46
+ 'Undefined'
47
+
48
+ Batch Processing
49
+ ----------------
50
+
51
+ The utility functions ``interpret_sents()`` and ``evaluate_sents()`` are intended to
52
+ help with processing multiple sentences. Here's an example of the first of these:
53
+
54
+ >>> sents = ['Mary walks']
55
+ >>> results = nltk.sem.util.interpret_sents(sents, 'grammars/sample_grammars/sem2.fcfg')
56
+ >>> for result in results:
57
+ ... for (synrep, semrep) in result:
58
+ ... print(synrep)
59
+ (S[SEM=<walk(mary)>]
60
+ (NP[-LOC, NUM='sg', SEM=<\P.P(mary)>]
61
+ (PropN[-LOC, NUM='sg', SEM=<\P.P(mary)>] Mary))
62
+ (VP[NUM='sg', SEM=<\x.walk(x)>]
63
+ (IV[NUM='sg', SEM=<\x.walk(x)>, TNS='pres'] walks)))
64
+
65
+ In order to provide backwards compatibility with 'legacy' grammars where the semantics value
66
+ is specified with a lowercase
67
+ ``sem`` feature, the relevant feature name can be passed to the function using the
68
+ ``semkey`` parameter, as shown here:
69
+
70
+ >>> sents = ['raining']
71
+ >>> g = nltk.grammar.FeatureGrammar.fromstring("""
72
+ ... % start S
73
+ ... S[sem=<raining>] -> 'raining'
74
+ ... """)
75
+ >>> results = nltk.sem.util.interpret_sents(sents, g, semkey='sem')
76
+ >>> for result in results:
77
+ ... for (synrep, semrep) in result:
78
+ ... print(semrep)
79
+ raining
80
+
81
+ The function ``evaluate_sents()`` works in a similar manner, but also needs to be
82
+ passed a ``Model`` against which the semantic representations are evaluated.
83
+
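+ For instance, reusing a model ``m`` and assignment ``g`` such as the ones
+ constructed at the top of this file (an illustrative sketch; the resulting
+ truth values are not asserted here):
+
+ >>> results = nltk.sem.util.evaluate_sents(['Mary walks'],
+ ...     'grammars/sample_grammars/sem2.fcfg', m, g)   # doctest: +SKIP
+ >>> for result in results:                            # doctest: +SKIP
+ ...     for (synrep, semrep, value) in result:
+ ...         print(semrep, value)
+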
84
+ Unit Tests
85
+ ==========
86
+
87
+
88
+ Unit tests for relations and valuations
89
+ ---------------------------------------
90
+
91
+ >>> from nltk.sem import *
92
+
93
+ Relations are sets of tuples, all of the same length.
94
+
95
+ >>> s1 = set([('d1', 'd2'), ('d1', 'd1'), ('d2', 'd1')])
96
+ >>> is_rel(s1)
97
+ True
98
+ >>> s2 = set([('d1', 'd2'), ('d1', 'd2'), ('d1',)])
99
+ >>> is_rel(s2)
100
+ Traceback (most recent call last):
101
+ . . .
102
+ ValueError: Set set([('d1', 'd2'), ('d1',)]) contains sequences of different lengths
103
+ >>> s3 = set(['d1', 'd2'])
104
+ >>> is_rel(s3)
105
+ Traceback (most recent call last):
106
+ . . .
107
+ ValueError: Set set(['d2', 'd1']) contains sequences of different lengths
108
+ >>> s4 = set2rel(s3)
109
+ >>> is_rel(s4)
110
+ True
111
+ >>> is_rel(set())
112
+ True
113
+ >>> null_binary_rel = set([(None, None)])
114
+ >>> is_rel(null_binary_rel)
115
+ True
116
+
117
+ Sets of entities are converted into sets of singleton tuples
118
+ (containing strings).
119
+
120
+ >>> sorted(set2rel(s3))
121
+ [('d1',), ('d2',)]
122
+ >>> sorted(set2rel(set([1,3,5,])))
123
+ ['1', '3', '5']
124
+ >>> set2rel(set()) == set()
125
+ True
126
+ >>> set2rel(set2rel(s3)) == set2rel(s3)
127
+ True
128
+
129
+ Predication is evaluated by set membership.
130
+
131
+ >>> ('d1', 'd2') in s1
132
+ True
133
+ >>> ('d2', 'd2') in s1
134
+ False
135
+ >>> ('d1',) in s1
136
+ False
137
+ >>> 'd2' in s1
138
+ False
139
+ >>> ('d1',) in s4
140
+ True
141
+ >>> ('d1',) in set()
142
+ False
143
+ >>> 'd1' in null_binary_rel
144
+ False
145
+
146
+
147
+ >>> val = Valuation([('Fido', 'd1'), ('dog', set(['d1', 'd2'])), ('walk', set())])
148
+ >>> sorted(val['dog'])
149
+ [('d1',), ('d2',)]
150
+ >>> val.domain == set(['d1', 'd2'])
151
+ True
152
+ >>> print(val.symbols)
153
+ ['Fido', 'dog', 'walk']
154
+
155
+
156
+ Parse a valuation from a string.
157
+
158
+ >>> v = """
159
+ ... john => b1
160
+ ... mary => g1
161
+ ... suzie => g2
162
+ ... fido => d1
163
+ ... tess => d2
164
+ ... noosa => n
165
+ ... girl => {g1, g2}
166
+ ... boy => {b1, b2}
167
+ ... dog => {d1, d2}
168
+ ... bark => {d1, d2}
169
+ ... walk => {b1, g2, d1}
170
+ ... chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}
171
+ ... see => {(b1, g1), (b2, d2), (g1, b1),(d2, b1), (g2, n)}
172
+ ... in => {(b1, n), (b2, n), (d2, n)}
173
+ ... with => {(b1, g1), (g1, b1), (d1, b1), (b1, d1)}
174
+ ... """
175
+ >>> val = Valuation.fromstring(v)
176
+
177
+ >>> print(val) # doctest: +SKIP
178
+ {'bark': set([('d1',), ('d2',)]),
179
+ 'boy': set([('b1',), ('b2',)]),
180
+ 'chase': set([('b1', 'g1'), ('g2', 'd2'), ('g1', 'd1'), ('b2', 'g1')]),
181
+ 'dog': set([('d1',), ('d2',)]),
182
+ 'fido': 'd1',
183
+ 'girl': set([('g2',), ('g1',)]),
184
+ 'in': set([('d2', 'n'), ('b1', 'n'), ('b2', 'n')]),
185
+ 'john': 'b1',
186
+ 'mary': 'g1',
187
+ 'noosa': 'n',
188
+ 'see': set([('b1', 'g1'), ('b2', 'd2'), ('d2', 'b1'), ('g2', 'n'), ('g1', 'b1')]),
189
+ 'suzie': 'g2',
190
+ 'tess': 'd2',
191
+ 'walk': set([('d1',), ('b1',), ('g2',)]),
192
+ 'with': set([('b1', 'g1'), ('d1', 'b1'), ('b1', 'd1'), ('g1', 'b1')])}
193
+
194
+
195
+ Unit tests for function argument application in a Model
196
+ -------------------------------------------------------
197
+
198
+ >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
199
+ ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
200
+ ... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')])),
201
+ ... ('kiss', null_binary_rel)]
202
+ >>> val = Valuation(v)
203
+ >>> dom = val.domain
204
+ >>> m = Model(dom, val)
205
+ >>> g = Assignment(dom)
206
+ >>> sorted(val['boy'])
207
+ [('b1',), ('b2',)]
208
+ >>> ('b1',) in val['boy']
209
+ True
210
+ >>> ('g1',) in val['boy']
211
+ False
212
+ >>> ('foo',) in val['boy']
213
+ False
214
+ >>> ('b1', 'g1') in val['love']
215
+ True
216
+ >>> ('b1', 'b1') in val['kiss']
217
+ False
218
+ >>> sorted(val.domain)
219
+ ['b1', 'b2', 'd1', 'g1', 'g2']
220
+
221
+
222
+ Model Tests
223
+ ===========
224
+
225
+ Extension of Lambda expressions
226
+
227
+ >>> v0 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
228
+ ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
229
+ ... ('dog', set(['d1'])),
230
+ ... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
231
+
232
+ >>> val0 = Valuation(v0)
233
+ >>> dom0 = val0.domain
234
+ >>> m0 = Model(dom0, val0)
235
+ >>> g0 = Assignment(dom0)
236
+
237
+ >>> print(m0.evaluate(r'\x. \y. love(x, y)', g0) == {'g2': {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False}, 'b2': {'g2': True, 'b2': False, 'b1': False, 'g1': False, 'd1': False}, 'b1': {'g2': False, 'b2': False, 'b1': False, 'g1': True, 'd1': False}, 'g1': {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False}, 'd1': {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False}})
238
+ True
239
+ >>> print(m0.evaluate(r'\x. dog(x) (adam)', g0))
240
+ False
241
+ >>> print(m0.evaluate(r'\x. (dog(x) | boy(x)) (adam)', g0))
242
+ True
243
+ >>> print(m0.evaluate(r'\x. \y. love(x, y)(fido)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False})
244
+ True
245
+ >>> print(m0.evaluate(r'\x. \y. love(x, y)(adam)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': True, 'd1': False})
246
+ True
247
+ >>> print(m0.evaluate(r'\x. \y. love(x, y)(betty)', g0) == {'g2': False, 'b2': False, 'b1': True, 'g1': False, 'd1': False})
248
+ True
249
+ >>> print(m0.evaluate(r'\x. \y. love(x, y)(betty)(adam)', g0))
250
+ True
251
+ >>> print(m0.evaluate(r'\x. \y. love(x, y)(betty, adam)', g0))
252
+ True
253
+ >>> print(m0.evaluate(r'\y. \x. love(x, y)(fido)(adam)', g0))
254
+ False
255
+ >>> print(m0.evaluate(r'\y. \x. love(x, y)(betty, adam)', g0))
256
+ True
257
+ >>> print(m0.evaluate(r'\x. exists y. love(x, y)', g0) == {'g2': True, 'b2': True, 'b1': True, 'g1': True, 'd1': False})
258
+ True
259
+ >>> print(m0.evaluate(r'\z. adam', g0) == {'g2': 'b1', 'b2': 'b1', 'b1': 'b1', 'g1': 'b1', 'd1': 'b1'})
260
+ True
261
+ >>> print(m0.evaluate(r'\z. love(x, y)', g0) == {'g2': False, 'b2': False, 'b1': False, 'g1': False, 'd1': False})
262
+ True
263
+
264
+
265
+ Propositional Model Test
266
+ ------------------------
267
+
268
+ >>> tests = [
269
+ ... ('P & Q', True),
270
+ ... ('P & R', False),
271
+ ... ('- P', False),
272
+ ... ('- R', True),
273
+ ... ('- - P', True),
274
+ ... ('- (P & R)', True),
275
+ ... ('P | R', True),
276
+ ... ('R | P', True),
277
+ ... ('R | R', False),
278
+ ... ('- P | R', False),
279
+ ... ('P | - P', True),
280
+ ... ('P -> Q', True),
281
+ ... ('P -> R', False),
282
+ ... ('R -> P', True),
283
+ ... ('P <-> P', True),
284
+ ... ('R <-> R', True),
285
+ ... ('P <-> R', False),
286
+ ... ]
287
+ >>> val1 = Valuation([('P', True), ('Q', True), ('R', False)])
288
+ >>> dom = set([])
289
+ >>> m = Model(dom, val1)
290
+ >>> g = Assignment(dom)
291
+ >>> for (sent, testvalue) in tests:
292
+ ... semvalue = m.evaluate(sent, g)
293
+ ... if semvalue == testvalue:
294
+ ... print('*', end=' ')
295
+ * * * * * * * * * * * * * * * * *
296
+
297
+
298
+ Test of i Function
299
+ ------------------
300
+
301
+ >>> from nltk.sem import Expression
302
+ >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
303
+ ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
304
+ ... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
305
+ >>> val = Valuation(v)
306
+ >>> dom = val.domain
307
+ >>> m = Model(dom, val)
308
+ >>> g = Assignment(dom, [('x', 'b1'), ('y', 'g2')])
309
+ >>> exprs = ['adam', 'girl', 'love', 'walks', 'x', 'y', 'z']
310
+ >>> parsed_exprs = [Expression.fromstring(e) for e in exprs]
311
+ >>> sorted_set = lambda x: sorted(x) if isinstance(x, set) else x
312
+ >>> for parsed in parsed_exprs:
313
+ ... try:
314
+ ... print("'%s' gets value %s" % (parsed, sorted_set(m.i(parsed, g))))
315
+ ... except Undefined:
316
+ ... print("'%s' is Undefined" % parsed)
317
+ 'adam' gets value b1
318
+ 'girl' gets value [('g1',), ('g2',)]
319
+ 'love' gets value [('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]
320
+ 'walks' is Undefined
321
+ 'x' gets value b1
322
+ 'y' gets value g2
323
+ 'z' is Undefined
324
+
325
+ Test for formulas in Model
326
+ --------------------------
327
+
328
+ >>> tests = [
329
+ ... ('love(adam, betty)', True),
330
+ ... ('love(adam, sue)', 'Undefined'),
331
+ ... ('dog(fido)', True),
332
+ ... ('- dog(fido)', False),
333
+ ... ('- - dog(fido)', True),
334
+ ... ('- dog(sue)', 'Undefined'),
335
+ ... ('dog(fido) & boy(adam)', True),
336
+ ... ('- (dog(fido) & boy(adam))', False),
337
+ ... ('- dog(fido) & boy(adam)', False),
338
+ ... ('dog(fido) | boy(adam)', True),
339
+ ... ('- (dog(fido) | boy(adam))', False),
340
+ ... ('- dog(fido) | boy(adam)', True),
341
+ ... ('- dog(fido) | - boy(adam)', False),
342
+ ... ('dog(fido) -> boy(adam)', True),
343
+ ... ('- (dog(fido) -> boy(adam))', False),
344
+ ... ('- dog(fido) -> boy(adam)', True),
345
+ ... ('exists x . love(adam, x)', True),
346
+ ... ('all x . love(adam, x)', False),
347
+ ... ('fido = fido', True),
348
+ ... ('exists x . all y. love(x, y)', False),
349
+ ... ('exists x . (x = fido)', True),
350
+ ... ('all x . (dog(x) | - dog(x))', True),
351
+ ... ('adam = mia', 'Undefined'),
352
+ ... ('\\x. (boy(x) | girl(x))', {'g2': True, 'b2': True, 'b1': True, 'g1': True, 'd1': False}),
353
+ ... ('\\x. exists y. (boy(x) & love(x, y))', {'g2': False, 'b2': True, 'b1': True, 'g1': False, 'd1': False}),
354
+ ... ('exists z1. boy(z1)', True),
355
+ ... ('exists x. (boy(x) & - (x = adam))', True),
356
+ ... ('exists x. (boy(x) & all y. love(y, x))', False),
357
+ ... ('all x. (boy(x) | girl(x))', False),
358
+ ... ('all x. (girl(x) -> exists y. boy(y) & love(x, y))', False),
359
+ ... ('exists x. (boy(x) & all y. (girl(y) -> love(y, x)))', True),
360
+ ... ('exists x. (boy(x) & all y. (girl(y) -> love(x, y)))', False),
361
+ ... ('all x. (dog(x) -> - girl(x))', True),
362
+ ... ('exists x. exists y. (love(x, y) & love(x, y))', True),
363
+ ... ]
364
+ >>> for (sent, testvalue) in tests:
365
+ ... semvalue = m.evaluate(sent, g)
366
+ ... if semvalue == testvalue:
367
+ ... print('*', end=' ')
368
+ ... else:
369
+ ... print(sent, semvalue)
370
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
371
+
372
+
373
+
374
+ Satisfier Tests
375
+ ---------------
376
+
377
+ >>> formulas = [
378
+ ... 'boy(x)',
379
+ ... '(x = x)',
380
+ ... '(boy(x) | girl(x))',
381
+ ... '(boy(x) & girl(x))',
382
+ ... 'love(adam, x)',
383
+ ... 'love(x, adam)',
384
+ ... '- (x = adam)',
385
+ ... 'exists z22. love(x, z22)',
386
+ ... 'exists y. love(y, x)',
387
+ ... 'all y. (girl(y) -> love(x, y))',
388
+ ... 'all y. (girl(y) -> love(y, x))',
389
+ ... 'all y. (girl(y) -> (boy(x) & love(y, x)))',
390
+ ... 'boy(x) & all y. (girl(y) -> love(x, y))',
391
+ ... 'boy(x) & all y. (girl(y) -> love(y, x))',
392
+ ... 'boy(x) & exists y. (girl(y) & love(y, x))',
393
+ ... 'girl(x) -> dog(x)',
394
+ ... 'all y. (dog(y) -> (x = y))',
395
+ ... '- exists y. love(y, x)',
396
+ ... 'exists y. (love(adam, y) & love(y, x))'
397
+ ... ]
398
+ >>> g.purge()
399
+ >>> g.add('x', 'b1')
400
+ {'x': 'b1'}
401
+ >>> for f in formulas:
402
+ ... try:
403
+ ... print("'%s' gets value: %s" % (f, m.evaluate(f, g)))
404
+ ... except Undefined:
405
+ ... print("'%s' is Undefined" % f)
406
+ 'boy(x)' gets value: True
407
+ '(x = x)' gets value: True
408
+ '(boy(x) | girl(x))' gets value: True
409
+ '(boy(x) & girl(x))' gets value: False
410
+ 'love(adam, x)' gets value: False
411
+ 'love(x, adam)' gets value: False
412
+ '- (x = adam)' gets value: False
413
+ 'exists z22. love(x, z22)' gets value: True
414
+ 'exists y. love(y, x)' gets value: True
415
+ 'all y. (girl(y) -> love(x, y))' gets value: False
416
+ 'all y. (girl(y) -> love(y, x))' gets value: True
417
+ 'all y. (girl(y) -> (boy(x) & love(y, x)))' gets value: True
418
+ 'boy(x) & all y. (girl(y) -> love(x, y))' gets value: False
419
+ 'boy(x) & all y. (girl(y) -> love(y, x))' gets value: True
420
+ 'boy(x) & exists y. (girl(y) & love(y, x))' gets value: True
421
+ 'girl(x) -> dog(x)' gets value: True
422
+ 'all y. (dog(y) -> (x = y))' gets value: False
423
+ '- exists y. love(y, x)' gets value: False
424
+ 'exists y. (love(adam, y) & love(y, x))' gets value: True
425
+
426
+ >>> from nltk.sem import Expression
427
+ >>> for fmla in formulas:
428
+ ... p = Expression.fromstring(fmla)
429
+ ... g.purge()
430
+ ... print("Satisfiers of '%s':\n\t%s" % (p, sorted(m.satisfiers(p, 'x', g))))
431
+ Satisfiers of 'boy(x)':
432
+ ['b1', 'b2']
433
+ Satisfiers of '(x = x)':
434
+ ['b1', 'b2', 'd1', 'g1', 'g2']
435
+ Satisfiers of '(boy(x) | girl(x))':
436
+ ['b1', 'b2', 'g1', 'g2']
437
+ Satisfiers of '(boy(x) & girl(x))':
438
+ []
439
+ Satisfiers of 'love(adam,x)':
440
+ ['g1']
441
+ Satisfiers of 'love(x,adam)':
442
+ ['g1', 'g2']
443
+ Satisfiers of '-(x = adam)':
444
+ ['b2', 'd1', 'g1', 'g2']
445
+ Satisfiers of 'exists z22.love(x,z22)':
446
+ ['b1', 'b2', 'g1', 'g2']
447
+ Satisfiers of 'exists y.love(y,x)':
448
+ ['b1', 'g1', 'g2']
449
+ Satisfiers of 'all y.(girl(y) -> love(x,y))':
450
+ []
451
+ Satisfiers of 'all y.(girl(y) -> love(y,x))':
452
+ ['b1']
453
+ Satisfiers of 'all y.(girl(y) -> (boy(x) & love(y,x)))':
454
+ ['b1']
455
+ Satisfiers of '(boy(x) & all y.(girl(y) -> love(x,y)))':
456
+ []
457
+ Satisfiers of '(boy(x) & all y.(girl(y) -> love(y,x)))':
458
+ ['b1']
459
+ Satisfiers of '(boy(x) & exists y.(girl(y) & love(y,x)))':
460
+ ['b1']
461
+ Satisfiers of '(girl(x) -> dog(x))':
462
+ ['b1', 'b2', 'd1']
463
+ Satisfiers of 'all y.(dog(y) -> (x = y))':
464
+ ['d1']
465
+ Satisfiers of '-exists y.love(y,x)':
466
+ ['b2', 'd1']
467
+ Satisfiers of 'exists y.(love(adam,y) & love(y,x))':
468
+ ['b1']
469
+
470
+
471
+ Tests based on the Blackburn & Bos testsuite
472
+ --------------------------------------------
473
+
474
+ >>> v1 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
475
+ ... ('honey_bunny', 'd4'), ('yolanda', 'd5'),
476
+ ... ('customer', set(['d1', 'd2'])),
477
+ ... ('robber', set(['d3', 'd4'])),
478
+ ... ('love', set([('d3', 'd4')]))]
479
+ >>> val1 = Valuation(v1)
480
+ >>> dom1 = val1.domain
481
+ >>> m1 = Model(dom1, val1)
482
+ >>> g1 = Assignment(dom1)
483
+
484
+ >>> v2 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
485
+ ... ('honey_bunny', 'd4'), ('yolanda', 'd4'),
486
+ ... ('customer', set(['d1', 'd2', 'd5', 'd6'])),
487
+ ... ('robber', set(['d3', 'd4'])),
488
+ ... ('love', set([(None, None)]))]
489
+ >>> val2 = Valuation(v2)
490
+ >>> dom2 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6'])
491
+ >>> m2 = Model(dom2, val2)
492
+ >>> g2 = Assignment(dom2)
493
+ >>> g21 = Assignment(dom2)
494
+ >>> g21.add('y', 'd3')
495
+ {'y': 'd3'}
496
+
497
+ >>> v3 = [('mia', 'd1'), ('jody', 'd2'), ('jules', 'd3'),
498
+ ... ('vincent', 'd4'),
499
+ ... ('woman', set(['d1', 'd2'])), ('man', set(['d3', 'd4'])),
500
+ ... ('joke', set(['d5', 'd6'])), ('episode', set(['d7', 'd8'])),
501
+ ... ('in', set([('d5', 'd7'), ('d5', 'd8')])),
502
+ ... ('tell', set([('d1', 'd5'), ('d2', 'd6')]))]
503
+ >>> val3 = Valuation(v3)
504
+ >>> dom3 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8'])
505
+ >>> m3 = Model(dom3, val3)
506
+ >>> g3 = Assignment(dom3)
507
+
508
+ >>> tests = [
509
+ ... ('exists x. robber(x)', m1, g1, True),
510
+ ... ('exists x. exists y. love(y, x)', m1, g1, True),
511
+ ... ('exists x0. exists x1. love(x1, x0)', m2, g2, False),
512
+ ... ('all x. all y. love(y, x)', m2, g2, False),
513
+ ... ('- (all x. all y. love(y, x))', m2, g2, True),
514
+ ... ('all x. all y. - love(y, x)', m2, g2, True),
515
+ ... ('yolanda = honey_bunny', m2, g2, True),
516
+ ... ('mia = honey_bunny', m2, g2, 'Undefined'),
517
+ ... ('- (yolanda = honey_bunny)', m2, g2, False),
518
+ ... ('- (mia = honey_bunny)', m2, g2, 'Undefined'),
519
+ ... ('all x. (robber(x) | customer(x))', m2, g2, True),
520
+ ... ('- (all x. (robber(x) | customer(x)))', m2, g2, False),
521
+ ... ('(robber(x) | customer(x))', m2, g2, 'Undefined'),
522
+ ... ('(robber(y) | customer(y))', m2, g21, True),
523
+ ... ('exists x. (man(x) & exists x. woman(x))', m3, g3, True),
524
+ ... ('exists x. (man(x) & exists x. woman(x))', m3, g3, True),
525
+ ... ('- exists x. woman(x)', m3, g3, False),
526
+ ... ('exists x. (tasty(x) & burger(x))', m3, g3, 'Undefined'),
527
+ ... ('- exists x. (tasty(x) & burger(x))', m3, g3, 'Undefined'),
528
+ ... ('exists x. (man(x) & - exists y. woman(y))', m3, g3, False),
529
+ ... ('exists x. (man(x) & - exists x. woman(x))', m3, g3, False),
530
+ ... ('exists x. (woman(x) & - exists x. customer(x))', m2, g2, 'Undefined'),
531
+ ... ]
532
+
533
+ >>> for item in tests:
534
+ ... sentence, model, g, testvalue = item
535
+ ... semvalue = model.evaluate(sentence, g)
536
+ ... if semvalue == testvalue:
537
+ ... print('*', end=' ')
538
+ ... g.purge()
539
+ * * * * * * * * * * * * * * * * * * * * * *
540
+
541
+
542
+ Tests for mapping from syntax to semantics
543
+ ------------------------------------------
544
+
545
+ Load a valuation from a file.
546
+
547
+ >>> import nltk.data
548
+ >>> from nltk.sem.util import parse_sents
549
+ >>> val = nltk.data.load('grammars/sample_grammars/valuation1.val')
550
+ >>> dom = val.domain
551
+ >>> m = Model(dom, val)
552
+ >>> g = Assignment(dom)
553
+ >>> gramfile = 'grammars/sample_grammars/sem2.fcfg'
554
+ >>> inputs = ['John sees a girl', 'every dog barks']
555
+ >>> parses = parse_sents(inputs, gramfile)
556
+ >>> for sent, trees in zip(inputs, parses):
557
+ ... print()
558
+ ... print("Sentence: %s" % sent)
559
+ ... for tree in trees:
560
+ ... print("Parse:\n %s" %tree)
561
+ ... print("Semantics: %s" % root_semrep(tree))
562
+ <BLANKLINE>
563
+ Sentence: John sees a girl
564
+ Parse:
565
+ (S[SEM=<exists x.(girl(x) & see(john,x))>]
566
+ (NP[-LOC, NUM='sg', SEM=<\P.P(john)>]
567
+ (PropN[-LOC, NUM='sg', SEM=<\P.P(john)>] John))
568
+ (VP[NUM='sg', SEM=<\y.exists x.(girl(x) & see(y,x))>]
569
+ (TV[NUM='sg', SEM=<\X y.X(\x.see(y,x))>, TNS='pres'] sees)
570
+ (NP[NUM='sg', SEM=<\Q.exists x.(girl(x) & Q(x))>]
571
+ (Det[NUM='sg', SEM=<\P Q.exists x.(P(x) & Q(x))>] a)
572
+ (Nom[NUM='sg', SEM=<\x.girl(x)>]
573
+ (N[NUM='sg', SEM=<\x.girl(x)>] girl)))))
574
+ Semantics: exists x.(girl(x) & see(john,x))
575
+ <BLANKLINE>
576
+ Sentence: every dog barks
577
+ Parse:
578
+ (S[SEM=<all x.(dog(x) -> bark(x))>]
579
+ (NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
580
+ (Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
581
+ (Nom[NUM='sg', SEM=<\x.dog(x)>]
582
+ (N[NUM='sg', SEM=<\x.dog(x)>] dog)))
583
+ (VP[NUM='sg', SEM=<\x.bark(x)>]
584
+ (IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
585
+ Semantics: all x.(dog(x) -> bark(x))
586
+
587
+ >>> sent = "every dog barks"
588
+ >>> result = nltk.sem.util.interpret_sents([sent], gramfile)[0]
589
+ >>> for (syntree, semrep) in result:
590
+ ... print(syntree)
591
+ ... print()
592
+ ... print(semrep)
593
+ (S[SEM=<all x.(dog(x) -> bark(x))>]
594
+ (NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
595
+ (Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
596
+ (Nom[NUM='sg', SEM=<\x.dog(x)>]
597
+ (N[NUM='sg', SEM=<\x.dog(x)>] dog)))
598
+ (VP[NUM='sg', SEM=<\x.bark(x)>]
599
+ (IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
600
+ <BLANKLINE>
601
+ all x.(dog(x) -> bark(x))
602
+
603
+ >>> result = nltk.sem.util.evaluate_sents([sent], gramfile, m, g)[0]
604
+ >>> for (syntree, semrel, value) in result:
605
+ ... print(syntree)
606
+ ... print()
607
+ ... print(semrep)
608
+ ... print()
609
+ ... print(value)
610
+ (S[SEM=<all x.(dog(x) -> bark(x))>]
611
+ (NP[NUM='sg', SEM=<\Q.all x.(dog(x) -> Q(x))>]
612
+ (Det[NUM='sg', SEM=<\P Q.all x.(P(x) -> Q(x))>] every)
613
+ (Nom[NUM='sg', SEM=<\x.dog(x)>]
614
+ (N[NUM='sg', SEM=<\x.dog(x)>] dog)))
615
+ (VP[NUM='sg', SEM=<\x.bark(x)>]
616
+ (IV[NUM='sg', SEM=<\x.bark(x)>, TNS='pres'] barks)))
617
+ <BLANKLINE>
618
+ all x.(dog(x) -> bark(x))
619
+ <BLANKLINE>
620
+ True
621
+
622
+ >>> sents = ['Mary walks', 'John sees a dog']
623
+ >>> results = nltk.sem.util.interpret_sents(sents, 'grammars/sample_grammars/sem2.fcfg')
624
+ >>> for result in results:
625
+ ... for (synrep, semrep) in result:
626
+ ... print(synrep)
627
+ (S[SEM=<walk(mary)>]
628
+ (NP[-LOC, NUM='sg', SEM=<\P.P(mary)>]
629
+ (PropN[-LOC, NUM='sg', SEM=<\P.P(mary)>] Mary))
630
+ (VP[NUM='sg', SEM=<\x.walk(x)>]
631
+ (IV[NUM='sg', SEM=<\x.walk(x)>, TNS='pres'] walks)))
632
+ (S[SEM=<exists x.(dog(x) & see(john,x))>]
633
+ (NP[-LOC, NUM='sg', SEM=<\P.P(john)>]
634
+ (PropN[-LOC, NUM='sg', SEM=<\P.P(john)>] John))
635
+ (VP[NUM='sg', SEM=<\y.exists x.(dog(x) & see(y,x))>]
636
+ (TV[NUM='sg', SEM=<\X y.X(\x.see(y,x))>, TNS='pres'] sees)
637
+ (NP[NUM='sg', SEM=<\Q.exists x.(dog(x) & Q(x))>]
638
+ (Det[NUM='sg', SEM=<\P Q.exists x.(P(x) & Q(x))>] a)
639
+ (Nom[NUM='sg', SEM=<\x.dog(x)>]
640
+ (N[NUM='sg', SEM=<\x.dog(x)>] dog)))))
641
+
642
+ Cooper Storage
643
+ --------------
644
+
645
+ >>> from nltk.sem import cooper_storage as cs
646
+ >>> sentence = 'every girl chases a dog'
647
+ >>> trees = cs.parse_with_bindops(sentence, grammar='grammars/book_grammars/storage.fcfg')
648
+ >>> semrep = trees[0].label()['SEM']
649
+ >>> cs_semrep = cs.CooperStore(semrep)
650
+ >>> print(cs_semrep.core)
651
+ chase(z2,z4)
652
+ >>> for bo in cs_semrep.store:
653
+ ... print(bo)
654
+ bo(\P.all x.(girl(x) -> P(x)),z2)
655
+ bo(\P.exists x.(dog(x) & P(x)),z4)
656
+ >>> cs_semrep.s_retrieve(trace=True)
657
+ Permutation 1
658
+ (\P.all x.(girl(x) -> P(x)))(\z2.chase(z2,z4))
659
+ (\P.exists x.(dog(x) & P(x)))(\z4.all x.(girl(x) -> chase(x,z4)))
660
+ Permutation 2
661
+ (\P.exists x.(dog(x) & P(x)))(\z4.chase(z2,z4))
662
+ (\P.all x.(girl(x) -> P(x)))(\z2.exists x.(dog(x) & chase(z2,x)))
663
+
664
+ >>> for reading in cs_semrep.readings:
665
+ ... print(reading)
666
+ exists x.(dog(x) & all z3.(girl(z3) -> chase(z3,x)))
667
+ all x.(girl(x) -> exists z4.(dog(z4) & chase(x,z4)))
env-llmeval/lib/python3.10/site-packages/nltk/test/setup_fixt.py ADDED
@@ -0,0 +1,26 @@
1
+ from nltk.internals import find_binary, find_jar
2
+
3
+
4
+ def check_binary(binary: str, **args):
5
+ """Skip a test via `pytest.skip` if the `binary` executable is not found.
6
+ Keyword arguments are passed to `nltk.internals.find_binary`."""
7
+ import pytest
8
+
9
+ try:
10
+ find_binary(binary, **args)
11
+ except LookupError:
12
+ pytest.skip(f"Skipping test because the {binary} binary was not found.")
13
+
14
+
15
+ def check_jar(name_pattern: str, **args):
16
+ """Skip a test via `pytest.skip` if the `name_pattern` jar is not found.
17
+ Keyword arguments are passed to `nltk.internals.find_jar`.
18
+
19
+ TODO: Investigate why the CoreNLP tests that rely on this check_jar failed
20
+ on the CI. https://github.com/nltk/nltk/pull/3060#issuecomment-1268355108
21
+ """
22
+ import pytest
23
+
24
+ pytest.skip(
25
+ "Skipping test because the doctests requiring jars are inconsistent on the CI."
26
+ )
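+
+ A typical use inside a test module might look like the following sketch (the
+ test name and the "mace4" binary are only illustrative, not part of this file):
+
+ from nltk.test.setup_fixt import check_binary
+
+ def test_requires_mace4():
+     check_binary("mace4")  # calls pytest.skip(...) when the binary is absent
+     ...                    # the actual test body would go here
+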
env-llmeval/lib/python3.10/site-packages/nltk/test/tokenize.doctest ADDED
@@ -0,0 +1,397 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ >>> from nltk.tokenize import *
5
+
6
+ Regression Tests: NLTKWordTokenizer
7
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8
+
9
+ Tokenizing some test strings.
10
+
11
+ >>> s1 = "On a $50,000 mortgage of 30 years at 8 percent, the monthly payment would be $366.88."
12
+ >>> word_tokenize(s1)
13
+ ['On', 'a', '$', '50,000', 'mortgage', 'of', '30', 'years', 'at', '8', 'percent', ',', 'the', 'monthly', 'payment', 'would', 'be', '$', '366.88', '.']
14
+ >>> s2 = "\"We beat some pretty good teams to get here,\" Slocum said."
15
+ >>> word_tokenize(s2)
16
+ ['``', 'We', 'beat', 'some', 'pretty', 'good', 'teams', 'to', 'get', 'here', ',', "''", 'Slocum', 'said', '.']
17
+ >>> s3 = "Well, we couldn't have this predictable, cliche-ridden, \"Touched by an Angel\" (a show creator John Masius worked on) wanna-be if she didn't."
18
+ >>> word_tokenize(s3)
19
+ ['Well', ',', 'we', 'could', "n't", 'have', 'this', 'predictable', ',', 'cliche-ridden', ',', '``', 'Touched', 'by', 'an', 'Angel', "''", '(', 'a', 'show', 'creator', 'John', 'Masius', 'worked', 'on', ')', 'wanna-be', 'if', 'she', 'did', "n't", '.']
20
+ >>> s4 = "I cannot cannot work under these conditions!"
21
+ >>> word_tokenize(s4)
22
+ ['I', 'can', 'not', 'can', 'not', 'work', 'under', 'these', 'conditions', '!']
23
+ >>> s5 = "The company spent $30,000,000 last year."
24
+ >>> word_tokenize(s5)
25
+ ['The', 'company', 'spent', '$', '30,000,000', 'last', 'year', '.']
26
+ >>> s6 = "The company spent 40.75% of its income last year."
27
+ >>> word_tokenize(s6)
28
+ ['The', 'company', 'spent', '40.75', '%', 'of', 'its', 'income', 'last', 'year', '.']
29
+ >>> s7 = "He arrived at 3:00 pm."
30
+ >>> word_tokenize(s7)
31
+ ['He', 'arrived', 'at', '3:00', 'pm', '.']
32
+ >>> s8 = "I bought these items: books, pencils, and pens."
33
+ >>> word_tokenize(s8)
34
+ ['I', 'bought', 'these', 'items', ':', 'books', ',', 'pencils', ',', 'and', 'pens', '.']
35
+ >>> s9 = "Though there were 150, 100 of them were old."
36
+ >>> word_tokenize(s9)
37
+ ['Though', 'there', 'were', '150', ',', '100', 'of', 'them', 'were', 'old', '.']
38
+ >>> s10 = "There were 300,000, but that wasn't enough."
39
+ >>> word_tokenize(s10)
40
+ ['There', 'were', '300,000', ',', 'but', 'that', 'was', "n't", 'enough', '.']
41
+ >>> s11 = "It's more'n enough."
42
+ >>> word_tokenize(s11)
43
+ ['It', "'s", 'more', "'n", 'enough', '.']
44
+
45
+ Gathering the spans of the tokenized strings.
46
+
47
+ >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).'''
48
+ >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23),
49
+ ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38),
50
+ ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59),
51
+ ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)]
52
+ >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected
53
+ True
54
+ >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in',
55
+ ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')',
56
+ ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.']
57
+ >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected
58
+ True
59
+
60
+ >>> s = '''I said, "I'd like to buy some ''good muffins" which cost $3.88\n each in New (York)."'''
61
+ >>> expected = [(0, 1), (2, 6), (6, 7), (8, 9), (9, 10), (10, 12),
62
+ ... (13, 17), (18, 20), (21, 24), (25, 29), (30, 32), (32, 36),
63
+ ... (37, 44), (44, 45), (46, 51), (52, 56), (57, 58), (58, 62),
64
+ ... (64, 68), (69, 71), (72, 75), (76, 77), (77, 81), (81, 82),
65
+ ... (82, 83), (83, 84)]
66
+ >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected
67
+ True
68
+ >>> expected = ['I', 'said', ',', '"', 'I', "'d", 'like', 'to',
69
+ ... 'buy', 'some', "''", "good", 'muffins', '"', 'which', 'cost',
70
+ ... '$', '3.88', 'each', 'in', 'New', '(', 'York', ')', '.', '"']
71
+ >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected
72
+ True
73
+
74
+ Testing improvement made to the TreebankWordTokenizer
75
+
76
+ >>> sx1 = '\xabNow that I can do.\xbb'
77
+ >>> expected = ['\xab', 'Now', 'that', 'I', 'can', 'do', '.', '\xbb']
78
+ >>> word_tokenize(sx1) == expected
79
+ True
80
+ >>> sx2 = 'The unicode 201C and 201D \u201cLEFT(RIGHT) DOUBLE QUOTATION MARK\u201d is also OPEN_PUNCT and CLOSE_PUNCT.'
81
+ >>> expected = ['The', 'unicode', '201C', 'and', '201D', '\u201c', 'LEFT', '(', 'RIGHT', ')', 'DOUBLE', 'QUOTATION', 'MARK', '\u201d', 'is', 'also', 'OPEN_PUNCT', 'and', 'CLOSE_PUNCT', '.']
82
+ >>> word_tokenize(sx2) == expected
83
+ True
84
+
85
+
86
+ Testing treebank's detokenizer
87
+
88
+ >>> from nltk.tokenize.treebank import TreebankWordDetokenizer
89
+ >>> detokenizer = TreebankWordDetokenizer()
90
+ >>> s = "On a $50,000 mortgage of 30 years at 8 percent, the monthly payment would be $366.88."
91
+ >>> detokenizer.detokenize(word_tokenize(s))
92
+ 'On a $50,000 mortgage of 30 years at 8 percent, the monthly payment would be $366.88.'
93
+ >>> s = "\"We beat some pretty good teams to get here,\" Slocum said."
94
+ >>> detokenizer.detokenize(word_tokenize(s))
95
+ '"We beat some pretty good teams to get here," Slocum said.'
96
+ >>> s = "Well, we couldn't have this predictable, cliche-ridden, \"Touched by an Angel\" (a show creator John Masius worked on) wanna-be if she didn't."
97
+ >>> detokenizer.detokenize(word_tokenize(s))
98
+ 'Well, we couldn\'t have this predictable, cliche-ridden, "Touched by an Angel" (a show creator John Masius worked on) wanna-be if she didn\'t.'
99
+ >>> s = "I cannot cannot work under these conditions!"
100
+ >>> detokenizer.detokenize(word_tokenize(s))
101
+ 'I cannot cannot work under these conditions!'
102
+ >>> s = "The company spent $30,000,000 last year."
103
+ >>> detokenizer.detokenize(word_tokenize(s))
104
+ 'The company spent $30,000,000 last year.'
105
+ >>> s = "The company spent 40.75% of its income last year."
106
+ >>> detokenizer.detokenize(word_tokenize(s))
107
+ 'The company spent 40.75% of its income last year.'
108
+ >>> s = "He arrived at 3:00 pm."
109
+ >>> detokenizer.detokenize(word_tokenize(s))
110
+ 'He arrived at 3:00 pm.'
111
+ >>> s = "I bought these items: books, pencils, and pens."
112
+ >>> detokenizer.detokenize(word_tokenize(s))
113
+ 'I bought these items: books, pencils, and pens.'
114
+ >>> s = "Though there were 150, 100 of them were old."
115
+ >>> detokenizer.detokenize(word_tokenize(s))
116
+ 'Though there were 150, 100 of them were old.'
117
+ >>> s = "There were 300,000, but that wasn't enough."
118
+ >>> detokenizer.detokenize(word_tokenize(s))
119
+ "There were 300,000, but that wasn't enough."
120
+ >>> s = 'How "are" you?'
121
+ >>> detokenizer.detokenize(word_tokenize(s))
122
+ 'How "are" you?'
123
+ >>> s = "Hello (world)"
124
+ >>> detokenizer.detokenize(word_tokenize(s))
125
+ 'Hello (world)'
126
+ >>> s = '<A sentence> with (many) [kinds] of {parentheses}. "Sometimes it\'s inside (quotes)". ("Sometimes the otherway around").'
127
+ >>> detokenizer.detokenize(word_tokenize(s))
128
+ '<A sentence> with (many) [kinds] of {parentheses}. "Sometimes it\'s inside (quotes)". ("Sometimes the otherway around").'
129
+ >>> s = "Sentence ending with (parentheses)"
130
+ >>> detokenizer.detokenize(word_tokenize(s))
131
+ 'Sentence ending with (parentheses)'
132
+ >>> s = "(Sentence) starting with parentheses."
133
+ >>> detokenizer.detokenize(word_tokenize(s))
134
+ '(Sentence) starting with parentheses.'
135
+ >>> s = "I've"
136
+ >>> detokenizer.detokenize(word_tokenize(s))
137
+ "I've"
138
+ >>> s = "Don't"
139
+ >>> detokenizer.detokenize(word_tokenize(s))
140
+ "Don't"
141
+ >>> s = "I'd"
142
+ >>> detokenizer.detokenize(word_tokenize(s))
143
+ "I'd"
144
+
145
+
146
+ Sentence tokenization in word_tokenize:
147
+
148
+ >>> s11 = "I called Dr. Jones. I called Dr. Jones."
149
+ >>> word_tokenize(s11)
150
+ ['I', 'called', 'Dr.', 'Jones', '.', 'I', 'called', 'Dr.', 'Jones', '.']
151
+ >>> s12 = ("Ich muss unbedingt daran denken, Mehl, usw. fur einen "
152
+ ... "Kuchen einzukaufen. Ich muss.")
153
+ >>> word_tokenize(s12)
154
+ ['Ich', 'muss', 'unbedingt', 'daran', 'denken', ',', 'Mehl', ',', 'usw',
155
+ '.', 'fur', 'einen', 'Kuchen', 'einzukaufen', '.', 'Ich', 'muss', '.']
156
+ >>> word_tokenize(s12, 'german')
157
+ ['Ich', 'muss', 'unbedingt', 'daran', 'denken', ',', 'Mehl', ',', 'usw.',
158
+ 'fur', 'einen', 'Kuchen', 'einzukaufen', '.', 'Ich', 'muss', '.']
159
+
160
+
161
+ Regression Tests: Regexp Tokenizer
162
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
163
+
164
+ Some additional test strings.
165
+
166
+ >>> s = ("Good muffins cost $3.88\nin New York. Please buy me\n"
167
+ ... "two of them.\n\nThanks.")
168
+ >>> s2 = ("Alas, it has not rained today. When, do you think, "
169
+ ... "will it rain again?")
170
+ >>> s3 = ("<p>Although this is <b>not</b> the case here, we must "
171
+ ... "not relax our vigilance!</p>")
172
+
173
+ >>> regexp_tokenize(s2, r'[,\.\?!"]\s*', gaps=False)
174
+ [', ', '. ', ', ', ', ', '?']
175
+ >>> regexp_tokenize(s2, r'[,\.\?!"]\s*', gaps=True)
176
+ ['Alas', 'it has not rained today', 'When', 'do you think',
177
+ 'will it rain again']
178
+
179
+ Take care to avoid using capturing groups:
180
+
181
+ >>> regexp_tokenize(s3, r'</?[bp]>', gaps=False)
182
+ ['<p>', '<b>', '</b>', '</p>']
183
+ >>> regexp_tokenize(s3, r'</?(?:b|p)>', gaps=False)
184
+ ['<p>', '<b>', '</b>', '</p>']
185
+ >>> regexp_tokenize(s3, r'</?(?:b|p)>', gaps=True)
186
+ ['Although this is ', 'not',
187
+ ' the case here, we must not relax our vigilance!']
188
+
189
+ Named groups are capturing groups, and confuse the tokenizer:
190
+
191
+ >>> regexp_tokenize(s3, r'</?(?P<named>b|p)>', gaps=False)
192
+ ['p', 'b', 'b', 'p']
193
+ >>> regexp_tokenize(s3, r'</?(?P<named>b|p)>', gaps=True)
194
+ ['p', 'Although this is ', 'b', 'not', 'b',
195
+ ' the case here, we must not relax our vigilance!', 'p']
196
+
197
+ Make sure that nested groups don't confuse the tokenizer:
198
+
199
+ >>> regexp_tokenize(s2, r'(?:h|r|l)a(?:s|(?:i|n0))', gaps=False)
200
+ ['las', 'has', 'rai', 'rai']
201
+ >>> regexp_tokenize(s2, r'(?:h|r|l)a(?:s|(?:i|n0))', gaps=True)
202
+ ['A', ', it ', ' not ', 'ned today. When, do you think, will it ',
203
+ 'n again?']
204
+
205
+ Back-references require capturing groups, and these are not supported:
206
+
207
+ >>> regexp_tokenize("aabbbcccc", r'(.)\1')
208
+ ['a', 'b', 'c', 'c']
209
+
210
+ A simple sentence tokenizer can be built by splitting on the pattern '\.(\s+|$)' (used below in its non-capturing form):
211
+
212
+ >>> regexp_tokenize(s, pattern=r'\.(?:\s+|$)', gaps=True)
213
+ ['Good muffins cost $3.88\nin New York',
214
+ 'Please buy me\ntwo of them', 'Thanks']
215
+
216
+
217
+ Regression Tests: TweetTokenizer
218
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
219
+
220
+ TweetTokenizer is a tokenizer designed specifically for micro-blogging text such as tweets.
221
+
222
+ >>> from nltk.tokenize import TweetTokenizer
223
+ >>> tknzr = TweetTokenizer()
224
+ >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
225
+ >>> tknzr.tokenize(s0)
226
+ ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
227
+ >>> s1 = "@Joyster2012 @CathStaincliffe Good for you, girl!! Best wishes :-)"
228
+ >>> tknzr.tokenize(s1)
229
+ ['@Joyster2012', '@CathStaincliffe', 'Good', 'for', 'you', ',', 'girl', '!', '!', 'Best', 'wishes', ':-)']
230
+ >>> s2 = "3Points for #DreamTeam Gooo BAILEY! :) #PBB737Gold @PBBabscbn"
231
+ >>> tknzr.tokenize(s2)
232
+ ['3Points', 'for', '#DreamTeam', 'Gooo', 'BAILEY', '!', ':)', '#PBB737Gold', '@PBBabscbn']
233
+ >>> s3 = "@Insanomania They do... Their mentality doesn't :("
234
+ >>> tknzr.tokenize(s3)
235
+ ['@Insanomania', 'They', 'do', '...', 'Their', 'mentality', "doesn't", ':(']
236
+ >>> s4 = "RT @facugambande: Ya por arrancar a grabar !!! #TirenTirenTiren vamoo !!"
237
+ >>> tknzr.tokenize(s4)
238
+ ['RT', '@facugambande', ':', 'Ya', 'por', 'arrancar', 'a', 'grabar', '!', '!', '!', '#TirenTirenTiren', 'vamoo', '!', '!']
239
+ >>> tknzr = TweetTokenizer(reduce_len=True)
240
+ >>> s5 = "@crushinghes the summer holidays are great but I'm so bored already :("
241
+ >>> tknzr.tokenize(s5)
242
+ ['@crushinghes', 'the', 'summer', 'holidays', 'are', 'great', 'but', "I'm", 'so', 'bored', 'already', ':(']
243
+
244
+ It is possible to specify `strip_handles` and `reduce_len` parameters for a TweetTokenizer instance. When `strip_handles` is set to True, the tokenizer removes Twitter handles (e.g. usernames) from the text. When `reduce_len` is set to True, repeated character sequences of length 3 or greater are replaced with sequences of length 3.
245
+
246
+ >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
247
+ >>> s6 = '@remy: This is waaaaayyyy too much for you!!!!!!'
248
+ >>> tknzr.tokenize(s6)
249
+ [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
250
+ >>> s7 = '@_willy65: No place for @chuck tonight. Sorry.'
251
+ >>> tknzr.tokenize(s7)
252
+ [':', 'No', 'place', 'for', 'tonight', '.', 'Sorry', '.']
253
+ >>> s8 = '@mar_tin is a great developer. Contact him at [email protected].'
254
+ >>> tknzr.tokenize(s8)
255
+ ['is', 'a', 'great', 'developer', '.', 'Contact', 'him', 'at', '[email protected]', '.']
256
+
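+ For completeness, here is an illustrative example (not part of the original
+ test file) using `strip_handles` on its own; it assumes the same
+ handle-stripping behaviour shown above:
+
+ >>> tknzr = TweetTokenizer(strip_handles=True)
+ >>> tknzr.tokenize('@_willy65: No place for @chuck tonight. Sorry.')
+ [':', 'No', 'place', 'for', 'tonight', '.', 'Sorry', '.']
+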
257
+ The `preserve_case` parameter (default: True) controls case normalization: setting it to False converts uppercase tokens to lowercase. Emoticons are not affected:
258
+
259
+ >>> tknzr = TweetTokenizer(preserve_case=False)
260
+ >>> s9 = "@jrmy: I'm REALLY HAPPYYY about that! NICEEEE :D :P"
261
+ >>> tknzr.tokenize(s9)
262
+ ['@jrmy', ':', "i'm", 'really', 'happyyy', 'about', 'that', '!', 'niceeee', ':D', ':P']
263
+
264
+ It should not hang on long sequences of the same punctuation character.
265
+
266
+ >>> tknzr = TweetTokenizer()
267
+ >>> s10 = "Photo: Aujourd'hui sur http://t.co/0gebOFDUzn Projet... http://t.co/bKfIUbydz2.............................. http://fb.me/3b6uXpz0L"
268
+ >>> tknzr.tokenize(s10)
269
+ ['Photo', ':', "Aujourd'hui", 'sur', 'http://t.co/0gebOFDUzn', 'Projet', '...', 'http://t.co/bKfIUbydz2', '...', 'http://fb.me/3b6uXpz0L']
270
+
271
+ Tokenizing multiple sentences at once:
272
+
273
+ >>> tknzr = TweetTokenizer()
274
+ >>> sentences = [
275
+ ... "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--",
276
+ ... "@jrmy: I'm REALLY HAPPYYY about that! NICEEEE :D :P",
277
+ ... "@_willy65: No place for @chuck tonight. Sorry."
278
+ ... ]
279
+ >>> tknzr.tokenize_sents(sentences) # doctest: +NORMALIZE_WHITESPACE
280
+ [['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--'],
281
+ ['@jrmy', ':', "I'm", 'REALLY', 'HAPPYYY', 'about', 'that', '!', 'NICEEEE', ':D', ':P'],
282
+ ['@_willy65', ':', 'No', 'place', 'for', '@chuck', 'tonight', '.', 'Sorry', '.']]
283
+
284
+
285
+ Regression Tests: PunktSentenceTokenizer
286
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
287
+
288
+ The sentence splitter should remove whitespace following the sentence boundary.
289
+
290
+ >>> pst = PunktSentenceTokenizer()
291
+ >>> pst.tokenize('See Section 3). Or Section 2). ')
292
+ ['See Section 3).', 'Or Section 2).']
293
+ >>> pst.tokenize('See Section 3.) Or Section 2.) ')
294
+ ['See Section 3.)', 'Or Section 2.)']
295
+ >>> pst.tokenize('See Section 3.) Or Section 2.) ', realign_boundaries=False)
296
+ ['See Section 3.', ') Or Section 2.', ')']
297
+
298
+
299
+ Two instances of PunktSentenceTokenizer should not share PunktParameters.
300
+
301
+ >>> pst = PunktSentenceTokenizer()
302
+ >>> pst2 = PunktSentenceTokenizer()
303
+ >>> pst._params is pst2._params
304
+ False
305
+
306
+ Testing mutable default arguments for https://github.com/nltk/nltk/pull/2067
307
+
308
+ >>> from nltk.tokenize.punkt import PunktBaseClass, PunktTrainer, PunktSentenceTokenizer
309
+ >>> from nltk.tokenize.punkt import PunktLanguageVars, PunktParameters
310
+ >>> pbc = PunktBaseClass(lang_vars=None, params=None)
311
+ >>> type(pbc._params)
312
+ <class 'nltk.tokenize.punkt.PunktParameters'>
313
+ >>> type(pbc._lang_vars)
314
+ <class 'nltk.tokenize.punkt.PunktLanguageVars'>
315
+ >>> pt = PunktTrainer(lang_vars=None)
316
+ >>> type(pt._lang_vars)
317
+ <class 'nltk.tokenize.punkt.PunktLanguageVars'>
318
+ >>> pst = PunktSentenceTokenizer(lang_vars=None)
319
+ >>> type(pst._lang_vars)
320
+ <class 'nltk.tokenize.punkt.PunktLanguageVars'>
321
+
322
+ Testing that inputs can start with dots.
323
+
324
+ >>> pst = PunktSentenceTokenizer(lang_vars=None)
325
+ >>> pst.tokenize(". This input starts with a dot. This used to cause issues.")
326
+ ['.', 'This input starts with a dot.', 'This used to cause issues.']
327
+
328
+ Regression Tests: align_tokens
329
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
330
+ Post-hoc alignment of tokens with a source string
331
+
332
+ >>> from nltk.tokenize.util import align_tokens
333
+ >>> list(align_tokens([''], ""))
334
+ [(0, 0)]
335
+ >>> list(align_tokens([''], " "))
336
+ [(0, 0)]
337
+ >>> list(align_tokens([], ""))
338
+ []
339
+ >>> list(align_tokens([], " "))
340
+ []
341
+ >>> list(align_tokens(['a'], "a"))
342
+ [(0, 1)]
343
+ >>> list(align_tokens(['abc', 'def'], "abcdef"))
344
+ [(0, 3), (3, 6)]
345
+ >>> list(align_tokens(['abc', 'def'], "abc def"))
346
+ [(0, 3), (4, 7)]
347
+ >>> list(align_tokens(['ab', 'cd'], "ab cd ef"))
348
+ [(0, 2), (3, 5)]
349
+ >>> list(align_tokens(['ab', 'cd', 'ef'], "ab cd ef"))
350
+ [(0, 2), (3, 5), (6, 8)]
351
+ >>> list(align_tokens(['ab', 'cd', 'efg'], "ab cd ef"))
352
+ Traceback (most recent call last):
353
+ ....
354
+ ValueError: substring "efg" not found in "ab cd ef"
355
+ >>> list(align_tokens(['ab', 'cd', 'ef', 'gh'], "ab cd ef"))
356
+ Traceback (most recent call last):
357
+ ....
358
+ ValueError: substring "gh" not found in "ab cd ef"
359
+ >>> list(align_tokens(['The', 'plane', ',', 'bound', 'for', 'St', 'Petersburg', ',', 'crashed', 'in', 'Egypt', "'s", 'Sinai', 'desert', 'just', '23', 'minutes', 'after', 'take-off', 'from', 'Sharm', 'el-Sheikh', 'on', 'Saturday', '.'], "The plane, bound for St Petersburg, crashed in Egypt's Sinai desert just 23 minutes after take-off from Sharm el-Sheikh on Saturday."))
360
+ [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23), (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54), (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89), (90, 98), (99, 103), (104, 109), (110, 119), (120, 122), (123, 131), (131, 132)]
361
+
362
+
363
+ Regression Tests: MWETokenizer
364
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
365
+ Pickle an MWETokenizer
366
+
367
+ >>> from nltk.tokenize import MWETokenizer
368
+ >>> import pickle
369
+
370
+ >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+')
371
+ >>> p = pickle.dumps(tokenizer)
372
+ >>> unpickled = pickle.loads(p)
373
+ >>> unpickled.tokenize("An hors d'oeuvre tonight, sir?".split())
374
+ ['An', "hors+d'oeuvre", 'tonight,', 'sir?']
375
+
376
+
377
+ Regression Tests: TextTilingTokenizer
378
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
379
+
380
+ TextTilingTokenizer tokenizes text into coherent subtopic chunks based upon Hearst's TextTiling algorithm.
381
+
382
+ >>> from nltk.tokenize import TextTilingTokenizer
383
+ >>> from nltk.corpus import brown
384
+ >>> tt = TextTilingTokenizer()
385
+ >>> tt.tokenize(brown.raw()[0:1000])
386
+ ["\n\n\tThe/at Fulton/np-tl County/nn-tl Grand/jj-tl Jury/nn-tl said/vbd Friday/nr an/at investigation/nn of/in Atlanta's/np$ recent/jj primary/nn election/nn produced/vbd ``/`` no/at evidence/nn ''/'' that/cs any/dti irregularities/nns took/vbd place/nn ./.\n\n\n\tThe/at jury/nn further/rbr said/vbd in/in term-end/nn presentments/nns that/cs the/at City/nn-tl Executive/jj-tl Committee/nn-tl ,/, which/wdt had/hvd over-all/jj charge/nn of/in the/at election/nn ,/, ``/`` deserves/vbz the/at praise/nn and/cc thanks/nns of/in the/at City/nn-tl of/in-tl Atlanta/np-tl ''/'' for/in the/at manner/nn in/in which/wdt the/at election/nn was/bedz conducted/vbn ./.\n\n\n\tThe/at September-October/np term/nn jury/nn had/hvd been/ben charged/vbn by/in Fulton/np-tl Superior/jj-tl Court/nn-tl Judge/nn-tl Durwood/np Pye/np to/to investigate/vb reports/nns of/in possible/jj ``/`` irregularities/nns ''/'' in/in the/at hard-fought/jj primary/nn which/wdt was/bedz won/vbn by/in Mayor-nominate/nn-tl Ivan/np Allen/np Jr./"]
387
+
388
+ Test that `ValueError` exceptions are raised when illegal arguments are used.
389
+
390
+ >>> TextTilingTokenizer(similarity_method='foo').tokenize(brown.raw()[0:1000])
391
+ Traceback (most recent call last):
392
+ ...
393
+ ValueError: Similarity method foo not recognized
394
+ >>> TextTilingTokenizer(smoothing_method='bar').tokenize(brown.raw()[0:1000])
395
+ Traceback (most recent call last):
396
+ ...
397
+ ValueError: Smoothing method bar not recognized
env-llmeval/lib/python3.10/site-packages/nltk/test/toolbox.doctest ADDED
@@ -0,0 +1,306 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ===============================
5
+ Unit test cases for ``toolbox``
6
+ ===============================
7
+
8
+ >>> from nltk import toolbox
9
+
10
+ --------------------------
11
+ ``toolbox.StandardFormat``
12
+ --------------------------
13
+
14
+ >>> f = toolbox.StandardFormat()
15
+
16
+ ``toolbox.StandardFormat.open()``
17
+ ---------------------------------
18
+ >>> import os, tempfile
19
+ >>> (fd, fname) = tempfile.mkstemp()
20
+ >>> tf = os.fdopen(fd, "w")
21
+ >>> _ = tf.write('\\lx a value\n\\lx another value\n')
22
+ >>> tf.close()
23
+ >>> f = toolbox.StandardFormat()
24
+ >>> f.open(fname)
25
+ >>> list(f.fields())
26
+ [('lx', 'a value'), ('lx', 'another value')]
27
+ >>> f.close()
28
+ >>> os.unlink(fname)
29
+
30
+ ``toolbox.StandardFormat.open_string()``
31
+ ----------------------------------------
32
+ >>> f = toolbox.StandardFormat()
33
+ >>> f.open_string('\\lx a value\n\\lx another value\n')
34
+ >>> list(f.fields())
35
+ [('lx', 'a value'), ('lx', 'another value')]
36
+ >>> f.close()
37
+
38
+ ``toolbox.StandardFormat.close()``
39
+ ----------------------------------
40
+ >>> f = toolbox.StandardFormat()
41
+ >>> f.open_string('\\lx a value\n\\lx another value\n')
42
+ >>> list(f.fields())
43
+ [('lx', 'a value'), ('lx', 'another value')]
44
+ >>> f.close()
45
+
46
+ ``toolbox.StandardFormat.line_num``
47
+ ---------------------------------------
48
+
49
+ ``StandardFormat.line_num`` contains the line number of the last line returned:
50
+
51
+ >>> f = toolbox.StandardFormat()
52
+ >>> f.open_string('\\lx a value\n\\lx another value\n\\lx a third value\n')
53
+ >>> line_nums = []
54
+ >>> for l in f.raw_fields():
55
+ ... line_nums.append(f.line_num)
56
+ >>> line_nums
57
+ [1, 2, 3]
58
+
59
+ For a field spanning several lines, ``StandardFormat.line_num`` contains the line number of its last line:
60
+
61
+ >>> f = toolbox.StandardFormat()
62
+ >>> f.open_string('\\lx two\nlines\n\\lx three\nlines\n\n\\lx two\nlines\n')
63
+ >>> line_nums = []
64
+ >>> for l in f.raw_fields():
65
+ ... line_nums.append(f.line_num)
66
+ >>> line_nums
67
+ [2, 5, 7]
68
+
69
+ ``StandardFormat.line_num`` doesn't exist before opening or after closing
70
+ a file or string:
71
+
72
+ >>> f = toolbox.StandardFormat()
73
+ >>> f.line_num
74
+ Traceback (most recent call last):
75
+ ...
76
+ AttributeError: 'StandardFormat' object has no attribute 'line_num'
77
+ >>> f.open_string('\\lx two\nlines\n\\lx three\nlines\n\n\\lx two\nlines\n')
78
+ >>> line_nums = []
79
+ >>> for l in f.raw_fields():
80
+ ... line_nums.append(f.line_num)
81
+ >>> line_nums
82
+ [2, 5, 7]
83
+ >>> f.close()
84
+ >>> f.line_num
85
+ Traceback (most recent call last):
86
+ ...
87
+ AttributeError: 'StandardFormat' object has no attribute 'line_num'
88
+
89
+ ``toolbox.StandardFormat.raw_fields()``
90
+ ---------------------------------------
91
+ ``raw_fields()`` returns an iterator over tuples of two strings representing the
92
+ marker and its value. The marker is given without the backslash and the value
93
+ without its trailing newline:
94
+
95
+ >>> f = toolbox.StandardFormat()
96
+ >>> f.open_string('\\lx a value\n\\lx another value\n')
97
+ >>> list(f.raw_fields())
98
+ [('lx', 'a value'), ('lx', 'another value')]
99
+
100
+ an empty file returns nothing:
101
+
102
+ >>> f = toolbox.StandardFormat()
103
+ >>> f.open_string('')
104
+ >>> list(f.raw_fields())
105
+ []
106
+
107
+ a file containing only a newline returns a single field with a ``None`` marker and an empty value:
108
+
109
+ >>> f = toolbox.StandardFormat()
110
+ >>> f.open_string('\n')
111
+ >>> list(f.raw_fields())
112
+ [(None, '')]
113
+
114
+ file with only one field should be parsed ok:
115
+
116
+ >>> f = toolbox.StandardFormat()
117
+ >>> f.open_string('\\lx one value\n')
118
+ >>> list(f.raw_fields())
119
+ [('lx', 'one value')]
120
+
121
+ file without a trailing newline should be parsed ok:
122
+
123
+ >>> f = toolbox.StandardFormat()
124
+ >>> f.open_string('\\lx a value\n\\lx another value')
125
+ >>> list(f.raw_fields())
126
+ [('lx', 'a value'), ('lx', 'another value')]
127
+
128
+ trailing white space is preserved except for the final newline:
129
+
130
+ >>> f = toolbox.StandardFormat()
131
+ >>> f.open_string('\\lx trailing space \n\\lx trailing tab\t\n\\lx extra newline\n\n')
132
+ >>> list(f.raw_fields())
133
+ [('lx', 'trailing space '), ('lx', 'trailing tab\t'), ('lx', 'extra newline\n')]
134
+
135
+ line wrapping is preserved:
136
+
137
+ >>> f = toolbox.StandardFormat()
138
+ >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n')
139
+ >>> list(f.raw_fields())
140
+ [('lx', 'a value\nmore of the value\nand still more'), ('lc', 'another val')]
141
+
142
+ file beginning with a multiline record should be parsed ok:
143
+
144
+ >>> f = toolbox.StandardFormat()
145
+ >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n')
146
+ >>> list(f.raw_fields())
147
+ [('lx', 'a value\nmore of the value\nand still more'), ('lc', 'another val')]
148
+
149
+ file ending with a multiline record should be parsed ok:
150
+
151
+ >>> f = toolbox.StandardFormat()
152
+ >>> f.open_string('\\lc a value\n\\lx another value\nmore of the value\nand still more\n')
153
+ >>> list(f.raw_fields())
154
+ [('lc', 'a value'), ('lx', 'another value\nmore of the value\nand still more')]
155
+
156
+ file beginning with a BOM should be parsed ok:
157
+
158
+ >>> f = toolbox.StandardFormat()
159
+ >>> f.open_string('\xef\xbb\xbf\\lx a value\n\\lx another value\n')
160
+ >>> list(f.raw_fields())
161
+ [('lx', 'a value'), ('lx', 'another value')]
162
+
163
+ file beginning with two BOMs should ignore only the first one:
164
+
165
+ >>> f = toolbox.StandardFormat()
166
+ >>> f.open_string('\xef\xbb\xbf\xef\xbb\xbf\\lx a value\n\\lx another value\n')
167
+ >>> list(f.raw_fields())
168
+ [(None, '\xef\xbb\xbf\\lx a value'), ('lx', 'another value')]
169
+
170
+ should not ignore a BOM not at the beginning of the file:
171
+
172
+ >>> f = toolbox.StandardFormat()
173
+ >>> f.open_string('\\lx a value\n\xef\xbb\xbf\\lx another value\n')
174
+ >>> list(f.raw_fields())
175
+ [('lx', 'a value\n\xef\xbb\xbf\\lx another value')]
176
+
177
+ ``toolbox.StandardFormat.fields()``
178
+ -----------------------------------
179
+ trailing white space is not preserved:
180
+
181
+ >>> f = toolbox.StandardFormat()
182
+ >>> f.open_string('\\lx trailing space \n\\lx trailing tab\t\n\\lx extra newline\n\n')
183
+ >>> list(f.fields())
184
+ [('lx', 'trailing space'), ('lx', 'trailing tab'), ('lx', 'extra newline')]
185
+
186
+ multiline fields are unwrapped:
187
+
188
+ >>> f = toolbox.StandardFormat()
189
+ >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n')
190
+ >>> list(f.fields())
191
+ [('lx', 'a value more of the value and still more'), ('lc', 'another val')]
192
+
193
+ markers
194
+ -------
195
+ A backslash in the first position on a new line indicates the start of a
196
+ marker. The backslash is not part of the marker:
197
+
198
+ >>> f = toolbox.StandardFormat()
199
+ >>> f.open_string('\\mk a value\n')
200
+ >>> list(f.fields())
201
+ [('mk', 'a value')]
202
+
203
+ If the backslash occurs later in the line it does not indicate the start
204
+ of a marker:
205
+
206
+ >>> f = toolbox.StandardFormat()
207
+ >>> f.open_string('\\mk a value\n \\mk another one\n')
208
+ >>> list(f.raw_fields())
209
+ [('mk', 'a value\n \\mk another one')]
210
+
211
+ There is no specific limit to the length of a marker:
212
+
213
+ >>> f = toolbox.StandardFormat()
214
+ >>> f.open_string('\\this_is_an_extremely_long_marker value\n')
215
+ >>> list(f.fields())
216
+ [('this_is_an_extremely_long_marker', 'value')]
217
+
218
+ A marker can contain any non white space character:
219
+
220
+ >>> f = toolbox.StandardFormat()
221
+ >>> f.open_string('\\`~!@#$%^&*()_-=+[{]}\\|,<.>/?;:"0123456789 value\n')
222
+ >>> list(f.fields())
223
+ [('`~!@#$%^&*()_-=+[{]}\\|,<.>/?;:"0123456789', 'value')]
224
+
225
+ A marker is terminated by any white space character:
226
+
227
+ >>> f = toolbox.StandardFormat()
228
+ >>> f.open_string('\\mk a value\n\\mk\tanother one\n\\mk\rthird one\n\\mk\ffourth one')
229
+ >>> list(f.fields())
230
+ [('mk', 'a value'), ('mk', 'another one'), ('mk', 'third one'), ('mk', 'fourth one')]
231
+
232
+ Consecutive whitespace characters (except newline) are treated the same as one:
233
+
234
+ >>> f = toolbox.StandardFormat()
235
+ >>> f.open_string('\\mk \t\r\fa value\n')
236
+ >>> list(f.fields())
237
+ [('mk', 'a value')]
238
+
239
+ -----------------------
240
+ ``toolbox.ToolboxData``
241
+ -----------------------
242
+
243
+ >>> db = toolbox.ToolboxData()
244
+
245
+ ``toolbox.ToolboxData.parse()``
246
+ -------------------------------
247
+ check that normal parsing works:
248
+
249
+ >>> from xml.etree import ElementTree
250
+ >>> td = toolbox.ToolboxData()
251
+ >>> s = """\\_sh v3.0 400 Rotokas Dictionary
252
+ ... \\_DateStampHasFourDigitYear
253
+ ...
254
+ ... \\lx kaa
255
+ ... \\ps V.A
256
+ ... \\ge gag
257
+ ... \\gp nek i pas
258
+ ...
259
+ ... \\lx kaa
260
+ ... \\ps V.B
261
+ ... \\ge strangle
262
+ ... \\gp pasim nek
263
+ ... """
264
+ >>> td.open_string(s)
265
+ >>> tree = td.parse(key='lx')
266
+ >>> tree.tag
267
+ 'toolbox_data'
268
+ >>> ElementTree.tostring(list(tree)[0]).decode('utf8')
269
+ '<header><_sh>v3.0 400 Rotokas Dictionary</_sh><_DateStampHasFourDigitYear /></header>'
270
+ >>> ElementTree.tostring(list(tree)[1]).decode('utf8')
271
+ '<record><lx>kaa</lx><ps>V.A</ps><ge>gag</ge><gp>nek i pas</gp></record>'
272
+ >>> ElementTree.tostring(list(tree)[2]).decode('utf8')
273
+ '<record><lx>kaa</lx><ps>V.B</ps><ge>strangle</ge><gp>pasim nek</gp></record>'
274
+
275
+ check that guessing the key marker works:
276
+
277
+ >>> from xml.etree import ElementTree
278
+ >>> td = toolbox.ToolboxData()
279
+ >>> s = """\\_sh v3.0 400 Rotokas Dictionary
280
+ ... \\_DateStampHasFourDigitYear
281
+ ...
282
+ ... \\lx kaa
283
+ ... \\ps V.A
284
+ ... \\ge gag
285
+ ... \\gp nek i pas
286
+ ...
287
+ ... \\lx kaa
288
+ ... \\ps V.B
289
+ ... \\ge strangle
290
+ ... \\gp pasim nek
291
+ ... """
292
+ >>> td.open_string(s)
293
+ >>> tree = td.parse()
294
+ >>> ElementTree.tostring(list(tree)[0]).decode('utf8')
295
+ '<header><_sh>v3.0 400 Rotokas Dictionary</_sh><_DateStampHasFourDigitYear /></header>'
296
+ >>> ElementTree.tostring(list(tree)[1]).decode('utf8')
297
+ '<record><lx>kaa</lx><ps>V.A</ps><ge>gag</ge><gp>nek i pas</gp></record>'
298
+ >>> ElementTree.tostring(list(tree)[2]).decode('utf8')
299
+ '<record><lx>kaa</lx><ps>V.B</ps><ge>strangle</ge><gp>pasim nek</gp></record>'
300
+
301
+ -----------------------
302
+ ``toolbox`` functions
303
+ -----------------------
304
+
305
+ ``toolbox.to_sfm_string()``
306
+ -------------------------------
env-llmeval/lib/python3.10/site-packages/nltk/test/tree.doctest ADDED
@@ -0,0 +1,1223 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ===============================
5
+ Unit tests for nltk.tree.Tree
6
+ ===============================
7
+
8
+ >>> from nltk.tree import *
9
+
10
+ Some trees to run tests on:
11
+
12
+ >>> dp1 = Tree('dp', [Tree('d', ['the']), Tree('np', ['dog'])])
13
+ >>> dp2 = Tree('dp', [Tree('d', ['the']), Tree('np', ['cat'])])
14
+ >>> vp = Tree('vp', [Tree('v', ['chased']), dp2])
15
+ >>> tree = Tree('s', [dp1, vp])
16
+ >>> print(tree)
17
+ (s (dp (d the) (np dog)) (vp (v chased) (dp (d the) (np cat))))
18
+
19
+ The node label is accessed using the `label()` method:
20
+
21
+ >>> dp1.label(), dp2.label(), vp.label(), tree.label()
22
+ ('dp', 'dp', 'vp', 's')
23
+
24
+ >>> print(tree[1,1,1,0])
25
+ cat
26
+
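+ Indexing with a tuple of child indices descends one level per index; for
+ example, ``tree[1, 1]`` is the second child of the second child (an
+ illustrative addition, not part of the original test file):
+
+ >>> print(tree[1, 1])
+ (dp (d the) (np cat))
+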
27
+ The `treepositions` method returns a list of the tree positions of
28
+ subtrees and leaves in a tree. By default, it gives the position of
29
+ every tree, subtree, and leaf, in prefix order:
30
+
31
+ >>> print(tree.treepositions())
32
+ [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), (1, 1), (1, 1, 0), (1, 1, 0, 0), (1, 1, 1), (1, 1, 1, 0)]
33
+
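+ The positions of the leaves alone can also be requested (an illustrative
+ addition, not part of the original test file; it assumes the ``'leaves'``
+ ordering option of `treepositions`):
+
+ >>> print(tree.treepositions('leaves'))
+ [(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0, 0), (1, 1, 1, 0)]
+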
34
+ In addition to `str` and `repr`, several methods exist to convert a
35
+ tree object to one of several standard tree encodings:
36
+
37
+ >>> print(tree.pformat_latex_qtree())
38
+ \Tree [.s
39
+ [.dp [.d the ] [.np dog ] ]
40
+ [.vp [.v chased ] [.dp [.d the ] [.np cat ] ] ] ]
41
+
42
+ There is also a fancy ASCII art representation:
43
+
44
+ >>> tree.pretty_print()
45
+ s
46
+ ________|_____
47
+ | vp
48
+ | _____|___
49
+ dp | dp
50
+ ___|___ | ___|___
51
+ d np v d np
52
+ | | | | |
53
+ the dog chased the cat
54
+
55
+ >>> tree.pretty_print(unicodelines=True, nodedist=4)
56
+ s
57
+ ┌──────────────┴────────┐
58
+ │ vp
59
+ │ ┌────────┴──────┐
60
+ dp │ dp
61
+ ┌──────┴──────┐ │ ┌──────┴──────┐
62
+ d np v d np
63
+ │ │ │ │ │
64
+ the dog chased the cat
65
+
66
+ Trees can be initialized from treebank strings:
67
+
68
+ >>> tree2 = Tree.fromstring('(S (NP I) (VP (V enjoyed) (NP my cookie)))')
69
+ >>> print(tree2)
70
+ (S (NP I) (VP (V enjoyed) (NP my cookie)))
71
+
72
+ Trees can be compared for equality:
73
+
74
+ >>> tree == Tree.fromstring(str(tree))
75
+ True
76
+ >>> tree2 == Tree.fromstring(str(tree2))
77
+ True
78
+ >>> tree == tree2
79
+ False
80
+ >>> tree == Tree.fromstring(str(tree2))
81
+ False
82
+ >>> tree2 == Tree.fromstring(str(tree))
83
+ False
84
+
85
+ >>> tree != Tree.fromstring(str(tree))
86
+ False
87
+ >>> tree2 != Tree.fromstring(str(tree2))
88
+ False
89
+ >>> tree != tree2
90
+ True
91
+ >>> tree != Tree.fromstring(str(tree2))
92
+ True
93
+ >>> tree2 != Tree.fromstring(str(tree))
94
+ True
95
+
96
+ >>> tree < tree2 or tree > tree2
97
+ True
98
+
99
+ Tree Parsing
100
+ ============
101
+
102
+ The class method `Tree.fromstring()` can be used to parse trees, and it
103
+ provides some additional options.
104
+
105
+ >>> tree = Tree.fromstring('(S (NP I) (VP (V enjoyed) (NP my cookie)))')
106
+ >>> print(tree)
107
+ (S (NP I) (VP (V enjoyed) (NP my cookie)))
108
+
109
+ When called on a subclass of `Tree`, it will create trees of that
110
+ type:
111
+
112
+ >>> tree = ImmutableTree.fromstring('(VP (V enjoyed) (NP my cookie))')
113
+ >>> print(tree)
114
+ (VP (V enjoyed) (NP my cookie))
115
+ >>> print(type(tree))
116
+ <class 'nltk.tree.immutable.ImmutableTree'>
117
+ >>> tree[1] = 'x'
118
+ Traceback (most recent call last):
119
+ . . .
120
+ ValueError: ImmutableTree may not be modified
121
+ >>> del tree[0]
122
+ Traceback (most recent call last):
123
+ . . .
124
+ ValueError: ImmutableTree may not be modified
125
+
126
+ The ``brackets`` parameter can be used to specify two characters that
127
+ should be used as brackets:
128
+
129
+ >>> print(Tree.fromstring('[S [NP I] [VP [V enjoyed] [NP my cookie]]]',
130
+ ... brackets='[]'))
131
+ (S (NP I) (VP (V enjoyed) (NP my cookie)))
132
+ >>> print(Tree.fromstring('<S <NP I> <VP <V enjoyed> <NP my cookie>>>',
133
+ ... brackets='<>'))
134
+ (S (NP I) (VP (V enjoyed) (NP my cookie)))
135
+
136
+ If ``brackets`` is not a string, or is not exactly two characters,
137
+ then `Tree.fromstring` raises an exception:
138
+
139
+ >>> Tree.fromstring('<VP <V enjoyed> <NP my cookie>>', brackets='')
140
+ Traceback (most recent call last):
141
+ . . .
142
+ TypeError: brackets must be a length-2 string
143
+ >>> Tree.fromstring('<VP <V enjoyed> <NP my cookie>>', brackets='<<>>')
144
+ Traceback (most recent call last):
145
+ . . .
146
+ TypeError: brackets must be a length-2 string
147
+ >>> Tree.fromstring('<VP <V enjoyed> <NP my cookie>>', brackets=12)
148
+ Traceback (most recent call last):
149
+ . . .
150
+ TypeError: brackets must be a length-2 string
151
+ >>> Tree.fromstring('<<NP my cookie>>', brackets=('<<','>>'))
152
+ Traceback (most recent call last):
153
+ . . .
154
+ TypeError: brackets must be a length-2 string
155
+
156
+ (We may add support for multi-character brackets in the future, in
157
+ which case the ``brackets=('<<','>>')`` example would start working.)
158
+
159
+ Whitespace brackets are not permitted:
160
+
161
+ >>> Tree.fromstring('(NP my cookie\n', brackets='(\n')
162
+ Traceback (most recent call last):
163
+ . . .
164
+ TypeError: whitespace brackets not allowed
165
+
166
+ If an invalid tree is given to Tree.fromstring, then it raises a
167
+ ValueError, with a description of the problem:
168
+
169
+ >>> Tree.fromstring('(NP my cookie) (NP my milk)')
170
+ Traceback (most recent call last):
171
+ . . .
172
+ ValueError: Tree.fromstring(): expected 'end-of-string' but got '(NP'
173
+ at index 15.
174
+ "...y cookie) (NP my mil..."
175
+ ^
176
+ >>> Tree.fromstring(')NP my cookie(')
177
+ Traceback (most recent call last):
178
+ . . .
179
+ ValueError: Tree.fromstring(): expected '(' but got ')'
180
+ at index 0.
181
+ ")NP my coo..."
182
+ ^
183
+ >>> Tree.fromstring('(NP my cookie))')
184
+ Traceback (most recent call last):
185
+ . . .
186
+ ValueError: Tree.fromstring(): expected 'end-of-string' but got ')'
187
+ at index 14.
188
+ "...my cookie))"
189
+ ^
190
+ >>> Tree.fromstring('my cookie)')
191
+ Traceback (most recent call last):
192
+ . . .
193
+ ValueError: Tree.fromstring(): expected '(' but got 'my'
194
+ at index 0.
195
+ "my cookie)"
196
+ ^
197
+ >>> Tree.fromstring('(NP my cookie')
198
+ Traceback (most recent call last):
199
+ . . .
200
+ ValueError: Tree.fromstring(): expected ')' but got 'end-of-string'
201
+ at index 13.
202
+ "... my cookie"
203
+ ^
204
+ >>> Tree.fromstring('')
205
+ Traceback (most recent call last):
206
+ . . .
207
+ ValueError: Tree.fromstring(): expected '(' but got 'end-of-string'
208
+ at index 0.
209
+ ""
210
+ ^
211
+
212
+ Trees with no children are supported:
213
+
214
+ >>> print(Tree.fromstring('(S)'))
215
+ (S )
216
+ >>> print(Tree.fromstring('(X (Y) (Z))'))
217
+ (X (Y ) (Z ))
218
+
219
+ Trees with an empty node label and no children are supported:
220
+
221
+ >>> print(Tree.fromstring('()'))
222
+ ( )
223
+ >>> print(Tree.fromstring('(X () ())'))
224
+ (X ( ) ( ))
225
+
226
+ Trees with an empty node label and children are supported, but only if the
227
+ first child is not a leaf (otherwise, it will be treated as the node label).
228
+
229
+ >>> print(Tree.fromstring('((A) (B) (C))'))
230
+ ( (A ) (B ) (C ))
231
+ >>> print(Tree.fromstring('((A) leaf)'))
232
+ ( (A ) leaf)
233
+ >>> print(Tree.fromstring('(((())))'))
234
+ ( ( ( ( ))))
235
+
236
+ The optional arguments `read_node` and `read_leaf` may be used to
237
+ transform the string values of nodes or leaves.
238
+
239
+ >>> print(Tree.fromstring('(A b (C d e) (F (G h i)))',
240
+ ... read_node=lambda s: '<%s>' % s,
241
+ ... read_leaf=lambda s: '"%s"' % s))
242
+ (<A> "b" (<C> "d" "e") (<F> (<G> "h" "i")))
243
+
244
+ These transformation functions are typically used when the node or
245
+ leaf labels should be parsed to a non-string value (such as a feature
246
+ structure). If node and leaf labels need to be able to include
247
+ whitespace, then you must also use the optional `node_pattern` and
248
+ `leaf_pattern` arguments.
249
+
250
+ >>> from nltk.featstruct import FeatStruct
251
+ >>> tree = Tree.fromstring('([cat=NP] [lex=the] [lex=dog])',
252
+ ... read_node=FeatStruct, read_leaf=FeatStruct)
253
+ >>> tree.set_label(tree.label().unify(FeatStruct('[num=singular]')))
254
+ >>> print(tree)
255
+ ([cat='NP', num='singular'] [lex='the'] [lex='dog'])
256
+
257
+ The optional argument ``remove_empty_top_bracketing`` can be used to
258
+ remove any top-level empty bracketing that occurs.
259
+
260
+ >>> print(Tree.fromstring('((S (NP I) (VP (V enjoyed) (NP my cookie))))',
261
+ ... remove_empty_top_bracketing=True))
262
+ (S (NP I) (VP (V enjoyed) (NP my cookie)))
263
+
264
+ It will not remove a top-level empty bracketing with multiple children:
265
+
266
+ >>> print(Tree.fromstring('((A a) (B b))'))
267
+ ( (A a) (B b))
268
+
269
+
270
+ Tree.fromlist()
271
+ ---------------
272
+ The class method `Tree.fromlist()` can be used to parse trees
273
+ that are expressed as nested lists, such as those produced by
274
+ the tree() function from the wordnet module.
275
+
276
+ >>> from nltk.corpus import wordnet as wn
277
+ >>> t=Tree.fromlist(wn.synset('dog.n.01').tree(lambda s:s.hypernyms()))
278
+ >>> print(t.height())
279
+ 14
280
+ >>> print(t.leaves())
281
+ ["Synset('entity.n.01')", "Synset('entity.n.01')"]
282
+ >>> t.pretty_print()
283
+ Synset('dog.n.01')
284
+ _________________|__________________
285
+ Synset('canine.n. |
286
+ 02') |
287
+ | |
288
+ Synset('carnivor |
289
+ e.n.01') |
290
+ | |
291
+ Synset('placenta |
292
+ l.n.01') |
293
+ | |
294
+ Synset('mammal.n. |
295
+ 01') |
296
+ | |
297
+ Synset('vertebra |
298
+ te.n.01') |
299
+ | |
300
+ Synset('chordate. Synset('domestic
301
+ n.01') _animal.n.01')
302
+ | |
303
+ Synset('animal.n. Synset('animal.n.
304
+ 01') 01')
305
+ | |
306
+ Synset('organism. Synset('organism.
307
+ n.01') n.01')
308
+ | |
309
+ Synset('living_t Synset('living_t
310
+ hing.n.01') hing.n.01')
311
+ | |
312
+ Synset('whole.n. Synset('whole.n.
313
+ 02') 02')
314
+ | |
315
+ Synset('object.n. Synset('object.n.
316
+ 01') 01')
317
+ | |
318
+ Synset('physical Synset('physical
319
+ _entity.n.01') _entity.n.01')
320
+ | |
321
+ Synset('entity.n. Synset('entity.n.
322
+ 01') 01')
323
+
324
+
325
+
326
+ Parented Trees
327
+ ==============
328
+ `ParentedTree` is a subclass of `Tree` that automatically maintains
329
+ parent pointers for single-parented trees. Parented trees can be
330
+ created directly from a node label and a list of children:
331
+
332
+ >>> ptree = (
333
+ ... ParentedTree('VP', [
334
+ ... ParentedTree('VERB', ['saw']),
335
+ ... ParentedTree('NP', [
336
+ ... ParentedTree('DET', ['the']),
337
+ ... ParentedTree('NOUN', ['dog'])])]))
338
+ >>> print(ptree)
339
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
340
+
341
+ Parented trees can be created from strings using the classmethod
342
+ `ParentedTree.fromstring`:
343
+
344
+ >>> ptree = ParentedTree.fromstring('(VP (VERB saw) (NP (DET the) (NOUN dog)))')
345
+ >>> print(ptree)
346
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
347
+ >>> print(type(ptree))
348
+ <class 'nltk.tree.parented.ParentedTree'>
349
+
350
+ Parented trees can also be created by using the classmethod
351
+ `ParentedTree.convert` to convert another type of tree to a parented
352
+ tree:
353
+
354
+ >>> tree = Tree.fromstring('(VP (VERB saw) (NP (DET the) (NOUN dog)))')
355
+ >>> ptree = ParentedTree.convert(tree)
356
+ >>> print(ptree)
357
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
358
+ >>> print(type(ptree))
359
+ <class 'nltk.tree.parented.ParentedTree'>
360
+
361
+ .. clean-up:
362
+
363
+ >>> del tree
364
+
365
+ `ParentedTree`\ s should never be used in the same tree as `Tree`\ s
366
+ or `MultiParentedTree`\ s. Mixing tree implementations may result in
367
+ incorrect parent pointers and in `TypeError` exceptions:
368
+
369
+ >>> # Inserting a Tree in a ParentedTree gives an exception:
370
+ >>> ParentedTree('NP', [
371
+ ... Tree('DET', ['the']), Tree('NOUN', ['dog'])])
372
+ Traceback (most recent call last):
373
+ . . .
374
+ TypeError: Can not insert a non-ParentedTree into a ParentedTree
375
+
376
+ >>> # inserting a ParentedTree in a Tree gives incorrect parent pointers:
377
+ >>> broken_tree = Tree('NP', [
378
+ ... ParentedTree('DET', ['the']), ParentedTree('NOUN', ['dog'])])
379
+ >>> print(broken_tree[0].parent())
380
+ None
381
+
382
+ Parented Tree Methods
383
+ ------------------------
384
+ In addition to all the methods defined by the `Tree` class, the
385
+ `ParentedTree` class adds six new methods whose values are
386
+ automatically updated whenever a parented tree is modified: `parent()`,
387
+ `parent_index()`, `left_sibling()`, `right_sibling()`, `root()`, and
388
+ `treeposition()`.
389
+
390
+ The `parent()` method contains a `ParentedTree`\ 's parent, if it has
391
+ one; and ``None`` otherwise. `ParentedTree`\ s that do not have
392
+ parents are known as "root trees."
393
+
394
+ >>> for subtree in ptree.subtrees():
395
+ ... print(subtree)
396
+ ... print(' Parent = %s' % subtree.parent())
397
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
398
+ Parent = None
399
+ (VERB saw)
400
+ Parent = (VP (VERB saw) (NP (DET the) (NOUN dog)))
401
+ (NP (DET the) (NOUN dog))
402
+ Parent = (VP (VERB saw) (NP (DET the) (NOUN dog)))
403
+ (DET the)
404
+ Parent = (NP (DET the) (NOUN dog))
405
+ (NOUN dog)
406
+ Parent = (NP (DET the) (NOUN dog))
407
+
408
+ The `parent_index()` method stores the index of a tree in its parent's
409
+ child list. If a tree does not have a parent, then its `parent_index`
410
+ is ``None``.
411
+
412
+ >>> for subtree in ptree.subtrees():
413
+ ... print(subtree)
414
+ ... print(' Parent Index = %s' % subtree.parent_index())
415
+ ... assert (subtree.parent() is None or
416
+ ... subtree.parent()[subtree.parent_index()] is subtree)
417
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
418
+ Parent Index = None
419
+ (VERB saw)
420
+ Parent Index = 0
421
+ (NP (DET the) (NOUN dog))
422
+ Parent Index = 1
423
+ (DET the)
424
+ Parent Index = 0
425
+ (NOUN dog)
426
+ Parent Index = 1
427
+
428
+ Note that ``ptree.parent().index(ptree)`` is *not* equivalent to
429
+ ``ptree.parent_index()``. In particular, ``ptree.parent().index(ptree)``
430
+ will return the index of the first child of ``ptree.parent()`` that is
431
+ equal to ``ptree`` (using ``==``); and that child may not be
432
+ ``ptree``:
433
+
434
+ >>> on_and_on = ParentedTree('CONJP', [
435
+ ... ParentedTree('PREP', ['on']),
436
+ ... ParentedTree('COJN', ['and']),
437
+ ... ParentedTree('PREP', ['on'])])
438
+ >>> second_on = on_and_on[2]
439
+ >>> print(second_on.parent_index())
440
+ 2
441
+ >>> print(second_on.parent().index(second_on))
442
+ 0
443
+
444
+ The methods `left_sibling()` and `right_sibling()` can be used to get a
445
+ parented tree's siblings. If a tree does not have a left or right
446
+ sibling, then the corresponding method's value is ``None``:
447
+
448
+ >>> for subtree in ptree.subtrees():
449
+ ... print(subtree)
450
+ ... print(' Left Sibling = %s' % subtree.left_sibling())
451
+ ... print(' Right Sibling = %s' % subtree.right_sibling())
452
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
453
+ Left Sibling = None
454
+ Right Sibling = None
455
+ (VERB saw)
456
+ Left Sibling = None
457
+ Right Sibling = (NP (DET the) (NOUN dog))
458
+ (NP (DET the) (NOUN dog))
459
+ Left Sibling = (VERB saw)
460
+ Right Sibling = None
461
+ (DET the)
462
+ Left Sibling = None
463
+ Right Sibling = (NOUN dog)
464
+ (NOUN dog)
465
+ Left Sibling = (DET the)
466
+ Right Sibling = None
467
+
468
+ A parented tree's root tree can be accessed using the `root()`
469
+ method. This method follows the tree's parent pointers until it
470
+ finds a tree without a parent. If a tree does not have a parent, then
471
+ it is its own root:
472
+
473
+ >>> for subtree in ptree.subtrees():
474
+ ... print(subtree)
475
+ ... print(' Root = %s' % subtree.root())
476
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
477
+ Root = (VP (VERB saw) (NP (DET the) (NOUN dog)))
478
+ (VERB saw)
479
+ Root = (VP (VERB saw) (NP (DET the) (NOUN dog)))
480
+ (NP (DET the) (NOUN dog))
481
+ Root = (VP (VERB saw) (NP (DET the) (NOUN dog)))
482
+ (DET the)
483
+ Root = (VP (VERB saw) (NP (DET the) (NOUN dog)))
484
+ (NOUN dog)
485
+ Root = (VP (VERB saw) (NP (DET the) (NOUN dog)))
486
+
487
+ The `treeposition()` method can be used to find a tree's treeposition
488
+ relative to its root:
489
+
490
+ >>> for subtree in ptree.subtrees():
491
+ ... print(subtree)
492
+ ... print(' Tree Position = %s' % (subtree.treeposition(),))
493
+ ... assert subtree.root()[subtree.treeposition()] is subtree
494
+ (VP (VERB saw) (NP (DET the) (NOUN dog)))
495
+ Tree Position = ()
496
+ (VERB saw)
497
+ Tree Position = (0,)
498
+ (NP (DET the) (NOUN dog))
499
+ Tree Position = (1,)
500
+ (DET the)
501
+ Tree Position = (1, 0)
502
+ (NOUN dog)
503
+ Tree Position = (1, 1)
504
+
505
+ Whenever a parented tree is modified, all of the methods described
506
+ above (`parent()`, `parent_index()`, `left_sibling()`, `right_sibling()`,
507
+ `root()`, and `treeposition()`) are automatically updated. For example,
508
+ if we replace ``ptree``\ 's subtree for the word "dog" with a new
509
+ subtree for "cat," the method values for both the "dog" subtree and the
510
+ "cat" subtree get automatically updated:
511
+
512
+ >>> # Replace the dog with a cat
513
+ >>> dog = ptree[1,1]
514
+ >>> cat = ParentedTree('NOUN', ['cat'])
515
+ >>> ptree[1,1] = cat
516
+
517
+ >>> # the noun phrase is no longer the dog's parent:
518
+ >>> print(dog.parent(), dog.parent_index(), dog.left_sibling())
519
+ None None None
520
+ >>> # dog is now its own root.
521
+ >>> print(dog.root())
522
+ (NOUN dog)
523
+ >>> print(dog.treeposition())
524
+ ()
525
+
526
+ >>> # the cat's parent is now the noun phrase:
527
+ >>> print(cat.parent())
528
+ (NP (DET the) (NOUN cat))
529
+ >>> print(cat.parent_index())
530
+ 1
531
+ >>> print(cat.left_sibling())
532
+ (DET the)
533
+ >>> print(cat.root())
534
+ (VP (VERB saw) (NP (DET the) (NOUN cat)))
535
+ >>> print(cat.treeposition())
536
+ (1, 1)
537
+
538
+ ParentedTree Regression Tests
539
+ -----------------------------
540
+ Keep track of all trees that we create (including subtrees) using this
541
+ variable:
542
+
543
+ >>> all_ptrees = []
544
+
545
+ Define a helper function to create new parented trees:
546
+
547
+ >>> def make_ptree(s):
548
+ ... ptree = ParentedTree.convert(Tree.fromstring(s))
549
+ ... all_ptrees.extend(t for t in ptree.subtrees()
550
+ ... if isinstance(t, Tree))
551
+ ... return ptree
552
+
553
+ Define a test function that examines every subtree in all_ptrees; and
554
+ checks that all six of its methods are defined correctly. If any
555
+ ptrees are passed as arguments, then they are printed.
556
+
557
+ >>> def pcheck(*print_ptrees):
558
+ ... for ptree in all_ptrees:
559
+ ... # Check ptree's methods.
560
+ ... if ptree.parent() is not None:
561
+ ... i = ptree.parent_index()
562
+ ... assert ptree.parent()[i] is ptree
563
+ ... if i > 0:
564
+ ... assert ptree.left_sibling() is ptree.parent()[i-1]
565
+ ... if i < (len(ptree.parent())-1):
566
+ ... assert ptree.right_sibling() is ptree.parent()[i+1]
567
+ ... assert len(ptree.treeposition()) > 0
568
+ ... assert (ptree.treeposition() ==
569
+ ... ptree.parent().treeposition() + (ptree.parent_index(),))
570
+ ... assert ptree.root() is not ptree
571
+ ... assert ptree.root() is not None
572
+ ... assert ptree.root() is ptree.parent().root()
573
+ ... assert ptree.root()[ptree.treeposition()] is ptree
574
+ ... else:
575
+ ... assert ptree.parent_index() is None
576
+ ... assert ptree.left_sibling() is None
577
+ ... assert ptree.right_sibling() is None
578
+ ... assert ptree.root() is ptree
579
+ ... assert ptree.treeposition() == ()
580
+ ... # Check ptree's children's methods:
581
+ ... for i, child in enumerate(ptree):
582
+ ... if isinstance(child, Tree):
583
+ ... # pcheck parent() & parent_index() methods
584
+ ... assert child.parent() is ptree
585
+ ... assert child.parent_index() == i
586
+ ... # pcheck sibling methods
587
+ ... if i == 0:
588
+ ... assert child.left_sibling() is None
589
+ ... else:
590
+ ... assert child.left_sibling() is ptree[i-1]
591
+ ... if i == len(ptree)-1:
592
+ ... assert child.right_sibling() is None
593
+ ... else:
594
+ ... assert child.right_sibling() is ptree[i+1]
595
+ ... if print_ptrees:
596
+ ... print('ok!', end=' ')
597
+ ... for ptree in print_ptrees: print(ptree)
598
+ ... else:
599
+ ... print('ok!')
600
+
601
+ Run our test function on a variety of newly-created trees:
602
+
603
+ >>> pcheck(make_ptree('(A)'))
604
+ ok! (A )
605
+ >>> pcheck(make_ptree('(A (B (C (D) (E f)) g) h)'))
606
+ ok! (A (B (C (D ) (E f)) g) h)
607
+ >>> pcheck(make_ptree('(A (B) (C c) (D d d) (E e e e))'))
608
+ ok! (A (B ) (C c) (D d d) (E e e e))
609
+ >>> pcheck(make_ptree('(A (B) (C (c)) (D (d) (d)) (E (e) (e) (e)))'))
610
+ ok! (A (B ) (C (c )) (D (d ) (d )) (E (e ) (e ) (e )))
611
+
612
+ Run our test function after performing various tree-modification
613
+ operations:
614
+
615
+ **__delitem__()**
616
+
617
+ >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)')
618
+ >>> e = ptree[0,0,1]
619
+ >>> del ptree[0,0,1]; pcheck(ptree); pcheck(e)
620
+ ok! (A (B (C (D ) (Q p)) g) h)
621
+ ok! (E f)
622
+ >>> del ptree[0,0,0]; pcheck(ptree)
623
+ ok! (A (B (C (Q p)) g) h)
624
+ >>> del ptree[0,1]; pcheck(ptree)
625
+ ok! (A (B (C (Q p))) h)
626
+ >>> del ptree[-1]; pcheck(ptree)
627
+ ok! (A (B (C (Q p))))
628
+ >>> del ptree[-100]
629
+ Traceback (most recent call last):
630
+ . . .
631
+ IndexError: index out of range
632
+ >>> del ptree[()]
633
+ Traceback (most recent call last):
634
+ . . .
635
+ IndexError: The tree position () may not be deleted.
636
+
637
+ >>> # With slices:
638
+ >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))')
639
+ >>> b = ptree[0]
640
+ >>> del ptree[0:0]; pcheck(ptree)
641
+ ok! (A (B c) (D e) f g (H i) j (K l))
642
+ >>> del ptree[:1]; pcheck(ptree); pcheck(b)
643
+ ok! (A (D e) f g (H i) j (K l))
644
+ ok! (B c)
645
+ >>> del ptree[-2:]; pcheck(ptree)
646
+ ok! (A (D e) f g (H i))
647
+ >>> del ptree[1:3]; pcheck(ptree)
648
+ ok! (A (D e) (H i))
649
+ >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))')
650
+ >>> del ptree[5:1000]; pcheck(ptree)
651
+ ok! (A (B c) (D e) f g (H i))
652
+ >>> del ptree[-2:1000]; pcheck(ptree)
653
+ ok! (A (B c) (D e) f)
654
+ >>> del ptree[-100:1]; pcheck(ptree)
655
+ ok! (A (D e) f)
656
+ >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))')
657
+ >>> del ptree[1:-2:2]; pcheck(ptree)
658
+ ok! (A (B c) f (H i) j (K l))
659
+
660
+ **__setitem__()**
661
+
662
+ >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)')
663
+ >>> d, e, q = ptree[0,0]
664
+ >>> ptree[0,0,0] = 'x'; pcheck(ptree); pcheck(d)
665
+ ok! (A (B (C x (E f) (Q p)) g) h)
666
+ ok! (D )
667
+ >>> ptree[0,0,1] = make_ptree('(X (Y z))'); pcheck(ptree); pcheck(e)
668
+ ok! (A (B (C x (X (Y z)) (Q p)) g) h)
669
+ ok! (E f)
670
+ >>> ptree[1] = d; pcheck(ptree)
671
+ ok! (A (B (C x (X (Y z)) (Q p)) g) (D ))
672
+ >>> ptree[-1] = 'x'; pcheck(ptree)
673
+ ok! (A (B (C x (X (Y z)) (Q p)) g) x)
674
+ >>> ptree[-100] = 'y'
675
+ Traceback (most recent call last):
676
+ . . .
677
+ IndexError: index out of range
678
+ >>> ptree[()] = make_ptree('(X y)')
679
+ Traceback (most recent call last):
680
+ . . .
681
+ IndexError: The tree position () may not be assigned to.
682
+
683
+ >>> # With slices:
684
+ >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))')
685
+ >>> b = ptree[0]
686
+ >>> ptree[0:0] = ('x', make_ptree('(Y)')); pcheck(ptree)
687
+ ok! (A x (Y ) (B c) (D e) f g (H i) j (K l))
688
+ >>> ptree[2:6] = (); pcheck(ptree); pcheck(b)
689
+ ok! (A x (Y ) (H i) j (K l))
690
+ ok! (B c)
691
+ >>> ptree[-2:] = ('z', 'p'); pcheck(ptree)
692
+ ok! (A x (Y ) (H i) z p)
693
+ >>> ptree[1:3] = [make_ptree('(X)') for x in range(10)]; pcheck(ptree)
694
+ ok! (A x (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) z p)
695
+ >>> ptree[5:1000] = []; pcheck(ptree)
696
+ ok! (A x (X ) (X ) (X ) (X ))
697
+ >>> ptree[-2:1000] = ['n']; pcheck(ptree)
698
+ ok! (A x (X ) (X ) n)
699
+ >>> ptree[-100:1] = [make_ptree('(U v)')]; pcheck(ptree)
700
+ ok! (A (U v) (X ) (X ) n)
701
+ >>> ptree[-1:] = (make_ptree('(X)') for x in range(3)); pcheck(ptree)
702
+ ok! (A (U v) (X ) (X ) (X ) (X ) (X ))
703
+ >>> ptree[1:-2:2] = ['x', 'y']; pcheck(ptree)
704
+ ok! (A (U v) x (X ) y (X ) (X ))
705
+
706
+ **append()**
707
+
708
+ >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)')
709
+ >>> ptree.append('x'); pcheck(ptree)
710
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x)
711
+ >>> ptree.append(make_ptree('(X (Y z))')); pcheck(ptree)
712
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x (X (Y z)))
713
+
714
+ **extend()**
715
+
716
+ >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)')
717
+ >>> ptree.extend(['x', 'y', make_ptree('(X (Y z))')]); pcheck(ptree)
718
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)))
719
+ >>> ptree.extend([]); pcheck(ptree)
720
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)))
721
+ >>> ptree.extend(make_ptree('(X)') for x in range(3)); pcheck(ptree)
722
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)) (X ) (X ) (X ))
723
+
724
+ **insert()**
725
+
726
+ >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)')
727
+ >>> ptree.insert(0, make_ptree('(X (Y z))')); pcheck(ptree)
728
+ ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) h)
729
+ >>> ptree.insert(-1, make_ptree('(X (Y z))')); pcheck(ptree)
730
+ ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h)
731
+ >>> ptree.insert(-4, make_ptree('(X (Y z))')); pcheck(ptree)
732
+ ok! (A (X (Y z)) (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h)
733
+ >>> # Note: as with ``list``, inserting at a negative index that
734
+ >>> # gives a position before the start of the list does *not*
735
+ >>> # raise an IndexError exception; it just inserts at 0.
736
+ >>> ptree.insert(-400, make_ptree('(X (Y z))')); pcheck(ptree)
737
+ ok! (A
738
+ (X (Y z))
739
+ (X (Y z))
740
+ (X (Y z))
741
+ (B (C (D ) (E f) (Q p)) g)
742
+ (X (Y z))
743
+ h)
744
+
745
+ **pop()**
746
+
747
+ >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)')
748
+ >>> ptree[0,0].pop(1); pcheck(ptree)
749
+ ParentedTree('E', ['f'])
750
+ ok! (A (B (C (D ) (Q p)) g) h)
751
+ >>> ptree[0].pop(-1); pcheck(ptree)
752
+ 'g'
753
+ ok! (A (B (C (D ) (Q p))) h)
754
+ >>> ptree.pop(); pcheck(ptree)
755
+ 'h'
756
+ ok! (A (B (C (D ) (Q p))))
757
+ >>> ptree.pop(-100)
758
+ Traceback (most recent call last):
759
+ . . .
760
+ IndexError: index out of range
761
+
762
+ **remove()**
763
+
764
+ >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)')
765
+ >>> e = ptree[0,0,1]
766
+ >>> ptree[0,0].remove(ptree[0,0,1]); pcheck(ptree); pcheck(e)
767
+ ok! (A (B (C (D ) (Q p)) g) h)
768
+ ok! (E f)
769
+ >>> ptree[0,0].remove(make_ptree('(Q p)')); pcheck(ptree)
770
+ ok! (A (B (C (D )) g) h)
771
+ >>> ptree[0,0].remove(make_ptree('(Q p)'))
772
+ Traceback (most recent call last):
773
+ . . .
774
+ ValueError: ParentedTree('Q', ['p']) is not in list
775
+ >>> ptree.remove('h'); pcheck(ptree)
776
+ ok! (A (B (C (D )) g))
777
+ >>> ptree.remove('h');
778
+ Traceback (most recent call last):
779
+ . . .
780
+ ValueError: 'h' is not in list
781
+ >>> # remove() removes the first subtree that is equal (==) to the
782
+ >>> # given tree, which may not be the identical tree we give it:
783
+ >>> ptree = make_ptree('(A (X x) (Y y) (X x))')
784
+ >>> x1, y, x2 = ptree
785
+ >>> ptree.remove(ptree[-1]); pcheck(ptree)
786
+ ok! (A (Y y) (X x))
787
+ >>> print(x1.parent()); pcheck(x1)
788
+ None
789
+ ok! (X x)
790
+ >>> print(x2.parent())
791
+ (A (Y y) (X x))
792
+
793
+ Test that a tree can not be given multiple parents:
794
+
795
+ >>> ptree = make_ptree('(A (X x) (Y y) (Z z))')
796
+ >>> ptree[0] = ptree[1]
797
+ Traceback (most recent call last):
798
+ . . .
799
+ ValueError: Can not insert a subtree that already has a parent.
800
+ >>> pcheck()
801
+ ok!
802
+
803
+ [more to be written]
804
+
805
+ Shallow copying can be tricky for Tree and several of its subclasses.
806
+ For shallow copies of Tree, only the root node is reconstructed, while
807
+ all the children are shared between the two trees. If the children of
808
+ one tree are modified, the shallow copy reflects the change as well.
809
+
810
+ >>> from nltk.tree import Tree, ParentedTree, MultiParentedTree
811
+ >>> tree = Tree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))")
812
+ >>> copy_tree = tree.copy(deep=False)
813
+ >>> tree == copy_tree # Ensure identical labels and nodes
814
+ True
815
+ >>> id(copy_tree[0]) == id(tree[0]) # Ensure shallow copy - the children are the same objects in memory
816
+ True
817
+
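+ A small illustrative check (not part of the original test file) that a
+ change made through one tree is visible through its shallow copy; the
+ original label is restored afterwards:
+
+ >>> tree[0][0].set_label('CHANGED')
+ >>> copy_tree[0][0].label()
+ 'CHANGED'
+ >>> tree[0][0].set_label('NP') # restore the original label
+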
818
+ For ParentedTree objects, this behaviour is not possible. With a shallow
819
+ copy, the children of the root node would be reused for both the original
820
+ and the shallow copy. For this to be possible, some children would need
821
+ to have multiple parents. As this is forbidden for ParentedTree objects,
822
+ attempting to make a shallow copy will cause a warning, and a deep copy
823
+ is made instead.
824
+
825
+ >>> ptree = ParentedTree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))")
826
+ >>> copy_ptree = ptree.copy(deep=False)
827
+ >>> copy_ptree == ptree # Ensure identical labels and nodes
828
+ True
829
+ >>> id(copy_ptree[0]) != id(ptree[0]) # Shallow copying isn't supported - it defaults to deep copy.
830
+ True
831
+
832
+ MultiParentedTree objects do not have the single-parent restriction
833
+ described above for ParentedTree objects. Shallow copying a
834
+ MultiParentedTree gives the children of the root node two parents:
835
+ the original and the newly copied root.
836
+
837
+ >>> mptree = MultiParentedTree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))")
838
+ >>> len(mptree[0].parents())
839
+ 1
840
+ >>> copy_mptree = mptree.copy(deep=False)
841
+ >>> copy_mptree == mptree # Ensure identical labels and nodes
842
+ True
843
+ >>> len(mptree[0].parents())
844
+ 2
845
+ >>> len(copy_mptree[0].parents())
846
+ 2
847
+
848
+ Shallow copying a MultiParentedTree is similar to creating a second root
849
+ which is identically labeled as the root on which the copy method was called.
850
+
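+ A brief illustrative check (not part of the original test file), reusing
+ ``mptree`` and ``copy_mptree`` from above:
+
+ >>> copy_mptree is mptree
+ False
+ >>> copy_mptree.label() == mptree.label()
+ True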
851
+
852
+ ImmutableParentedTree Regression Tests
853
+ --------------------------------------
854
+
855
+ >>> iptree = ImmutableParentedTree.convert(ptree)
856
+ >>> type(iptree)
857
+ <class 'nltk.tree.immutable.ImmutableParentedTree'>
858
+ >>> del iptree[0]
859
+ Traceback (most recent call last):
860
+ . . .
861
+ ValueError: ImmutableParentedTree may not be modified
862
+ >>> iptree.set_label('newnode')
863
+ Traceback (most recent call last):
864
+ . . .
865
+ ValueError: ImmutableParentedTree may not be modified
866
+
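If a modifiable tree is needed again, ``convert()`` can be applied in the
other direction. The sketch below is an editorial illustration, not part of
the upstream doctest, but it only uses the ``convert()`` and ``set_label()``
calls already exercised above:

>>> mutable_again = ParentedTree.convert(iptree)   # back to a mutable ParentedTree
>>> mutable_again.set_label('newnode')             # now allowed
>>> mutable_again.label()
'newnode'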
867
+
868
+ MultiParentedTree Regression Tests
869
+ ----------------------------------
870
+ Keep track of all trees that we create (including subtrees) using this
871
+ variable:
872
+
873
+ >>> all_mptrees = []
874
+
875
+ Define a helper function to create new parented trees:
876
+
877
+ >>> def make_mptree(s):
878
+ ... mptree = MultiParentedTree.convert(Tree.fromstring(s))
879
+ ... all_mptrees.extend(t for t in mptree.subtrees()
880
+ ... if isinstance(t, Tree))
881
+ ... return mptree
882
+
883
+ Define a test function that examines every subtree in all_mptrees and
884
+ checks that all six of its methods are defined correctly. If any
885
+ mptrees are passed as arguments, then they are printed.
886
+
887
+ >>> def mpcheck(*print_mptrees):
888
+ ... def has(seq, val): # uses identity comparison
889
+ ... for item in seq:
890
+ ... if item is val: return True
891
+ ... return False
892
+ ... for mptree in all_mptrees:
893
+ ... # Check mptree's methods.
894
+ ... if len(mptree.parents()) == 0:
895
+ ... assert len(mptree.left_siblings()) == 0
896
+ ... assert len(mptree.right_siblings()) == 0
897
+ ... assert len(mptree.roots()) == 1
898
+ ... assert mptree.roots()[0] is mptree
899
+ ... assert mptree.treepositions(mptree) == [()]
900
+ ... left_siblings = right_siblings = ()
901
+ ... roots = {id(mptree): 1}
902
+ ... else:
903
+ ... roots = dict((id(r), 0) for r in mptree.roots())
904
+ ... left_siblings = mptree.left_siblings()
905
+ ... right_siblings = mptree.right_siblings()
906
+ ... for parent in mptree.parents():
907
+ ... for i in mptree.parent_indices(parent):
908
+ ... assert parent[i] is mptree
909
+ ... # check left siblings
910
+ ... if i > 0:
911
+ ... for j in range(len(left_siblings)):
912
+ ... if left_siblings[j] is parent[i-1]:
913
+ ... del left_siblings[j]
914
+ ... break
915
+ ... else:
916
+ ... assert 0, 'sibling not found!'
917
+ ... # check right siblings
918
+ ... if i < (len(parent)-1):
919
+ ... for j in range(len(right_siblings)):
920
+ ... if right_siblings[j] is parent[i+1]:
921
+ ... del right_siblings[j]
922
+ ... break
923
+ ... else:
924
+ ... assert 0, 'sibling not found!'
925
+ ... # check roots
926
+ ... for root in parent.roots():
927
+ ... assert id(root) in roots, 'missing root'
928
+ ... roots[id(root)] += 1
929
+ ... # check that we don't have any unexplained values
930
+ ... assert len(left_siblings)==0, 'unexpected sibling'
931
+ ... assert len(right_siblings)==0, 'unexpected sibling'
932
+ ... for v in roots.values(): assert v>0, roots #'unexpected root'
933
+ ... # check treepositions
934
+ ... for root in mptree.roots():
935
+ ... for treepos in mptree.treepositions(root):
936
+ ... assert root[treepos] is mptree
937
+ ... # Check mptree's children's methods:
938
+ ... for i, child in enumerate(mptree):
939
+ ... if isinstance(child, Tree):
940
+ ... # mpcheck parent() & parent_index() methods
941
+ ... assert has(child.parents(), mptree)
942
+ ... assert i in child.parent_indices(mptree)
943
+ ... # mpcheck sibling methods
944
+ ... if i > 0:
945
+ ... assert has(child.left_siblings(), mptree[i-1])
946
+ ... if i < len(mptree)-1:
947
+ ... assert has(child.right_siblings(), mptree[i+1])
948
+ ... if print_mptrees:
949
+ ... print('ok!', end=' ')
950
+ ... for mptree in print_mptrees: print(mptree)
951
+ ... else:
952
+ ... print('ok!')
953
+
954
+ Run our test function on a variety of newly-created trees:
955
+
956
+ >>> mpcheck(make_mptree('(A)'))
957
+ ok! (A )
958
+ >>> mpcheck(make_mptree('(A (B (C (D) (E f)) g) h)'))
959
+ ok! (A (B (C (D ) (E f)) g) h)
960
+ >>> mpcheck(make_mptree('(A (B) (C c) (D d d) (E e e e))'))
961
+ ok! (A (B ) (C c) (D d d) (E e e e))
962
+ >>> mpcheck(make_mptree('(A (B) (C (c)) (D (d) (d)) (E (e) (e) (e)))'))
963
+ ok! (A (B ) (C (c )) (D (d ) (d )) (E (e ) (e ) (e )))
964
+ >>> subtree = make_mptree('(A (B (C (D) (E f)) g) h)')
965
+
966
+ Including some trees that contain multiple parents:
967
+
968
+ >>> mpcheck(MultiParentedTree('Z', [subtree, subtree]))
969
+ ok! (Z (A (B (C (D ) (E f)) g) h) (A (B (C (D ) (E f)) g) h))
970
+
971
+ Run our test function after performing various tree-modification
972
+ operations (n.b., these are the same tests that we ran for
973
+ `ParentedTree`, above; thus, none of these trees actually *uses*
974
+ multiple parents.)
975
+
976
+ **__delitem__()**
977
+
978
+ >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)')
979
+ >>> e = mptree[0,0,1]
980
+ >>> del mptree[0,0,1]; mpcheck(mptree); mpcheck(e)
981
+ ok! (A (B (C (D ) (Q p)) g) h)
982
+ ok! (E f)
983
+ >>> del mptree[0,0,0]; mpcheck(mptree)
984
+ ok! (A (B (C (Q p)) g) h)
985
+ >>> del mptree[0,1]; mpcheck(mptree)
986
+ ok! (A (B (C (Q p))) h)
987
+ >>> del mptree[-1]; mpcheck(mptree)
988
+ ok! (A (B (C (Q p))))
989
+ >>> del mptree[-100]
990
+ Traceback (most recent call last):
991
+ . . .
992
+ IndexError: index out of range
993
+ >>> del mptree[()]
994
+ Traceback (most recent call last):
995
+ . . .
996
+ IndexError: The tree position () may not be deleted.
997
+
998
+ >>> # With slices:
999
+ >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))')
1000
+ >>> b = mptree[0]
1001
+ >>> del mptree[0:0]; mpcheck(mptree)
1002
+ ok! (A (B c) (D e) f g (H i) j (K l))
1003
+ >>> del mptree[:1]; mpcheck(mptree); mpcheck(b)
1004
+ ok! (A (D e) f g (H i) j (K l))
1005
+ ok! (B c)
1006
+ >>> del mptree[-2:]; mpcheck(mptree)
1007
+ ok! (A (D e) f g (H i))
1008
+ >>> del mptree[1:3]; mpcheck(mptree)
1009
+ ok! (A (D e) (H i))
1010
+ >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))')
1011
+ >>> del mptree[5:1000]; mpcheck(mptree)
1012
+ ok! (A (B c) (D e) f g (H i))
1013
+ >>> del mptree[-2:1000]; mpcheck(mptree)
1014
+ ok! (A (B c) (D e) f)
1015
+ >>> del mptree[-100:1]; mpcheck(mptree)
1016
+ ok! (A (D e) f)
1017
+ >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))')
1018
+ >>> del mptree[1:-2:2]; mpcheck(mptree)
1019
+ ok! (A (B c) f (H i) j (K l))
1020
+
1021
+ **__setitem__()**
1022
+
1023
+ >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)')
1024
+ >>> d, e, q = mptree[0,0]
1025
+ >>> mptree[0,0,0] = 'x'; mpcheck(mptree); mpcheck(d)
1026
+ ok! (A (B (C x (E f) (Q p)) g) h)
1027
+ ok! (D )
1028
+ >>> mptree[0,0,1] = make_mptree('(X (Y z))'); mpcheck(mptree); mpcheck(e)
1029
+ ok! (A (B (C x (X (Y z)) (Q p)) g) h)
1030
+ ok! (E f)
1031
+ >>> mptree[1] = d; mpcheck(mptree)
1032
+ ok! (A (B (C x (X (Y z)) (Q p)) g) (D ))
1033
+ >>> mptree[-1] = 'x'; mpcheck(mptree)
1034
+ ok! (A (B (C x (X (Y z)) (Q p)) g) x)
1035
+ >>> mptree[-100] = 'y'
1036
+ Traceback (most recent call last):
1037
+ . . .
1038
+ IndexError: index out of range
1039
+ >>> mptree[()] = make_mptree('(X y)')
1040
+ Traceback (most recent call last):
1041
+ . . .
1042
+ IndexError: The tree position () may not be assigned to.
1043
+
1044
+ >>> # With slices:
1045
+ >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))')
1046
+ >>> b = mptree[0]
1047
+ >>> mptree[0:0] = ('x', make_mptree('(Y)')); mpcheck(mptree)
1048
+ ok! (A x (Y ) (B c) (D e) f g (H i) j (K l))
1049
+ >>> mptree[2:6] = (); mpcheck(mptree); mpcheck(b)
1050
+ ok! (A x (Y ) (H i) j (K l))
1051
+ ok! (B c)
1052
+ >>> mptree[-2:] = ('z', 'p'); mpcheck(mptree)
1053
+ ok! (A x (Y ) (H i) z p)
1054
+ >>> mptree[1:3] = [make_mptree('(X)') for x in range(10)]; mpcheck(mptree)
1055
+ ok! (A x (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) z p)
1056
+ >>> mptree[5:1000] = []; mpcheck(mptree)
1057
+ ok! (A x (X ) (X ) (X ) (X ))
1058
+ >>> mptree[-2:1000] = ['n']; mpcheck(mptree)
1059
+ ok! (A x (X ) (X ) n)
1060
+ >>> mptree[-100:1] = [make_mptree('(U v)')]; mpcheck(mptree)
1061
+ ok! (A (U v) (X ) (X ) n)
1062
+ >>> mptree[-1:] = (make_mptree('(X)') for x in range(3)); mpcheck(mptree)
1063
+ ok! (A (U v) (X ) (X ) (X ) (X ) (X ))
1064
+ >>> mptree[1:-2:2] = ['x', 'y']; mpcheck(mptree)
1065
+ ok! (A (U v) x (X ) y (X ) (X ))
1066
+
1067
+ **append()**
1068
+
1069
+ >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)')
1070
+ >>> mptree.append('x'); mpcheck(mptree)
1071
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x)
1072
+ >>> mptree.append(make_mptree('(X (Y z))')); mpcheck(mptree)
1073
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x (X (Y z)))
1074
+
1075
+ **extend()**
1076
+
1077
+ >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)')
1078
+ >>> mptree.extend(['x', 'y', make_mptree('(X (Y z))')]); mpcheck(mptree)
1079
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)))
1080
+ >>> mptree.extend([]); mpcheck(mptree)
1081
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)))
1082
+ >>> mptree.extend(make_mptree('(X)') for x in range(3)); mpcheck(mptree)
1083
+ ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)) (X ) (X ) (X ))
1084
+
1085
+ **insert()**
1086
+
1087
+ >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)')
1088
+ >>> mptree.insert(0, make_mptree('(X (Y z))')); mpcheck(mptree)
1089
+ ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) h)
1090
+ >>> mptree.insert(-1, make_mptree('(X (Y z))')); mpcheck(mptree)
1091
+ ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h)
1092
+ >>> mptree.insert(-4, make_mptree('(X (Y z))')); mpcheck(mptree)
1093
+ ok! (A (X (Y z)) (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h)
1094
+ >>> # Note: as with ``list``, inserting at a negative index that
1095
+ >>> # gives a position before the start of the list does *not*
1096
+ >>> # raise an IndexError exception; it just inserts at 0.
1097
+ >>> mptree.insert(-400, make_mptree('(X (Y z))')); mpcheck(mptree)
1098
+ ok! (A
1099
+ (X (Y z))
1100
+ (X (Y z))
1101
+ (X (Y z))
1102
+ (B (C (D ) (E f) (Q p)) g)
1103
+ (X (Y z))
1104
+ h)
1105
+
1106
+ **pop()**
1107
+
1108
+ >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)')
1109
+ >>> mptree[0,0].pop(1); mpcheck(mptree)
1110
+ MultiParentedTree('E', ['f'])
1111
+ ok! (A (B (C (D ) (Q p)) g) h)
1112
+ >>> mptree[0].pop(-1); mpcheck(mptree)
1113
+ 'g'
1114
+ ok! (A (B (C (D ) (Q p))) h)
1115
+ >>> mptree.pop(); mpcheck(mptree)
1116
+ 'h'
1117
+ ok! (A (B (C (D ) (Q p))))
1118
+ >>> mptree.pop(-100)
1119
+ Traceback (most recent call last):
1120
+ . . .
1121
+ IndexError: index out of range
1122
+
1123
+ **remove()**
1124
+
1125
+ >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)')
1126
+ >>> e = mptree[0,0,1]
1127
+ >>> mptree[0,0].remove(mptree[0,0,1]); mpcheck(mptree); mpcheck(e)
1128
+ ok! (A (B (C (D ) (Q p)) g) h)
1129
+ ok! (E f)
1130
+ >>> mptree[0,0].remove(make_mptree('(Q p)')); mpcheck(mptree)
1131
+ ok! (A (B (C (D )) g) h)
1132
+ >>> mptree[0,0].remove(make_mptree('(Q p)'))
1133
+ Traceback (most recent call last):
1134
+ . . .
1135
+ ValueError: MultiParentedTree('Q', ['p']) is not in list
1136
+ >>> mptree.remove('h'); mpcheck(mptree)
1137
+ ok! (A (B (C (D )) g))
1138
+ >>> mptree.remove('h');
1139
+ Traceback (most recent call last):
1140
+ . . .
1141
+ ValueError: 'h' is not in list
1142
+ >>> # remove() removes the first subtree that is equal (==) to the
1143
+ >>> # given tree, which may not be the identical tree we give it:
1144
+ >>> mptree = make_mptree('(A (X x) (Y y) (X x))')
1145
+ >>> x1, y, x2 = mptree
1146
+ >>> mptree.remove(mptree[-1]); mpcheck(mptree)
1147
+ ok! (A (Y y) (X x))
1148
+ >>> print([str(p) for p in x1.parents()])
1149
+ []
1150
+ >>> print([str(p) for p in x2.parents()])
1151
+ ['(A (Y y) (X x))']
1152
+
1153
+
1154
+ ImmutableMultiParentedTree Regression Tests
1155
+ -------------------------------------------
1156
+
1157
+ >>> imptree = ImmutableMultiParentedTree.convert(mptree)
1158
+ >>> type(imptree)
1159
+ <class 'nltk.tree.immutable.ImmutableMultiParentedTree'>
1160
+ >>> del imptree[0]
1161
+ Traceback (most recent call last):
1162
+ . . .
1163
+ ValueError: ImmutableMultiParentedTree may not be modified
1164
+ >>> imptree.set_label('newnode')
1165
+ Traceback (most recent call last):
1166
+ . . .
1167
+ ValueError: ImmutableMultiParentedTree may not be modified
1168
+
1169
+
1170
+ ProbabilisticTree Regression Tests
1171
+ ----------------------------------
1172
+
1173
+ >>> prtree = ProbabilisticTree("S", [ProbabilisticTree("NP", ["N"], prob=0.3)], prob=0.6)
1174
+ >>> print(prtree)
1175
+ (S (NP N)) (p=0.6)
1176
+ >>> import copy
1177
+ >>> prtree == copy.deepcopy(prtree) == prtree.copy(deep=True) == prtree.copy()
1178
+ True
1179
+ >>> prtree[0] is prtree.copy()[0]
1180
+ True
1181
+ >>> prtree[0] is prtree.copy(deep=True)[0]
1182
+ False
1183
+
1184
+ >>> imprtree = ImmutableProbabilisticTree.convert(prtree)
1185
+ >>> type(imprtree)
1186
+ <class 'nltk.tree.immutable.ImmutableProbabilisticTree'>
1187
+ >>> del imprtree[0]
1188
+ Traceback (most recent call last):
1189
+ . . .
1190
+ ValueError: ImmutableProbabilisticTree may not be modified
1191
+ >>> imprtree.set_label('newnode')
1192
+ Traceback (most recent call last):
1193
+ . . .
1194
+ ValueError: ImmutableProbabilisticTree may not be modified
1195
+
1196
+
1197
+ Squashed Bugs
1198
+ =============
1199
+
1200
+ This used to discard the ``(B b)`` subtree (fixed in svn 6270):
1201
+
1202
+ >>> print(Tree.fromstring('((A a) (B b))'))
1203
+ ( (A a) (B b))
1204
+
1205
+ Pickling ParentedTree instances did not work from Python 3.7 onwards (see #2478):
1206
+
1207
+ >>> import pickle
1208
+ >>> tree = ParentedTree.fromstring('(S (NN x) (NP x) (NN x))')
1209
+ >>> print(tree)
1210
+ (S (NN x) (NP x) (NN x))
1211
+
1212
+ >>> pickled = pickle.dumps(tree)
1213
+ >>> tree_loaded = pickle.loads(pickled)
1214
+ >>> print(tree_loaded)
1215
+ (S (NN x) (NP x) (NN x))
1216
+
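The important property is that the parent pointers survive the round trip.
The extra check below is an editorial illustration, assuming (as the fix for
#2478 intends) that unpickling rebuilds the tree through its normal
constructor:

>>> tree_loaded[0].parent() is tree_loaded   # parent links are restored
True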
1217
+ ParentedTree used to be impossible to (deep)copy (see #1324):
1218
+
1219
+ >>> from nltk.tree import ParentedTree
1220
+ >>> import copy
1221
+ >>> tree = ParentedTree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))")
1222
+ >>> tree == copy.deepcopy(tree) == copy.copy(tree) == tree.copy(deep=True) == tree.copy()
1223
+ True
env-llmeval/lib/python3.10/site-packages/nltk/test/treeprettyprinter.doctest ADDED
@@ -0,0 +1,177 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =========================================================
5
+ Unit tests for nltk.tree.prettyprinter.TreePrettyPrinter
6
+ =========================================================
7
+
8
+ >>> from nltk.tree import Tree, TreePrettyPrinter
9
+
10
+ Tree nr 2170 from nltk.corpus.treebank:
11
+
12
+ >>> tree = Tree.fromstring(
13
+ ... '(S (NP-SBJ (PRP I)) (VP (VBP feel) (ADJP-PRD (RB pretty) '
14
+ ... '(JJ good)) (PP-CLR (IN about) (NP (PRP it)))) (. .))')
15
+ >>> tpp = TreePrettyPrinter(tree)
16
+ >>> print(tpp.text())
17
+ S
18
+ __________________________|_____________________
19
+ | VP |
20
+ | ____________________|___________ |
21
+ | | | PP-CLR |
22
+ | | | _____|_____ |
23
+ NP-SBJ | ADJP-PRD | NP |
24
+ | | _______|______ | | |
25
+ PRP VBP RB JJ IN PRP .
26
+ | | | | | | |
27
+ I feel pretty good about it .
28
+
29
+ >>> print(tpp.text(unicodelines=True))
30
+ S
31
+ ┌──────────────────────────┼─────────────────────┐
32
+ │ VP │
33
+ │ ┌─────────────┬──────┴───────────┐ │
34
+ │ │ │ PP-CLR │
35
+ │ │ │ ┌─────┴─────┐ │
36
+ NP-SBJ │ ADJP-PRD │ NP │
37
+ │ │ ┌───────┴──────┐ │ │ │
38
+ PRP VBP RB JJ IN PRP .
39
+ │ │ │ │ │ │ │
40
+ I feel pretty good about it .
41
+
42
+ A tree with long labels:
43
+
44
+ >>> tree = Tree.fromstring(
45
+ ... '(sentence (plural-noun-phrase (plural-noun Superconductors)) '
46
+ ... '(verb-phrase (plural-verb conduct) '
47
+ ... '(noun-phrase (singular-noun electricity))))')
48
+ >>> tpp = TreePrettyPrinter(tree)
49
+ >>> print(tpp.text(abbreviate=8, nodedist=2))
50
+ sentence
51
+ __________|__________
52
+ | verb-phr.
53
+ | __________|__________
54
+ plural-n. | noun-phr.
55
+ | | |
56
+ plural-n. plural-v. singular.
57
+ | | |
58
+ Supercon. conduct electric.
59
+
60
+ >>> print(tpp.text(maxwidth=8, nodedist=2))
61
+ sentence
62
+ _________|________
63
+ | verb-
64
+ | phrase
65
+ | ________|_________
66
+ plural- | noun-
67
+ noun- | phrase
68
+ phrase | |
69
+ | | |
70
+ plural- plural- singular-
71
+ noun verb noun
72
+ | | |
73
+ Supercon conduct electric
74
+ ductors ity
75
+
76
+ A discontinuous tree:
77
+
78
+ >>> tree = Tree.fromstring(
79
+ ... '(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) '
80
+ ... '(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) '
81
+ ... '(vg 10) (inf (verb 11)))))) (punct 12))', read_leaf=int)
82
+ >>> sentence = ('Ze had met haar moeder kunnen gaan winkelen ,'
83
+ ... ' zwemmen of terrassen .'.split())
84
+ >>> tpp = TreePrettyPrinter(tree, sentence)
85
+ >>> print(tpp.text())
86
+ top
87
+ _____|______________________________________________
88
+ smain | |
89
+ _______________________________|_____ | |
90
+ | | inf | |
91
+ | | _____|____ | |
92
+ | | | inf | |
93
+ | | | ____|_____ | |
94
+ | | | | conj | |
95
+ | | _____ | ___ | _________|______ | __________________ |
96
+ | | inf | | | | | | |
97
+ | | _________|_____ | ___ | _________ | | | | |
98
+ | | pp | | | | | | | |
99
+ | | ____|____ | | | | | | | |
100
+ | | | np | | | | inf | inf |
101
+ | | | ____|____ | | | | | | | |
102
+ noun verb prep det noun verb verb verb punct verb vg verb punct
103
+ | | | | | | | | | | | | |
104
+ Ze had met haar moeder kunnen gaan winkelen , zwemmen of terrassen .
105
+
106
+ >>> print(tpp.text(unicodelines=True))
107
+ top
108
+ ┌─────┴──────────────────┬───────────────────────────┐
109
+ smain │ │
110
+ ┌────┬──────────────────────────┴─────┐ │ │
111
+ │ │ inf │ │
112
+ │ │ ┌─────┴────┐ │ │
113
+ │ │ │ inf │ │
114
+ │ │ │ ┌────┴─────┐ │ │
115
+ │ │ │ │ conj │ │
116
+ │ │ ┌───── │ ─── │ ─────────┴────── │ ─────┬─────┬──────┐ │
117
+ │ │ inf │ │ │ │ │ │ │
118
+ │ │ ┌─────────┴───── │ ─── │ ─────────┐ │ │ │ │ │
119
+ │ │ pp │ │ │ │ │ │ │ │
120
+ │ │ ┌────┴────┐ │ │ │ │ │ │ │ │
121
+ │ │ │ np │ │ │ │ inf │ inf │
122
+ │ │ │ ┌────┴────┐ │ │ │ │ │ │ │ │
123
+ noun verb prep det noun verb verb verb punct verb vg verb punct
124
+ │ │ │ │ │ │ │ │ │ │ │ │ │
125
+ Ze had met haar moeder kunnen gaan winkelen , zwemmen of terrassen .
126
+
127
+ Importing TreePrettyPrinter
128
+ ---------------------------
129
+
130
+ First of all, a simple tree will be constructed::
131
+
132
+ >>> from nltk.tree import Tree
133
+ >>> tree = Tree.fromstring('(S (NP Mary) (VP walks))')
134
+
135
+ We'll use this sample tree to show that the different methods of importing `TreePrettyPrinter` work correctly:
136
+
137
+ - Recommended::
138
+
139
+ >>> from nltk.tree import TreePrettyPrinter
140
+ >>> print(TreePrettyPrinter(tree).text())
141
+ S
142
+ ____|____
143
+ NP VP
144
+ | |
145
+ Mary walks
146
+
147
+ - Alternative but valid options::
148
+
149
+ >>> from nltk import TreePrettyPrinter
150
+ >>> print(TreePrettyPrinter(tree).text())
151
+ S
152
+ ____|____
153
+ NP VP
154
+ | |
155
+ Mary walks
156
+
157
+ >>> from nltk.tree.prettyprinter import TreePrettyPrinter
158
+ >>> print(TreePrettyPrinter(tree).text())
159
+ S
160
+ ____|____
161
+ NP VP
162
+ | |
163
+ Mary walks
164
+
165
+ - Deprecated, do not use::
166
+
167
+ >>> from nltk.treeprettyprinter import TreePrettyPrinter
168
+ >>> print(TreePrettyPrinter(tree).text())
169
+ S
170
+ ____|____
171
+ NP VP
172
+ | |
173
+ Mary walks
174
+
175
+ This method will throw a DeprecationWarning::
176
+
177
+ Import `TreePrettyPrinter` using `from nltk.tree import TreePrettyPrinter` instead.
env-llmeval/lib/python3.10/site-packages/nltk/test/treetransforms.doctest ADDED
@@ -0,0 +1,154 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ -------------------------------------------
5
+ Unit tests for the TreeTransformation class
6
+ -------------------------------------------
7
+
8
+ >>> from copy import deepcopy
9
+ >>> from nltk.tree import Tree, collapse_unary, chomsky_normal_form, un_chomsky_normal_form
10
+
11
+ >>> tree_string = "(TOP (S (S (VP (VBN Turned) (ADVP (RB loose)) (PP (IN in) (NP (NP (NNP Shane) (NNP Longman) (POS 's)) (NN trading) (NN room))))) (, ,) (NP (DT the) (NN yuppie) (NNS dealers)) (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) (. .)))"
12
+
13
+ >>> tree = Tree.fromstring(tree_string)
14
+ >>> print(tree)
15
+ (TOP
16
+ (S
17
+ (S
18
+ (VP
19
+ (VBN Turned)
20
+ (ADVP (RB loose))
21
+ (PP
22
+ (IN in)
23
+ (NP
24
+ (NP (NNP Shane) (NNP Longman) (POS 's))
25
+ (NN trading)
26
+ (NN room)))))
27
+ (, ,)
28
+ (NP (DT the) (NN yuppie) (NNS dealers))
29
+ (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
30
+ (. .)))
31
+
32
+ Make a copy of the original tree and collapse the subtrees with only one child
33
+
34
+ >>> collapsedTree = deepcopy(tree)
35
+ >>> collapse_unary(collapsedTree)
36
+ >>> print(collapsedTree)
37
+ (TOP
38
+ (S
39
+ (S+VP
40
+ (VBN Turned)
41
+ (ADVP (RB loose))
42
+ (PP
43
+ (IN in)
44
+ (NP
45
+ (NP (NNP Shane) (NNP Longman) (POS 's))
46
+ (NN trading)
47
+ (NN room))))
48
+ (, ,)
49
+ (NP (DT the) (NN yuppie) (NNS dealers))
50
+ (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
51
+ (. .)))
52
+
53
+ >>> collapsedTree2 = deepcopy(tree)
54
+ >>> collapse_unary(collapsedTree2, collapsePOS=True, collapseRoot=True)
55
+ >>> print(collapsedTree2)
56
+ (TOP+S
57
+ (S+VP
58
+ (VBN Turned)
59
+ (ADVP+RB loose)
60
+ (PP
61
+ (IN in)
62
+ (NP
63
+ (NP (NNP Shane) (NNP Longman) (POS 's))
64
+ (NN trading)
65
+ (NN room))))
66
+ (, ,)
67
+ (NP (DT the) (NN yuppie) (NNS dealers))
68
+ (VP (AUX do) (NP (NP+RB little) (ADJP+RB right)))
69
+ (. .))
70
+
71
+ Convert the tree to Chomsky Normal Form i.e. each subtree has either two
72
+ subtree children or a single leaf value. This conversion can be performed
73
+ using either left- or right-factoring.
74
+
75
+ >>> cnfTree = deepcopy(collapsedTree)
76
+ >>> chomsky_normal_form(cnfTree, factor='left')
77
+ >>> print(cnfTree)
78
+ (TOP
79
+ (S
80
+ (S|<S+VP-,-NP-VP>
81
+ (S|<S+VP-,-NP>
82
+ (S|<S+VP-,>
83
+ (S+VP
84
+ (S+VP|<VBN-ADVP> (VBN Turned) (ADVP (RB loose)))
85
+ (PP
86
+ (IN in)
87
+ (NP
88
+ (NP|<NP-NN>
89
+ (NP
90
+ (NP|<NNP-NNP> (NNP Shane) (NNP Longman))
91
+ (POS 's))
92
+ (NN trading))
93
+ (NN room))))
94
+ (, ,))
95
+ (NP (NP|<DT-NN> (DT the) (NN yuppie)) (NNS dealers)))
96
+ (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))))
97
+ (. .)))
98
+
99
+ >>> cnfTree = deepcopy(collapsedTree)
100
+ >>> chomsky_normal_form(cnfTree, factor='right')
101
+ >>> print(cnfTree)
102
+ (TOP
103
+ (S
104
+ (S+VP
105
+ (VBN Turned)
106
+ (S+VP|<ADVP-PP>
107
+ (ADVP (RB loose))
108
+ (PP
109
+ (IN in)
110
+ (NP
111
+ (NP (NNP Shane) (NP|<NNP-POS> (NNP Longman) (POS 's)))
112
+ (NP|<NN-NN> (NN trading) (NN room))))))
113
+ (S|<,-NP-VP-.>
114
+ (, ,)
115
+ (S|<NP-VP-.>
116
+ (NP (DT the) (NP|<NN-NNS> (NN yuppie) (NNS dealers)))
117
+ (S|<VP-.>
118
+ (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
119
+ (. .))))))
120
+
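The same transformations are also available as methods on ``Tree`` itself.
The sketch below is an editorial illustration (not part of the upstream
doctest); it assumes the method form mirrors the functions used above and
simply round-trips a small tree:

>>> t = Tree.fromstring('(S (NP (DT the) (NN dog)) (VP (VBD barked)))')
>>> t2 = t.copy(deep=True)
>>> t2.chomsky_normal_form(factor='right')   # binarize (no-op here: all nodes already have <= 2 children)
>>> t2.un_chomsky_normal_form()              # undo the transformation
>>> t2 == t
True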
121
+ Employ some Markov smoothing to make the artificial node labels a bit more
122
+ readable. See the treetransforms.py documentation for more details.
123
+
124
+ >>> markovTree = deepcopy(collapsedTree)
125
+ >>> chomsky_normal_form(markovTree, horzMarkov=2, vertMarkov=1)
126
+ >>> print(markovTree)
127
+ (TOP
128
+ (S^<TOP>
129
+ (S+VP^<S>
130
+ (VBN Turned)
131
+ (S+VP|<ADVP-PP>^<S>
132
+ (ADVP^<S+VP> (RB loose))
133
+ (PP^<S+VP>
134
+ (IN in)
135
+ (NP^<PP>
136
+ (NP^<NP>
137
+ (NNP Shane)
138
+ (NP|<NNP-POS>^<NP> (NNP Longman) (POS 's)))
139
+ (NP|<NN-NN>^<PP> (NN trading) (NN room))))))
140
+ (S|<,-NP>^<TOP>
141
+ (, ,)
142
+ (S|<NP-VP>^<TOP>
143
+ (NP^<S> (DT the) (NP|<NN-NNS>^<S> (NN yuppie) (NNS dealers)))
144
+ (S|<VP-.>^<TOP>
145
+ (VP^<S>
146
+ (AUX do)
147
+ (NP^<VP> (NP^<NP> (RB little)) (ADJP^<NP> (RB right))))
148
+ (. .))))))
149
+
150
+ Convert the transformed tree back to its original form
151
+
152
+ >>> un_chomsky_normal_form(markovTree)
153
+ >>> tree == markovTree
154
+ True
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_distance.cpython-310.pyc ADDED
Binary file (2.2 kB)
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_aline.py ADDED
@@ -0,0 +1,48 @@
1
+ """
2
+ Test Aline algorithm for aligning phonetic sequences
3
+ """
4
+ from nltk.metrics import aline
5
+
6
+
7
+ def test_aline():
8
+ result = aline.align("θin", "tenwis")
9
+ expected = [[("θ", "t"), ("i", "e"), ("n", "n")]]
10
+
11
+ assert result == expected
12
+
13
+ result = aline.align("jo", "ʒə")
14
+ expected = [[("j", "ʒ"), ("o", "ə")]]
15
+
16
+ assert result == expected
17
+
18
+ result = aline.align("pematesiweni", "pematesewen")
19
+ expected = [
20
+ [
21
+ ("p", "p"),
22
+ ("e", "e"),
23
+ ("m", "m"),
24
+ ("a", "a"),
25
+ ("t", "t"),
26
+ ("e", "e"),
27
+ ("s", "s"),
28
+ ("i", "e"),
29
+ ("w", "w"),
30
+ ("e", "e"),
31
+ ("n", "n"),
32
+ ]
33
+ ]
34
+
35
+ assert result == expected
36
+
37
+ result = aline.align("tuwθ", "dentis")
38
+ expected = [[("t", "t"), ("u", "i"), ("w", "-"), ("θ", "s")]]
39
+
40
+ assert result == expected
41
+
42
+
43
+ def test_aline_delta():
44
+ """
45
+ Test aline for computing the difference between two segments
46
+ """
47
+ assert aline.delta("p", "q") == 20.0
48
+ assert aline.delta("a", "A") == 0.0
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_bllip.py ADDED
@@ -0,0 +1,42 @@
1
+ import pytest
2
+
3
+ from nltk.data import find
4
+ from nltk.parse.bllip import BllipParser
5
+ from nltk.tree import Tree
6
+
7
+
8
+ @pytest.fixture(scope="module")
9
+ def parser():
10
+ model_dir = find("models/bllip_wsj_no_aux").path
11
+ return BllipParser.from_unified_model_dir(model_dir)
12
+
13
+
14
+ def setup_module():
15
+ pytest.importorskip("bllipparser")
16
+
17
+
18
+ class TestBllipParser:
19
+ def test_parser_loads_a_valid_tree(self, parser):
20
+ parsed = parser.parse("I saw the man with the telescope")
21
+ tree = next(parsed)
22
+
23
+ assert isinstance(tree, Tree)
24
+ assert (
25
+ tree.pformat()
26
+ == """
27
+ (S1
28
+ (S
29
+ (NP (PRP I))
30
+ (VP
31
+ (VBD saw)
32
+ (NP (DT the) (NN man))
33
+ (PP (IN with) (NP (DT the) (NN telescope))))))
34
+ """.strip()
35
+ )
36
+
37
+ def test_tagged_parse_finds_matching_element(self, parser):
38
+ parsed = parser.parse("I saw the man with the telescope")
39
+ tagged_tree = next(parser.tagged_parse([("telescope", "NN")]))
40
+
41
+ assert isinstance(tagged_tree, Tree)
42
+ assert tagged_tree.pformat() == "(S1 (NP (NN telescope)))"
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_brill.py ADDED
@@ -0,0 +1,34 @@
1
+ """
2
+ Tests for Brill tagger.
3
+ """
4
+
5
+ import unittest
6
+
7
+ from nltk.corpus import treebank
8
+ from nltk.tag import UnigramTagger, brill, brill_trainer
9
+ from nltk.tbl import demo
10
+
11
+
12
+ class TestBrill(unittest.TestCase):
13
+ def test_pos_template(self):
14
+ train_sents = treebank.tagged_sents()[:1000]
15
+ tagger = UnigramTagger(train_sents)
16
+ trainer = brill_trainer.BrillTaggerTrainer(
17
+ tagger, [brill.Template(brill.Pos([-1]))]
18
+ )
19
+ brill_tagger = trainer.train(train_sents)
20
+ # Example from https://github.com/nltk/nltk/issues/769
21
+ result = brill_tagger.tag("This is a foo bar sentence".split())
22
+ expected = [
23
+ ("This", "DT"),
24
+ ("is", "VBZ"),
25
+ ("a", "DT"),
26
+ ("foo", None),
27
+ ("bar", "NN"),
28
+ ("sentence", None),
29
+ ]
30
+ self.assertEqual(result, expected)
31
+
32
+ @unittest.skip("Should be tested in __main__ of nltk.tbl.demo")
33
+ def test_brill_demo(self):
34
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_cfd_mutation.py ADDED
@@ -0,0 +1,39 @@
1
+ import unittest
2
+
3
+ import pytest
4
+
5
+ from nltk import ConditionalFreqDist, tokenize
6
+
7
+
8
+ class TestEmptyCondFreq(unittest.TestCase):
9
+ def test_tabulate(self):
10
+ empty = ConditionalFreqDist()
11
+ self.assertEqual(empty.conditions(), [])
12
+ with pytest.raises(ValueError):
13
+ empty.tabulate(conditions="BUG") # nonexistent keys shouldn't be added
14
+ self.assertEqual(empty.conditions(), [])
15
+
16
+ def test_plot(self):
17
+ empty = ConditionalFreqDist()
18
+ self.assertEqual(empty.conditions(), [])
19
+ empty.plot(conditions=["BUG"]) # nonexistent keys shouldn't be added
20
+ self.assertEqual(empty.conditions(), [])
21
+
22
+ def test_increment(self):
23
+ # make sure that we can still mutate cfd normally
24
+ text = "cow cat mouse cat tiger"
25
+ cfd = ConditionalFreqDist()
26
+
27
+ # create cfd with word length as condition
28
+ for word in tokenize.word_tokenize(text):
29
+ condition = len(word)
30
+ cfd[condition][word] += 1
31
+
32
+ self.assertEqual(cfd.conditions(), [3, 5])
33
+
34
+ # incrementing previously unseen key is still possible
35
+ cfd[2]["hi"] += 1
36
+ self.assertCountEqual(cfd.conditions(), [3, 5, 2]) # new condition added
37
+ self.assertEqual(
38
+ cfd[2]["hi"], 1
39
+ ) # key's frequency incremented from 0 (unseen) to 1
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py ADDED
@@ -0,0 +1,49 @@
1
+ import unittest
2
+
3
+ import nltk
4
+ from nltk.grammar import CFG
5
+
6
+
7
+ class ChomskyNormalFormForCFGTest(unittest.TestCase):
8
+ def test_simple(self):
9
+ grammar = CFG.fromstring(
10
+ """
11
+ S -> NP VP
12
+ PP -> P NP
13
+ NP -> Det N | NP PP P
14
+ VP -> V NP | VP PP
15
+ VP -> Det
16
+ Det -> 'a' | 'the'
17
+ N -> 'dog' | 'cat'
18
+ V -> 'chased' | 'sat'
19
+ P -> 'on' | 'in'
20
+ """
21
+ )
22
+ self.assertFalse(grammar.is_flexible_chomsky_normal_form())
23
+ self.assertFalse(grammar.is_chomsky_normal_form())
24
+ grammar = grammar.chomsky_normal_form(flexible=True)
25
+ self.assertTrue(grammar.is_flexible_chomsky_normal_form())
26
+ self.assertFalse(grammar.is_chomsky_normal_form())
27
+
28
+ grammar2 = CFG.fromstring(
29
+ """
30
+ S -> NP VP
31
+ NP -> VP N P
32
+ VP -> P
33
+ N -> 'dog' | 'cat'
34
+ P -> 'on' | 'in'
35
+ """
36
+ )
37
+ self.assertFalse(grammar2.is_flexible_chomsky_normal_form())
38
+ self.assertFalse(grammar2.is_chomsky_normal_form())
39
+ grammar2 = grammar2.chomsky_normal_form()
40
+ self.assertTrue(grammar2.is_flexible_chomsky_normal_form())
41
+ self.assertTrue(grammar2.is_chomsky_normal_form())
42
+
43
+ def test_complex(self):
44
+ grammar = nltk.data.load("grammars/large_grammars/atis.cfg")
45
+ self.assertFalse(grammar.is_flexible_chomsky_normal_form())
46
+ self.assertFalse(grammar.is_chomsky_normal_form())
47
+ grammar = grammar.chomsky_normal_form(flexible=True)
48
+ self.assertTrue(grammar.is_flexible_chomsky_normal_form())
49
+ self.assertFalse(grammar.is_chomsky_normal_form())
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_chunk.py ADDED
@@ -0,0 +1,85 @@
1
+ import unittest
2
+
3
+ from nltk import RegexpParser
4
+
5
+
6
+ class TestChunkRule(unittest.TestCase):
7
+ def test_tag_pattern2re_pattern_quantifier(self):
8
+ """Test for bug https://github.com/nltk/nltk/issues/1597
9
+
10
+ Ensures that curly bracket quantifiers can be used inside a chunk rule.
11
+ This type of quantifier has been used for the supplementary example
12
+ in https://www.nltk.org/book/ch07.html#exploring-text-corpora.
13
+ """
14
+ sent = [
15
+ ("The", "AT"),
16
+ ("September-October", "NP"),
17
+ ("term", "NN"),
18
+ ("jury", "NN"),
19
+ ("had", "HVD"),
20
+ ("been", "BEN"),
21
+ ("charged", "VBN"),
22
+ ("by", "IN"),
23
+ ("Fulton", "NP-TL"),
24
+ ("Superior", "JJ-TL"),
25
+ ("Court", "NN-TL"),
26
+ ("Judge", "NN-TL"),
27
+ ("Durwood", "NP"),
28
+ ("Pye", "NP"),
29
+ ("to", "TO"),
30
+ ("investigate", "VB"),
31
+ ("reports", "NNS"),
32
+ ("of", "IN"),
33
+ ("possible", "JJ"),
34
+ ("``", "``"),
35
+ ("irregularities", "NNS"),
36
+ ("''", "''"),
37
+ ("in", "IN"),
38
+ ("the", "AT"),
39
+ ("hard-fought", "JJ"),
40
+ ("primary", "NN"),
41
+ ("which", "WDT"),
42
+ ("was", "BEDZ"),
43
+ ("won", "VBN"),
44
+ ("by", "IN"),
45
+ ("Mayor-nominate", "NN-TL"),
46
+ ("Ivan", "NP"),
47
+ ("Allen", "NP"),
48
+ ("Jr.", "NP"),
49
+ (".", "."),
50
+ ] # source: brown corpus
51
+ cp = RegexpParser("CHUNK: {<N.*>{4,}}")
52
+ tree = cp.parse(sent)
53
+ assert (
54
+ tree.pformat()
55
+ == """(S
56
+ The/AT
57
+ September-October/NP
58
+ term/NN
59
+ jury/NN
60
+ had/HVD
61
+ been/BEN
62
+ charged/VBN
63
+ by/IN
64
+ Fulton/NP-TL
65
+ Superior/JJ-TL
66
+ (CHUNK Court/NN-TL Judge/NN-TL Durwood/NP Pye/NP)
67
+ to/TO
68
+ investigate/VB
69
+ reports/NNS
70
+ of/IN
71
+ possible/JJ
72
+ ``/``
73
+ irregularities/NNS
74
+ ''/''
75
+ in/IN
76
+ the/AT
77
+ hard-fought/JJ
78
+ primary/NN
79
+ which/WDT
80
+ was/BEDZ
81
+ won/VBN
82
+ by/IN
83
+ (CHUNK Mayor-nominate/NN-TL Ivan/NP Allen/NP Jr./NP)
84
+ ./.)"""
85
+ )
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_classify.py ADDED
@@ -0,0 +1,49 @@
1
+ """
2
+ Unit tests for nltk.classify. See also: nltk/test/classify.doctest
3
+ """
4
+ import pytest
5
+
6
+ from nltk import classify
7
+
8
+ TRAIN = [
9
+ (dict(a=1, b=1, c=1), "y"),
10
+ (dict(a=1, b=1, c=1), "x"),
11
+ (dict(a=1, b=1, c=0), "y"),
12
+ (dict(a=0, b=1, c=1), "x"),
13
+ (dict(a=0, b=1, c=1), "y"),
14
+ (dict(a=0, b=0, c=1), "y"),
15
+ (dict(a=0, b=1, c=0), "x"),
16
+ (dict(a=0, b=0, c=0), "x"),
17
+ (dict(a=0, b=1, c=1), "y"),
18
+ ]
19
+
20
+ TEST = [
21
+ (dict(a=1, b=0, c=1)), # unseen
22
+ (dict(a=1, b=0, c=0)), # unseen
23
+ (dict(a=0, b=1, c=1)), # seen 3 times, labels=y,y,x
24
+ (dict(a=0, b=1, c=0)), # seen 1 time, label=x
25
+ ]
26
+
27
+ RESULTS = [(0.16, 0.84), (0.46, 0.54), (0.41, 0.59), (0.76, 0.24)]
28
+
29
+
30
+ def assert_classifier_correct(algorithm):
31
+ try:
32
+ classifier = classify.MaxentClassifier.train(
33
+ TRAIN, algorithm, trace=0, max_iter=1000
34
+ )
35
+ except (LookupError, AttributeError) as e:
36
+ pytest.skip(str(e))
37
+
38
+ for (px, py), featureset in zip(RESULTS, TEST):
39
+ pdist = classifier.prob_classify(featureset)
40
+ assert abs(pdist.prob("x") - px) < 1e-2, (pdist.prob("x"), px)
41
+ assert abs(pdist.prob("y") - py) < 1e-2, (pdist.prob("y"), py)
42
+
43
+
44
+ def test_megam():
45
+ assert_classifier_correct("MEGAM")
46
+
47
+
48
+ def test_tadm():
49
+ assert_classifier_correct("TADM")
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_collocations.py ADDED
@@ -0,0 +1,120 @@
1
+ from nltk.collocations import BigramCollocationFinder
2
+ from nltk.metrics import BigramAssocMeasures
3
+
4
+ ## Test bigram counters with discontinuous bigrams and repeated words
5
+
6
+ _EPSILON = 1e-8
7
+ SENT = "this this is is a a test test".split()
8
+
9
+
10
+ def close_enough(x, y):
11
+ """Verify that two sequences of n-gram association values are within
12
+ _EPSILON of each other.
13
+ """
14
+
15
+ return all(abs(x1[1] - y1[1]) <= _EPSILON for x1, y1 in zip(x, y))
16
+
17
+
18
+ def test_bigram2():
19
+ b = BigramCollocationFinder.from_words(SENT)
20
+
21
+ assert sorted(b.ngram_fd.items()) == [
22
+ (("a", "a"), 1),
23
+ (("a", "test"), 1),
24
+ (("is", "a"), 1),
25
+ (("is", "is"), 1),
26
+ (("test", "test"), 1),
27
+ (("this", "is"), 1),
28
+ (("this", "this"), 1),
29
+ ]
30
+ assert sorted(b.word_fd.items()) == [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
31
+
32
+ assert len(SENT) == sum(b.word_fd.values()) == sum(b.ngram_fd.values()) + 1
33
+ assert close_enough(
34
+ sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
35
+ [
36
+ (("a", "a"), 1.0),
37
+ (("a", "test"), 1.0),
38
+ (("is", "a"), 1.0),
39
+ (("is", "is"), 1.0),
40
+ (("test", "test"), 1.0),
41
+ (("this", "is"), 1.0),
42
+ (("this", "this"), 1.0),
43
+ ],
44
+ )
45
+
46
+
47
+ def test_bigram3():
48
+ b = BigramCollocationFinder.from_words(SENT, window_size=3)
49
+ assert sorted(b.ngram_fd.items()) == sorted(
50
+ [
51
+ (("a", "test"), 3),
52
+ (("is", "a"), 3),
53
+ (("this", "is"), 3),
54
+ (("a", "a"), 1),
55
+ (("is", "is"), 1),
56
+ (("test", "test"), 1),
57
+ (("this", "this"), 1),
58
+ ]
59
+ )
60
+
61
+ assert sorted(b.word_fd.items()) == sorted(
62
+ [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
63
+ )
64
+
65
+ assert (
66
+ len(SENT) == sum(b.word_fd.values()) == (sum(b.ngram_fd.values()) + 2 + 1) / 2.0
67
+ )
68
+ assert close_enough(
69
+ sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
70
+ sorted(
71
+ [
72
+ (("a", "test"), 1.584962500721156),
73
+ (("is", "a"), 1.584962500721156),
74
+ (("this", "is"), 1.584962500721156),
75
+ (("a", "a"), 0.0),
76
+ (("is", "is"), 0.0),
77
+ (("test", "test"), 0.0),
78
+ (("this", "this"), 0.0),
79
+ ]
80
+ ),
81
+ )
82
+
83
+
84
+ def test_bigram5():
85
+ b = BigramCollocationFinder.from_words(SENT, window_size=5)
86
+ assert sorted(b.ngram_fd.items()) == sorted(
87
+ [
88
+ (("a", "test"), 4),
89
+ (("is", "a"), 4),
90
+ (("this", "is"), 4),
91
+ (("is", "test"), 3),
92
+ (("this", "a"), 3),
93
+ (("a", "a"), 1),
94
+ (("is", "is"), 1),
95
+ (("test", "test"), 1),
96
+ (("this", "this"), 1),
97
+ ]
98
+ )
99
+ assert sorted(b.word_fd.items()) == sorted(
100
+ [("a", 2), ("is", 2), ("test", 2), ("this", 2)]
101
+ )
102
+ n_word_fd = sum(b.word_fd.values())
103
+ n_ngram_fd = (sum(b.ngram_fd.values()) + 4 + 3 + 2 + 1) / 4.0
104
+ assert len(SENT) == n_word_fd == n_ngram_fd
105
+ assert close_enough(
106
+ sorted(b.score_ngrams(BigramAssocMeasures.pmi)),
107
+ sorted(
108
+ [
109
+ (("a", "test"), 1.0),
110
+ (("is", "a"), 1.0),
111
+ (("this", "is"), 1.0),
112
+ (("is", "test"), 0.5849625007211562),
113
+ (("this", "a"), 0.5849625007211562),
114
+ (("a", "a"), -1.0),
115
+ (("is", "is"), -1.0),
116
+ (("test", "test"), -1.0),
117
+ (("this", "this"), -1.0),
118
+ ]
119
+ ),
120
+ )
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py ADDED
@@ -0,0 +1,98 @@
1
+ import contextlib
2
+ import sys
3
+ import unittest
4
+ from io import StringIO
5
+
6
+ from nltk.corpus import gutenberg
7
+ from nltk.text import Text
8
+
9
+
10
+ @contextlib.contextmanager
11
+ def stdout_redirect(where):
12
+ sys.stdout = where
13
+ try:
14
+ yield where
15
+ finally:
16
+ sys.stdout = sys.__stdout__
17
+
18
+
19
+ class TestConcordance(unittest.TestCase):
20
+ """Text constructed using: https://www.nltk.org/book/ch01.html"""
21
+
22
+ @classmethod
23
+ def setUpClass(cls):
24
+ cls.corpus = gutenberg.words("melville-moby_dick.txt")
25
+
26
+ @classmethod
27
+ def tearDownClass(cls):
28
+ pass
29
+
30
+ def setUp(self):
31
+ self.text = Text(TestConcordance.corpus)
32
+ self.query = "monstrous"
33
+ self.maxDiff = None
34
+ self.list_out = [
35
+ "ong the former , one was of a most monstrous size . ... This came towards us , ",
36
+ 'ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r',
37
+ "ll over with a heathenish array of monstrous clubs and spears . Some were thick",
38
+ "d as you gazed , and wondered what monstrous cannibal and savage could ever hav",
39
+ "that has survived the flood ; most monstrous and most mountainous ! That Himmal",
40
+ "they might scout at Moby Dick as a monstrous fable , or still worse and more de",
41
+ "th of Radney .'\" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l",
42
+ "ing Scenes . In connexion with the monstrous pictures of whales , I am strongly",
43
+ "ere to enter upon those still more monstrous stories of them which are to be fo",
44
+ "ght have been rummaged out of this monstrous cabinet there is no telling . But ",
45
+ "of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u",
46
+ ]
47
+
48
+ def tearDown(self):
49
+ pass
50
+
51
+ def test_concordance_list(self):
52
+ concordance_out = self.text.concordance_list(self.query)
53
+ self.assertEqual(self.list_out, [c.line for c in concordance_out])
54
+
55
+ def test_concordance_width(self):
56
+ list_out = [
57
+ "monstrous",
58
+ "monstrous",
59
+ "monstrous",
60
+ "monstrous",
61
+ "monstrous",
62
+ "monstrous",
63
+ "Monstrous",
64
+ "monstrous",
65
+ "monstrous",
66
+ "monstrous",
67
+ "monstrous",
68
+ ]
69
+
70
+ concordance_out = self.text.concordance_list(self.query, width=0)
71
+ self.assertEqual(list_out, [c.query for c in concordance_out])
72
+
73
+ def test_concordance_lines(self):
74
+ concordance_out = self.text.concordance_list(self.query, lines=3)
75
+ self.assertEqual(self.list_out[:3], [c.line for c in concordance_out])
76
+
77
+ def test_concordance_print(self):
78
+ print_out = """Displaying 11 of 11 matches:
79
+ ong the former , one was of a most monstrous size . ... This came towards us ,
80
+ ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
81
+ ll over with a heathenish array of monstrous clubs and spears . Some were thick
82
+ d as you gazed , and wondered what monstrous cannibal and savage could ever hav
83
+ that has survived the flood ; most monstrous and most mountainous ! That Himmal
84
+ they might scout at Moby Dick as a monstrous fable , or still worse and more de
85
+ th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
86
+ ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
87
+ ere to enter upon those still more monstrous stories of them which are to be fo
88
+ ght have been rummaged out of this monstrous cabinet there is no telling . But
89
+ of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u
90
+ """
91
+
92
+ with stdout_redirect(StringIO()) as stdout:
93
+ self.text.concordance(self.query)
94
+
95
+ def strip_space(raw_str):
96
+ return raw_str.replace(" ", "")
97
+
98
+ self.assertEqual(strip_space(print_out), strip_space(stdout.getvalue()))
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_corenlp.py ADDED
@@ -0,0 +1,1436 @@
1
+ """
2
+ Mock test for Stanford CoreNLP wrappers.
3
+ """
4
+
5
+ from unittest import TestCase
6
+ from unittest.mock import MagicMock
7
+
8
+ import pytest
9
+
10
+ from nltk.parse import corenlp
11
+ from nltk.tree import Tree
12
+
13
+
14
+ def setup_module(module):
15
+ global server
16
+
17
+ try:
18
+ server = corenlp.CoreNLPServer(port=9000)
19
+ except LookupError:
20
+ pytest.skip("Could not instantiate CoreNLPServer.")
21
+
22
+ try:
23
+ server.start()
24
+ except corenlp.CoreNLPServerError as e:
25
+ pytest.skip(
26
+ "Skipping CoreNLP tests because the server could not be started. "
27
+ "Make sure that the 9000 port is free. "
28
+ "{}".format(e.strerror)
29
+ )
30
+
31
+
32
+ def teardown_module(module):
33
+ server.stop()
34
+
35
+
36
+ class TestTokenizerAPI(TestCase):
37
+ def test_tokenize(self):
38
+ corenlp_tokenizer = corenlp.CoreNLPParser()
39
+
40
+ api_return_value = {
41
+ "sentences": [
42
+ {
43
+ "index": 0,
44
+ "tokens": [
45
+ {
46
+ "after": " ",
47
+ "before": "",
48
+ "characterOffsetBegin": 0,
49
+ "characterOffsetEnd": 4,
50
+ "index": 1,
51
+ "originalText": "Good",
52
+ "word": "Good",
53
+ },
54
+ {
55
+ "after": " ",
56
+ "before": " ",
57
+ "characterOffsetBegin": 5,
58
+ "characterOffsetEnd": 12,
59
+ "index": 2,
60
+ "originalText": "muffins",
61
+ "word": "muffins",
62
+ },
63
+ {
64
+ "after": " ",
65
+ "before": " ",
66
+ "characterOffsetBegin": 13,
67
+ "characterOffsetEnd": 17,
68
+ "index": 3,
69
+ "originalText": "cost",
70
+ "word": "cost",
71
+ },
72
+ {
73
+ "after": "",
74
+ "before": " ",
75
+ "characterOffsetBegin": 18,
76
+ "characterOffsetEnd": 19,
77
+ "index": 4,
78
+ "originalText": "$",
79
+ "word": "$",
80
+ },
81
+ {
82
+ "after": "\n",
83
+ "before": "",
84
+ "characterOffsetBegin": 19,
85
+ "characterOffsetEnd": 23,
86
+ "index": 5,
87
+ "originalText": "3.88",
88
+ "word": "3.88",
89
+ },
90
+ {
91
+ "after": " ",
92
+ "before": "\n",
93
+ "characterOffsetBegin": 24,
94
+ "characterOffsetEnd": 26,
95
+ "index": 6,
96
+ "originalText": "in",
97
+ "word": "in",
98
+ },
99
+ {
100
+ "after": " ",
101
+ "before": " ",
102
+ "characterOffsetBegin": 27,
103
+ "characterOffsetEnd": 30,
104
+ "index": 7,
105
+ "originalText": "New",
106
+ "word": "New",
107
+ },
108
+ {
109
+ "after": "",
110
+ "before": " ",
111
+ "characterOffsetBegin": 31,
112
+ "characterOffsetEnd": 35,
113
+ "index": 8,
114
+ "originalText": "York",
115
+ "word": "York",
116
+ },
117
+ {
118
+ "after": " ",
119
+ "before": "",
120
+ "characterOffsetBegin": 35,
121
+ "characterOffsetEnd": 36,
122
+ "index": 9,
123
+ "originalText": ".",
124
+ "word": ".",
125
+ },
126
+ ],
127
+ },
128
+ {
129
+ "index": 1,
130
+ "tokens": [
131
+ {
132
+ "after": " ",
133
+ "before": " ",
134
+ "characterOffsetBegin": 38,
135
+ "characterOffsetEnd": 44,
136
+ "index": 1,
137
+ "originalText": "Please",
138
+ "word": "Please",
139
+ },
140
+ {
141
+ "after": " ",
142
+ "before": " ",
143
+ "characterOffsetBegin": 45,
144
+ "characterOffsetEnd": 48,
145
+ "index": 2,
146
+ "originalText": "buy",
147
+ "word": "buy",
148
+ },
149
+ {
150
+ "after": "\n",
151
+ "before": " ",
152
+ "characterOffsetBegin": 49,
153
+ "characterOffsetEnd": 51,
154
+ "index": 3,
155
+ "originalText": "me",
156
+ "word": "me",
157
+ },
158
+ {
159
+ "after": " ",
160
+ "before": "\n",
161
+ "characterOffsetBegin": 52,
162
+ "characterOffsetEnd": 55,
163
+ "index": 4,
164
+ "originalText": "two",
165
+ "word": "two",
166
+ },
167
+ {
168
+ "after": " ",
169
+ "before": " ",
170
+ "characterOffsetBegin": 56,
171
+ "characterOffsetEnd": 58,
172
+ "index": 5,
173
+ "originalText": "of",
174
+ "word": "of",
175
+ },
176
+ {
177
+ "after": "",
178
+ "before": " ",
179
+ "characterOffsetBegin": 59,
180
+ "characterOffsetEnd": 63,
181
+ "index": 6,
182
+ "originalText": "them",
183
+ "word": "them",
184
+ },
185
+ {
186
+ "after": "\n",
187
+ "before": "",
188
+ "characterOffsetBegin": 63,
189
+ "characterOffsetEnd": 64,
190
+ "index": 7,
191
+ "originalText": ".",
192
+ "word": ".",
193
+ },
194
+ ],
195
+ },
196
+ {
197
+ "index": 2,
198
+ "tokens": [
199
+ {
200
+ "after": "",
201
+ "before": "\n",
202
+ "characterOffsetBegin": 65,
203
+ "characterOffsetEnd": 71,
204
+ "index": 1,
205
+ "originalText": "Thanks",
206
+ "word": "Thanks",
207
+ },
208
+ {
209
+ "after": "",
210
+ "before": "",
211
+ "characterOffsetBegin": 71,
212
+ "characterOffsetEnd": 72,
213
+ "index": 2,
214
+ "originalText": ".",
215
+ "word": ".",
216
+ },
217
+ ],
218
+ },
219
+ ]
220
+ }
221
+ corenlp_tokenizer.api_call = MagicMock(return_value=api_return_value)
222
+
223
+ input_string = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks."
224
+
225
+ expected_output = [
226
+ "Good",
227
+ "muffins",
228
+ "cost",
229
+ "$",
230
+ "3.88",
231
+ "in",
232
+ "New",
233
+ "York",
234
+ ".",
235
+ "Please",
236
+ "buy",
237
+ "me",
238
+ "two",
239
+ "of",
240
+ "them",
241
+ ".",
242
+ "Thanks",
243
+ ".",
244
+ ]
245
+
246
+ tokenized_output = list(corenlp_tokenizer.tokenize(input_string))
247
+
248
+ corenlp_tokenizer.api_call.assert_called_once_with(
249
+ "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.",
250
+ properties={"annotators": "tokenize,ssplit"},
251
+ )
252
+ self.assertEqual(expected_output, tokenized_output)
253
+
254
+
255
+ class TestTaggerAPI(TestCase):
256
+ def test_pos_tagger(self):
257
+ corenlp_tagger = corenlp.CoreNLPParser(tagtype="pos")
258
+
259
+ api_return_value = {
260
+ "sentences": [
261
+ {
262
+ "basicDependencies": [
263
+ {
264
+ "dep": "ROOT",
265
+ "dependent": 1,
266
+ "dependentGloss": "What",
267
+ "governor": 0,
268
+ "governorGloss": "ROOT",
269
+ },
270
+ {
271
+ "dep": "cop",
272
+ "dependent": 2,
273
+ "dependentGloss": "is",
274
+ "governor": 1,
275
+ "governorGloss": "What",
276
+ },
277
+ {
278
+ "dep": "det",
279
+ "dependent": 3,
280
+ "dependentGloss": "the",
281
+ "governor": 4,
282
+ "governorGloss": "airspeed",
283
+ },
284
+ {
285
+ "dep": "nsubj",
286
+ "dependent": 4,
287
+ "dependentGloss": "airspeed",
288
+ "governor": 1,
289
+ "governorGloss": "What",
290
+ },
291
+ {
292
+ "dep": "case",
293
+ "dependent": 5,
294
+ "dependentGloss": "of",
295
+ "governor": 8,
296
+ "governorGloss": "swallow",
297
+ },
298
+ {
299
+ "dep": "det",
300
+ "dependent": 6,
301
+ "dependentGloss": "an",
302
+ "governor": 8,
303
+ "governorGloss": "swallow",
304
+ },
305
+ {
306
+ "dep": "compound",
307
+ "dependent": 7,
308
+ "dependentGloss": "unladen",
309
+ "governor": 8,
310
+ "governorGloss": "swallow",
311
+ },
312
+ {
313
+ "dep": "nmod",
314
+ "dependent": 8,
315
+ "dependentGloss": "swallow",
316
+ "governor": 4,
317
+ "governorGloss": "airspeed",
318
+ },
319
+ {
320
+ "dep": "punct",
321
+ "dependent": 9,
322
+ "dependentGloss": "?",
323
+ "governor": 1,
324
+ "governorGloss": "What",
325
+ },
326
+ ],
327
+ "enhancedDependencies": [
328
+ {
329
+ "dep": "ROOT",
330
+ "dependent": 1,
331
+ "dependentGloss": "What",
332
+ "governor": 0,
333
+ "governorGloss": "ROOT",
334
+ },
335
+ {
336
+ "dep": "cop",
337
+ "dependent": 2,
338
+ "dependentGloss": "is",
339
+ "governor": 1,
340
+ "governorGloss": "What",
341
+ },
342
+ {
343
+ "dep": "det",
344
+ "dependent": 3,
345
+ "dependentGloss": "the",
346
+ "governor": 4,
347
+ "governorGloss": "airspeed",
348
+ },
349
+ {
350
+ "dep": "nsubj",
351
+ "dependent": 4,
352
+ "dependentGloss": "airspeed",
353
+ "governor": 1,
354
+ "governorGloss": "What",
355
+ },
356
+ {
357
+ "dep": "case",
358
+ "dependent": 5,
359
+ "dependentGloss": "of",
360
+ "governor": 8,
361
+ "governorGloss": "swallow",
362
+ },
363
+ {
364
+ "dep": "det",
365
+ "dependent": 6,
366
+ "dependentGloss": "an",
367
+ "governor": 8,
368
+ "governorGloss": "swallow",
369
+ },
370
+ {
371
+ "dep": "compound",
372
+ "dependent": 7,
373
+ "dependentGloss": "unladen",
374
+ "governor": 8,
375
+ "governorGloss": "swallow",
376
+ },
377
+ {
378
+ "dep": "nmod:of",
379
+ "dependent": 8,
380
+ "dependentGloss": "swallow",
381
+ "governor": 4,
382
+ "governorGloss": "airspeed",
383
+ },
384
+ {
385
+ "dep": "punct",
386
+ "dependent": 9,
387
+ "dependentGloss": "?",
388
+ "governor": 1,
389
+ "governorGloss": "What",
390
+ },
391
+ ],
392
+ "enhancedPlusPlusDependencies": [
393
+ {
394
+ "dep": "ROOT",
395
+ "dependent": 1,
396
+ "dependentGloss": "What",
397
+ "governor": 0,
398
+ "governorGloss": "ROOT",
399
+ },
400
+ {
401
+ "dep": "cop",
402
+ "dependent": 2,
403
+ "dependentGloss": "is",
404
+ "governor": 1,
405
+ "governorGloss": "What",
406
+ },
407
+ {
408
+ "dep": "det",
409
+ "dependent": 3,
410
+ "dependentGloss": "the",
411
+ "governor": 4,
412
+ "governorGloss": "airspeed",
413
+ },
414
+ {
415
+ "dep": "nsubj",
416
+ "dependent": 4,
417
+ "dependentGloss": "airspeed",
418
+ "governor": 1,
419
+ "governorGloss": "What",
420
+ },
421
+ {
422
+ "dep": "case",
423
+ "dependent": 5,
424
+ "dependentGloss": "of",
425
+ "governor": 8,
426
+ "governorGloss": "swallow",
427
+ },
428
+ {
429
+ "dep": "det",
430
+ "dependent": 6,
431
+ "dependentGloss": "an",
432
+ "governor": 8,
433
+ "governorGloss": "swallow",
434
+ },
435
+ {
436
+ "dep": "compound",
437
+ "dependent": 7,
438
+ "dependentGloss": "unladen",
439
+ "governor": 8,
440
+ "governorGloss": "swallow",
441
+ },
442
+ {
443
+ "dep": "nmod:of",
444
+ "dependent": 8,
445
+ "dependentGloss": "swallow",
446
+ "governor": 4,
447
+ "governorGloss": "airspeed",
448
+ },
449
+ {
450
+ "dep": "punct",
451
+ "dependent": 9,
452
+ "dependentGloss": "?",
453
+ "governor": 1,
454
+ "governorGloss": "What",
455
+ },
456
+ ],
457
+ "index": 0,
458
+ "parse": "(ROOT\n (SBARQ\n (WHNP (WP What))\n (SQ (VBZ is)\n (NP\n (NP (DT the) (NN airspeed))\n (PP (IN of)\n (NP (DT an) (NN unladen) (NN swallow)))))\n (. ?)))",
459
+ "tokens": [
460
+ {
461
+ "after": " ",
462
+ "before": "",
463
+ "characterOffsetBegin": 0,
464
+ "characterOffsetEnd": 4,
465
+ "index": 1,
466
+ "lemma": "what",
467
+ "originalText": "What",
468
+ "pos": "WP",
469
+ "word": "What",
470
+ },
471
+ {
472
+ "after": " ",
473
+ "before": " ",
474
+ "characterOffsetBegin": 5,
475
+ "characterOffsetEnd": 7,
476
+ "index": 2,
477
+ "lemma": "be",
478
+ "originalText": "is",
479
+ "pos": "VBZ",
480
+ "word": "is",
481
+ },
482
+ {
483
+ "after": " ",
484
+ "before": " ",
485
+ "characterOffsetBegin": 8,
486
+ "characterOffsetEnd": 11,
487
+ "index": 3,
488
+ "lemma": "the",
489
+ "originalText": "the",
490
+ "pos": "DT",
491
+ "word": "the",
492
+ },
493
+ {
494
+ "after": " ",
495
+ "before": " ",
496
+ "characterOffsetBegin": 12,
497
+ "characterOffsetEnd": 20,
498
+ "index": 4,
499
+ "lemma": "airspeed",
500
+ "originalText": "airspeed",
501
+ "pos": "NN",
502
+ "word": "airspeed",
503
+ },
504
+ {
505
+ "after": " ",
506
+ "before": " ",
507
+ "characterOffsetBegin": 21,
508
+ "characterOffsetEnd": 23,
509
+ "index": 5,
510
+ "lemma": "of",
511
+ "originalText": "of",
512
+ "pos": "IN",
513
+ "word": "of",
514
+ },
515
+ {
516
+ "after": " ",
517
+ "before": " ",
518
+ "characterOffsetBegin": 24,
519
+ "characterOffsetEnd": 26,
520
+ "index": 6,
521
+ "lemma": "a",
522
+ "originalText": "an",
523
+ "pos": "DT",
524
+ "word": "an",
525
+ },
526
+ {
527
+ "after": " ",
528
+ "before": " ",
529
+ "characterOffsetBegin": 27,
530
+ "characterOffsetEnd": 34,
531
+ "index": 7,
532
+ "lemma": "unladen",
533
+ "originalText": "unladen",
534
+ "pos": "JJ",
535
+ "word": "unladen",
536
+ },
537
+ {
538
+ "after": " ",
539
+ "before": " ",
540
+ "characterOffsetBegin": 35,
541
+ "characterOffsetEnd": 42,
542
+ "index": 8,
543
+ "lemma": "swallow",
544
+ "originalText": "swallow",
545
+ "pos": "VB",
546
+ "word": "swallow",
547
+ },
548
+ {
549
+ "after": "",
550
+ "before": " ",
551
+ "characterOffsetBegin": 43,
552
+ "characterOffsetEnd": 44,
553
+ "index": 9,
554
+ "lemma": "?",
555
+ "originalText": "?",
556
+ "pos": ".",
557
+ "word": "?",
558
+ },
559
+ ],
560
+ }
561
+ ]
562
+ }
+        corenlp_tagger.api_call = MagicMock(return_value=api_return_value)
+
+        input_tokens = "What is the airspeed of an unladen swallow ?".split()
+        expected_output = [
+            ("What", "WP"),
+            ("is", "VBZ"),
+            ("the", "DT"),
+            ("airspeed", "NN"),
+            ("of", "IN"),
+            ("an", "DT"),
+            ("unladen", "JJ"),
+            ("swallow", "VB"),
+            ("?", "."),
+        ]
+        tagged_output = corenlp_tagger.tag(input_tokens)
+
+        corenlp_tagger.api_call.assert_called_once_with(
+            "What is the airspeed of an unladen swallow ?",
+            properties={
+                "ssplit.isOneSentence": "true",
+                "annotators": "tokenize,ssplit,pos",
+            },
+        )
+        self.assertEqual(expected_output, tagged_output)
+
+    def test_ner_tagger(self):
+        corenlp_tagger = corenlp.CoreNLPParser(tagtype="ner")
+
591
+ api_return_value = {
592
+ "sentences": [
593
+ {
594
+ "index": 0,
595
+ "tokens": [
596
+ {
597
+ "after": " ",
598
+ "before": "",
599
+ "characterOffsetBegin": 0,
600
+ "characterOffsetEnd": 4,
601
+ "index": 1,
602
+ "lemma": "Rami",
603
+ "ner": "PERSON",
604
+ "originalText": "Rami",
605
+ "pos": "NNP",
606
+ "word": "Rami",
607
+ },
608
+ {
609
+ "after": " ",
610
+ "before": " ",
611
+ "characterOffsetBegin": 5,
612
+ "characterOffsetEnd": 8,
613
+ "index": 2,
614
+ "lemma": "Eid",
615
+ "ner": "PERSON",
616
+ "originalText": "Eid",
617
+ "pos": "NNP",
618
+ "word": "Eid",
619
+ },
620
+ {
621
+ "after": " ",
622
+ "before": " ",
623
+ "characterOffsetBegin": 9,
624
+ "characterOffsetEnd": 11,
625
+ "index": 3,
626
+ "lemma": "be",
627
+ "ner": "O",
628
+ "originalText": "is",
629
+ "pos": "VBZ",
630
+ "word": "is",
631
+ },
632
+ {
633
+ "after": " ",
634
+ "before": " ",
635
+ "characterOffsetBegin": 12,
636
+ "characterOffsetEnd": 20,
637
+ "index": 4,
638
+ "lemma": "study",
639
+ "ner": "O",
640
+ "originalText": "studying",
641
+ "pos": "VBG",
642
+ "word": "studying",
643
+ },
644
+ {
645
+ "after": " ",
646
+ "before": " ",
647
+ "characterOffsetBegin": 21,
648
+ "characterOffsetEnd": 23,
649
+ "index": 5,
650
+ "lemma": "at",
651
+ "ner": "O",
652
+ "originalText": "at",
653
+ "pos": "IN",
654
+ "word": "at",
655
+ },
656
+ {
657
+ "after": " ",
658
+ "before": " ",
659
+ "characterOffsetBegin": 24,
660
+ "characterOffsetEnd": 29,
661
+ "index": 6,
662
+ "lemma": "Stony",
663
+ "ner": "ORGANIZATION",
664
+ "originalText": "Stony",
665
+ "pos": "NNP",
666
+ "word": "Stony",
667
+ },
668
+ {
669
+ "after": " ",
670
+ "before": " ",
671
+ "characterOffsetBegin": 30,
672
+ "characterOffsetEnd": 35,
673
+ "index": 7,
674
+ "lemma": "Brook",
675
+ "ner": "ORGANIZATION",
676
+ "originalText": "Brook",
677
+ "pos": "NNP",
678
+ "word": "Brook",
679
+ },
680
+ {
681
+ "after": " ",
682
+ "before": " ",
683
+ "characterOffsetBegin": 36,
684
+ "characterOffsetEnd": 46,
685
+ "index": 8,
686
+ "lemma": "University",
687
+ "ner": "ORGANIZATION",
688
+ "originalText": "University",
689
+ "pos": "NNP",
690
+ "word": "University",
691
+ },
692
+ {
693
+ "after": " ",
694
+ "before": " ",
695
+ "characterOffsetBegin": 47,
696
+ "characterOffsetEnd": 49,
697
+ "index": 9,
698
+ "lemma": "in",
699
+ "ner": "O",
700
+ "originalText": "in",
701
+ "pos": "IN",
702
+ "word": "in",
703
+ },
704
+ {
705
+ "after": "",
706
+ "before": " ",
707
+ "characterOffsetBegin": 50,
708
+ "characterOffsetEnd": 52,
709
+ "index": 10,
710
+ "lemma": "NY",
711
+ "ner": "O",
712
+ "originalText": "NY",
713
+ "pos": "NNP",
714
+ "word": "NY",
715
+ },
716
+ ],
717
+ }
718
+ ]
719
+ }
+
+        corenlp_tagger.api_call = MagicMock(return_value=api_return_value)
+
+        input_tokens = "Rami Eid is studying at Stony Brook University in NY".split()
+        expected_output = [
+            ("Rami", "PERSON"),
+            ("Eid", "PERSON"),
+            ("is", "O"),
+            ("studying", "O"),
+            ("at", "O"),
+            ("Stony", "ORGANIZATION"),
+            ("Brook", "ORGANIZATION"),
+            ("University", "ORGANIZATION"),
+            ("in", "O"),
+            ("NY", "O"),
+        ]
+        tagged_output = corenlp_tagger.tag(input_tokens)
+
+        corenlp_tagger.api_call.assert_called_once_with(
+            "Rami Eid is studying at Stony Brook University in NY",
+            properties={
+                "ssplit.isOneSentence": "true",
+                "annotators": "tokenize,ssplit,ner",
+            },
+        )
+        self.assertEqual(expected_output, tagged_output)
+
+    def test_unexpected_tagtype(self):
+        with self.assertRaises(ValueError):
+            corenlp_tagger = corenlp.CoreNLPParser(tagtype="test")
+
+
+class TestParserAPI(TestCase):
+    def test_parse(self):
+        corenlp_parser = corenlp.CoreNLPParser()
+
756
+ api_return_value = {
757
+ "sentences": [
758
+ {
759
+ "basicDependencies": [
760
+ {
761
+ "dep": "ROOT",
762
+ "dependent": 4,
763
+ "dependentGloss": "fox",
764
+ "governor": 0,
765
+ "governorGloss": "ROOT",
766
+ },
767
+ {
768
+ "dep": "det",
769
+ "dependent": 1,
770
+ "dependentGloss": "The",
771
+ "governor": 4,
772
+ "governorGloss": "fox",
773
+ },
774
+ {
775
+ "dep": "amod",
776
+ "dependent": 2,
777
+ "dependentGloss": "quick",
778
+ "governor": 4,
779
+ "governorGloss": "fox",
780
+ },
781
+ {
782
+ "dep": "amod",
783
+ "dependent": 3,
784
+ "dependentGloss": "brown",
785
+ "governor": 4,
786
+ "governorGloss": "fox",
787
+ },
788
+ {
789
+ "dep": "dep",
790
+ "dependent": 5,
791
+ "dependentGloss": "jumps",
792
+ "governor": 4,
793
+ "governorGloss": "fox",
794
+ },
795
+ {
796
+ "dep": "case",
797
+ "dependent": 6,
798
+ "dependentGloss": "over",
799
+ "governor": 9,
800
+ "governorGloss": "dog",
801
+ },
802
+ {
803
+ "dep": "det",
804
+ "dependent": 7,
805
+ "dependentGloss": "the",
806
+ "governor": 9,
807
+ "governorGloss": "dog",
808
+ },
809
+ {
810
+ "dep": "amod",
811
+ "dependent": 8,
812
+ "dependentGloss": "lazy",
813
+ "governor": 9,
814
+ "governorGloss": "dog",
815
+ },
816
+ {
817
+ "dep": "nmod",
818
+ "dependent": 9,
819
+ "dependentGloss": "dog",
820
+ "governor": 5,
821
+ "governorGloss": "jumps",
822
+ },
823
+ ],
824
+ "enhancedDependencies": [
825
+ {
826
+ "dep": "ROOT",
827
+ "dependent": 4,
828
+ "dependentGloss": "fox",
829
+ "governor": 0,
830
+ "governorGloss": "ROOT",
831
+ },
832
+ {
833
+ "dep": "det",
834
+ "dependent": 1,
835
+ "dependentGloss": "The",
836
+ "governor": 4,
837
+ "governorGloss": "fox",
838
+ },
839
+ {
840
+ "dep": "amod",
841
+ "dependent": 2,
842
+ "dependentGloss": "quick",
843
+ "governor": 4,
844
+ "governorGloss": "fox",
845
+ },
846
+ {
847
+ "dep": "amod",
848
+ "dependent": 3,
849
+ "dependentGloss": "brown",
850
+ "governor": 4,
851
+ "governorGloss": "fox",
852
+ },
853
+ {
854
+ "dep": "dep",
855
+ "dependent": 5,
856
+ "dependentGloss": "jumps",
857
+ "governor": 4,
858
+ "governorGloss": "fox",
859
+ },
860
+ {
861
+ "dep": "case",
862
+ "dependent": 6,
863
+ "dependentGloss": "over",
864
+ "governor": 9,
865
+ "governorGloss": "dog",
866
+ },
867
+ {
868
+ "dep": "det",
869
+ "dependent": 7,
870
+ "dependentGloss": "the",
871
+ "governor": 9,
872
+ "governorGloss": "dog",
873
+ },
874
+ {
875
+ "dep": "amod",
876
+ "dependent": 8,
877
+ "dependentGloss": "lazy",
878
+ "governor": 9,
879
+ "governorGloss": "dog",
880
+ },
881
+ {
882
+ "dep": "nmod:over",
883
+ "dependent": 9,
884
+ "dependentGloss": "dog",
885
+ "governor": 5,
886
+ "governorGloss": "jumps",
887
+ },
888
+ ],
889
+ "enhancedPlusPlusDependencies": [
890
+ {
891
+ "dep": "ROOT",
892
+ "dependent": 4,
893
+ "dependentGloss": "fox",
894
+ "governor": 0,
895
+ "governorGloss": "ROOT",
896
+ },
897
+ {
898
+ "dep": "det",
899
+ "dependent": 1,
900
+ "dependentGloss": "The",
901
+ "governor": 4,
902
+ "governorGloss": "fox",
903
+ },
904
+ {
905
+ "dep": "amod",
906
+ "dependent": 2,
907
+ "dependentGloss": "quick",
908
+ "governor": 4,
909
+ "governorGloss": "fox",
910
+ },
911
+ {
912
+ "dep": "amod",
913
+ "dependent": 3,
914
+ "dependentGloss": "brown",
915
+ "governor": 4,
916
+ "governorGloss": "fox",
917
+ },
918
+ {
919
+ "dep": "dep",
920
+ "dependent": 5,
921
+ "dependentGloss": "jumps",
922
+ "governor": 4,
923
+ "governorGloss": "fox",
924
+ },
925
+ {
926
+ "dep": "case",
927
+ "dependent": 6,
928
+ "dependentGloss": "over",
929
+ "governor": 9,
930
+ "governorGloss": "dog",
931
+ },
932
+ {
933
+ "dep": "det",
934
+ "dependent": 7,
935
+ "dependentGloss": "the",
936
+ "governor": 9,
937
+ "governorGloss": "dog",
938
+ },
939
+ {
940
+ "dep": "amod",
941
+ "dependent": 8,
942
+ "dependentGloss": "lazy",
943
+ "governor": 9,
944
+ "governorGloss": "dog",
945
+ },
946
+ {
947
+ "dep": "nmod:over",
948
+ "dependent": 9,
949
+ "dependentGloss": "dog",
950
+ "governor": 5,
951
+ "governorGloss": "jumps",
952
+ },
953
+ ],
954
+ "index": 0,
955
+ "parse": "(ROOT\n (NP\n (NP (DT The) (JJ quick) (JJ brown) (NN fox))\n (NP\n (NP (NNS jumps))\n (PP (IN over)\n (NP (DT the) (JJ lazy) (NN dog))))))",
956
+ "tokens": [
957
+ {
958
+ "after": " ",
959
+ "before": "",
960
+ "characterOffsetBegin": 0,
961
+ "characterOffsetEnd": 3,
962
+ "index": 1,
963
+ "lemma": "the",
964
+ "originalText": "The",
965
+ "pos": "DT",
966
+ "word": "The",
967
+ },
968
+ {
969
+ "after": " ",
970
+ "before": " ",
971
+ "characterOffsetBegin": 4,
972
+ "characterOffsetEnd": 9,
973
+ "index": 2,
974
+ "lemma": "quick",
975
+ "originalText": "quick",
976
+ "pos": "JJ",
977
+ "word": "quick",
978
+ },
979
+ {
980
+ "after": " ",
981
+ "before": " ",
982
+ "characterOffsetBegin": 10,
983
+ "characterOffsetEnd": 15,
984
+ "index": 3,
985
+ "lemma": "brown",
986
+ "originalText": "brown",
987
+ "pos": "JJ",
988
+ "word": "brown",
989
+ },
990
+ {
991
+ "after": " ",
992
+ "before": " ",
993
+ "characterOffsetBegin": 16,
994
+ "characterOffsetEnd": 19,
995
+ "index": 4,
996
+ "lemma": "fox",
997
+ "originalText": "fox",
998
+ "pos": "NN",
999
+ "word": "fox",
1000
+ },
1001
+ {
1002
+ "after": " ",
1003
+ "before": " ",
1004
+ "characterOffsetBegin": 20,
1005
+ "characterOffsetEnd": 25,
1006
+ "index": 5,
1007
+ "lemma": "jump",
1008
+ "originalText": "jumps",
1009
+ "pos": "VBZ",
1010
+ "word": "jumps",
1011
+ },
1012
+ {
1013
+ "after": " ",
1014
+ "before": " ",
1015
+ "characterOffsetBegin": 26,
1016
+ "characterOffsetEnd": 30,
1017
+ "index": 6,
1018
+ "lemma": "over",
1019
+ "originalText": "over",
1020
+ "pos": "IN",
1021
+ "word": "over",
1022
+ },
1023
+ {
1024
+ "after": " ",
1025
+ "before": " ",
1026
+ "characterOffsetBegin": 31,
1027
+ "characterOffsetEnd": 34,
1028
+ "index": 7,
1029
+ "lemma": "the",
1030
+ "originalText": "the",
1031
+ "pos": "DT",
1032
+ "word": "the",
1033
+ },
1034
+ {
1035
+ "after": " ",
1036
+ "before": " ",
1037
+ "characterOffsetBegin": 35,
1038
+ "characterOffsetEnd": 39,
1039
+ "index": 8,
1040
+ "lemma": "lazy",
1041
+ "originalText": "lazy",
1042
+ "pos": "JJ",
1043
+ "word": "lazy",
1044
+ },
1045
+ {
1046
+ "after": "",
1047
+ "before": " ",
1048
+ "characterOffsetBegin": 40,
1049
+ "characterOffsetEnd": 43,
1050
+ "index": 9,
1051
+ "lemma": "dog",
1052
+ "originalText": "dog",
1053
+ "pos": "NN",
1054
+ "word": "dog",
1055
+ },
1056
+ ],
1057
+ }
1058
+ ]
1059
+ }
+
+        corenlp_parser.api_call = MagicMock(return_value=api_return_value)
+
+        input_string = "The quick brown fox jumps over the lazy dog".split()
+        expected_output = Tree(
+            "ROOT",
+            [
+                Tree(
+                    "NP",
+                    [
+                        Tree(
+                            "NP",
+                            [
+                                Tree("DT", ["The"]),
+                                Tree("JJ", ["quick"]),
+                                Tree("JJ", ["brown"]),
+                                Tree("NN", ["fox"]),
+                            ],
+                        ),
+                        Tree(
+                            "NP",
+                            [
+                                Tree("NP", [Tree("NNS", ["jumps"])]),
+                                Tree(
+                                    "PP",
+                                    [
+                                        Tree("IN", ["over"]),
+                                        Tree(
+                                            "NP",
+                                            [
+                                                Tree("DT", ["the"]),
+                                                Tree("JJ", ["lazy"]),
+                                                Tree("NN", ["dog"]),
+                                            ],
+                                        ),
+                                    ],
+                                ),
+                            ],
+                        ),
+                    ],
+                )
+            ],
+        )
+
+        parsed_data = next(corenlp_parser.parse(input_string))
+
+        corenlp_parser.api_call.assert_called_once_with(
+            "The quick brown fox jumps over the lazy dog",
+            properties={"ssplit.eolonly": "true"},
+        )
+        self.assertEqual(expected_output, parsed_data)
+
+    def test_dependency_parser(self):
+        corenlp_parser = corenlp.CoreNLPDependencyParser()
+
1115
+ api_return_value = {
1116
+ "sentences": [
1117
+ {
1118
+ "basicDependencies": [
1119
+ {
1120
+ "dep": "ROOT",
1121
+ "dependent": 5,
1122
+ "dependentGloss": "jumps",
1123
+ "governor": 0,
1124
+ "governorGloss": "ROOT",
1125
+ },
1126
+ {
1127
+ "dep": "det",
1128
+ "dependent": 1,
1129
+ "dependentGloss": "The",
1130
+ "governor": 4,
1131
+ "governorGloss": "fox",
1132
+ },
1133
+ {
1134
+ "dep": "amod",
1135
+ "dependent": 2,
1136
+ "dependentGloss": "quick",
1137
+ "governor": 4,
1138
+ "governorGloss": "fox",
1139
+ },
1140
+ {
1141
+ "dep": "amod",
1142
+ "dependent": 3,
1143
+ "dependentGloss": "brown",
1144
+ "governor": 4,
1145
+ "governorGloss": "fox",
1146
+ },
1147
+ {
1148
+ "dep": "nsubj",
1149
+ "dependent": 4,
1150
+ "dependentGloss": "fox",
1151
+ "governor": 5,
1152
+ "governorGloss": "jumps",
1153
+ },
1154
+ {
1155
+ "dep": "case",
1156
+ "dependent": 6,
1157
+ "dependentGloss": "over",
1158
+ "governor": 9,
1159
+ "governorGloss": "dog",
1160
+ },
1161
+ {
1162
+ "dep": "det",
1163
+ "dependent": 7,
1164
+ "dependentGloss": "the",
1165
+ "governor": 9,
1166
+ "governorGloss": "dog",
1167
+ },
1168
+ {
1169
+ "dep": "amod",
1170
+ "dependent": 8,
1171
+ "dependentGloss": "lazy",
1172
+ "governor": 9,
1173
+ "governorGloss": "dog",
1174
+ },
1175
+ {
1176
+ "dep": "nmod",
1177
+ "dependent": 9,
1178
+ "dependentGloss": "dog",
1179
+ "governor": 5,
1180
+ "governorGloss": "jumps",
1181
+ },
1182
+ ],
1183
+ "enhancedDependencies": [
1184
+ {
1185
+ "dep": "ROOT",
1186
+ "dependent": 5,
1187
+ "dependentGloss": "jumps",
1188
+ "governor": 0,
1189
+ "governorGloss": "ROOT",
1190
+ },
1191
+ {
1192
+ "dep": "det",
1193
+ "dependent": 1,
1194
+ "dependentGloss": "The",
1195
+ "governor": 4,
1196
+ "governorGloss": "fox",
1197
+ },
1198
+ {
1199
+ "dep": "amod",
1200
+ "dependent": 2,
1201
+ "dependentGloss": "quick",
1202
+ "governor": 4,
1203
+ "governorGloss": "fox",
1204
+ },
1205
+ {
1206
+ "dep": "amod",
1207
+ "dependent": 3,
1208
+ "dependentGloss": "brown",
1209
+ "governor": 4,
1210
+ "governorGloss": "fox",
1211
+ },
1212
+ {
1213
+ "dep": "nsubj",
1214
+ "dependent": 4,
1215
+ "dependentGloss": "fox",
1216
+ "governor": 5,
1217
+ "governorGloss": "jumps",
1218
+ },
1219
+ {
1220
+ "dep": "case",
1221
+ "dependent": 6,
1222
+ "dependentGloss": "over",
1223
+ "governor": 9,
1224
+ "governorGloss": "dog",
1225
+ },
1226
+ {
1227
+ "dep": "det",
1228
+ "dependent": 7,
1229
+ "dependentGloss": "the",
1230
+ "governor": 9,
1231
+ "governorGloss": "dog",
1232
+ },
1233
+ {
1234
+ "dep": "amod",
1235
+ "dependent": 8,
1236
+ "dependentGloss": "lazy",
1237
+ "governor": 9,
1238
+ "governorGloss": "dog",
1239
+ },
1240
+ {
1241
+ "dep": "nmod:over",
1242
+ "dependent": 9,
1243
+ "dependentGloss": "dog",
1244
+ "governor": 5,
1245
+ "governorGloss": "jumps",
1246
+ },
1247
+ ],
1248
+ "enhancedPlusPlusDependencies": [
1249
+ {
1250
+ "dep": "ROOT",
1251
+ "dependent": 5,
1252
+ "dependentGloss": "jumps",
1253
+ "governor": 0,
1254
+ "governorGloss": "ROOT",
1255
+ },
1256
+ {
1257
+ "dep": "det",
1258
+ "dependent": 1,
1259
+ "dependentGloss": "The",
1260
+ "governor": 4,
1261
+ "governorGloss": "fox",
1262
+ },
1263
+ {
1264
+ "dep": "amod",
1265
+ "dependent": 2,
1266
+ "dependentGloss": "quick",
1267
+ "governor": 4,
1268
+ "governorGloss": "fox",
1269
+ },
1270
+ {
1271
+ "dep": "amod",
1272
+ "dependent": 3,
1273
+ "dependentGloss": "brown",
1274
+ "governor": 4,
1275
+ "governorGloss": "fox",
1276
+ },
1277
+ {
1278
+ "dep": "nsubj",
1279
+ "dependent": 4,
1280
+ "dependentGloss": "fox",
1281
+ "governor": 5,
1282
+ "governorGloss": "jumps",
1283
+ },
1284
+ {
1285
+ "dep": "case",
1286
+ "dependent": 6,
1287
+ "dependentGloss": "over",
1288
+ "governor": 9,
1289
+ "governorGloss": "dog",
1290
+ },
1291
+ {
1292
+ "dep": "det",
1293
+ "dependent": 7,
1294
+ "dependentGloss": "the",
1295
+ "governor": 9,
1296
+ "governorGloss": "dog",
1297
+ },
1298
+ {
1299
+ "dep": "amod",
1300
+ "dependent": 8,
1301
+ "dependentGloss": "lazy",
1302
+ "governor": 9,
1303
+ "governorGloss": "dog",
1304
+ },
1305
+ {
1306
+ "dep": "nmod:over",
1307
+ "dependent": 9,
1308
+ "dependentGloss": "dog",
1309
+ "governor": 5,
1310
+ "governorGloss": "jumps",
1311
+ },
1312
+ ],
1313
+ "index": 0,
1314
+ "tokens": [
1315
+ {
1316
+ "after": " ",
1317
+ "before": "",
1318
+ "characterOffsetBegin": 0,
1319
+ "characterOffsetEnd": 3,
1320
+ "index": 1,
1321
+ "lemma": "the",
1322
+ "originalText": "The",
1323
+ "pos": "DT",
1324
+ "word": "The",
1325
+ },
1326
+ {
1327
+ "after": " ",
1328
+ "before": " ",
1329
+ "characterOffsetBegin": 4,
1330
+ "characterOffsetEnd": 9,
1331
+ "index": 2,
1332
+ "lemma": "quick",
1333
+ "originalText": "quick",
1334
+ "pos": "JJ",
1335
+ "word": "quick",
1336
+ },
1337
+ {
1338
+ "after": " ",
1339
+ "before": " ",
1340
+ "characterOffsetBegin": 10,
1341
+ "characterOffsetEnd": 15,
1342
+ "index": 3,
1343
+ "lemma": "brown",
1344
+ "originalText": "brown",
1345
+ "pos": "JJ",
1346
+ "word": "brown",
1347
+ },
1348
+ {
1349
+ "after": " ",
1350
+ "before": " ",
1351
+ "characterOffsetBegin": 16,
1352
+ "characterOffsetEnd": 19,
1353
+ "index": 4,
1354
+ "lemma": "fox",
1355
+ "originalText": "fox",
1356
+ "pos": "NN",
1357
+ "word": "fox",
1358
+ },
1359
+ {
1360
+ "after": " ",
1361
+ "before": " ",
1362
+ "characterOffsetBegin": 20,
1363
+ "characterOffsetEnd": 25,
1364
+ "index": 5,
1365
+ "lemma": "jump",
1366
+ "originalText": "jumps",
1367
+ "pos": "VBZ",
1368
+ "word": "jumps",
1369
+ },
1370
+ {
1371
+ "after": " ",
1372
+ "before": " ",
1373
+ "characterOffsetBegin": 26,
1374
+ "characterOffsetEnd": 30,
1375
+ "index": 6,
1376
+ "lemma": "over",
1377
+ "originalText": "over",
1378
+ "pos": "IN",
1379
+ "word": "over",
1380
+ },
1381
+ {
1382
+ "after": " ",
1383
+ "before": " ",
1384
+ "characterOffsetBegin": 31,
1385
+ "characterOffsetEnd": 34,
1386
+ "index": 7,
1387
+ "lemma": "the",
1388
+ "originalText": "the",
1389
+ "pos": "DT",
1390
+ "word": "the",
1391
+ },
1392
+ {
1393
+ "after": " ",
1394
+ "before": " ",
1395
+ "characterOffsetBegin": 35,
1396
+ "characterOffsetEnd": 39,
1397
+ "index": 8,
1398
+ "lemma": "lazy",
1399
+ "originalText": "lazy",
1400
+ "pos": "JJ",
1401
+ "word": "lazy",
1402
+ },
1403
+ {
1404
+ "after": "",
1405
+ "before": " ",
1406
+ "characterOffsetBegin": 40,
1407
+ "characterOffsetEnd": 43,
1408
+ "index": 9,
1409
+ "lemma": "dog",
1410
+ "originalText": "dog",
1411
+ "pos": "NN",
1412
+ "word": "dog",
1413
+ },
1414
+ ],
1415
+ }
1416
+ ]
1417
+ }
+
+        corenlp_parser.api_call = MagicMock(return_value=api_return_value)
+
+        input_string = "The quick brown fox jumps over the lazy dog".split()
+        expected_output = Tree(
+            "jumps",
+            [
+                Tree("fox", ["The", "quick", "brown"]),
+                Tree("dog", ["over", "the", "lazy"]),
+            ],
+        )
+
+        parsed_data = next(corenlp_parser.parse(input_string))
+
+        corenlp_parser.api_call.assert_called_once_with(
+            "The quick brown fox jumps over the lazy dog",
+            properties={"ssplit.eolonly": "true"},
+        )
+        self.assertEqual(expected_output, parsed_data.tree())
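
The tests above stub out `api_call` with `MagicMock`, so no CoreNLP server is needed to run them. For orientation only, the same public calls can be pointed at a live server; the sketch below is an illustration, not part of the patch, and assumes a Stanford CoreNLP server has been started separately and is listening on localhost:9000:

    from nltk.parse import corenlp

    # Assumption: a CoreNLP server is already running on port 9000.
    tagger = corenlp.CoreNLPParser(url="http://localhost:9000", tagtype="pos")
    print(tagger.tag("What is the airspeed of an unladen swallow ?".split()))

    dep_parser = corenlp.CoreNLPDependencyParser(url="http://localhost:9000")
    parse, = dep_parser.parse("The quick brown fox jumps over the lazy dog".split())
    parse.tree().pretty_print()
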
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_corpora.py ADDED
@@ -0,0 +1,274 @@
+import unittest
+
+import pytest
+
+from nltk.corpus import (  # mwa_ppdb
+    cess_cat,
+    cess_esp,
+    conll2007,
+    floresta,
+    indian,
+    ptb,
+    sinica_treebank,
+    udhr,
+)
+from nltk.tree import Tree
+
+
+class TestUdhr(unittest.TestCase):
+    def test_words(self):
+        for name in udhr.fileids():
+            words = list(udhr.words(name))
+            self.assertTrue(words)
+
+    def test_raw_unicode(self):
+        for name in udhr.fileids():
+            txt = udhr.raw(name)
+            assert not isinstance(txt, bytes), name
+
+    def test_polish_encoding(self):
+        text_pl = udhr.raw("Polish-Latin2")[:164]
+        text_ppl = udhr.raw("Polish_Polski-Latin2")[:164]
+        expected = """POWSZECHNA DEKLARACJA PRAW CZŁOWIEKA
+[Preamble]
+Trzecia Sesja Ogólnego Zgromadzenia ONZ, obradująca w Paryżu, \
+uchwaliła 10 grudnia 1948 roku jednomyślnie Powszechną"""
+        assert text_pl == expected, "Polish-Latin2"
+        assert text_ppl == expected, "Polish_Polski-Latin2"
+
+
+class TestIndian(unittest.TestCase):
+    def test_words(self):
+        words = indian.words()[:3]
+        self.assertEqual(words, ["মহিষের", "সন্তান", ":"])
+
+    def test_tagged_words(self):
+        tagged_words = indian.tagged_words()[:3]
+        self.assertEqual(
+            tagged_words, [("মহিষের", "NN"), ("সন্তান", "NN"), (":", "SYM")]
+        )
+
+
+class TestCess(unittest.TestCase):
+    def test_catalan(self):
+        words = cess_cat.words()[:15]
+        txt = "El Tribunal_Suprem -Fpa- TS -Fpt- ha confirmat la condemna a quatre anys d' inhabilitació especial"
+        self.assertEqual(words, txt.split())
+        self.assertEqual(cess_cat.tagged_sents()[0][34][0], "càrrecs")
+
+    def test_esp(self):
+        words = cess_esp.words()[:15]
+        txt = "El grupo estatal Electricité_de_France -Fpa- EDF -Fpt- anunció hoy , jueves , la compra del"
+        self.assertEqual(words, txt.split())
+        self.assertEqual(cess_esp.words()[115], "años")
+
+
+class TestFloresta(unittest.TestCase):
+    def test_words(self):
+        words = floresta.words()[:10]
+        txt = "Um revivalismo refrescante O 7_e_Meio é um ex-libris de a"
+        self.assertEqual(words, txt.split())
+
+
+class TestSinicaTreebank(unittest.TestCase):
+    def test_sents(self):
+        first_3_sents = sinica_treebank.sents()[:3]
+        self.assertEqual(
+            first_3_sents, [["一"], ["友情"], ["嘉珍", "和", "我", "住在", "同一條", "巷子"]]
+        )
+
+    def test_parsed_sents(self):
+        parsed_sents = sinica_treebank.parsed_sents()[25]
+        self.assertEqual(
+            parsed_sents,
+            Tree(
+                "S",
+                [
+                    Tree("NP", [Tree("Nba", ["嘉珍"])]),
+                    Tree("V‧地", [Tree("VA11", ["不停"]), Tree("DE", ["的"])]),
+                    Tree("VA4", ["哭泣"]),
+                ],
+            ),
+        )
+
+
+class TestCoNLL2007(unittest.TestCase):
+    # Reading the CoNLL 2007 Dependency Treebanks
+
+    def test_sents(self):
+        sents = conll2007.sents("esp.train")[0]
+        self.assertEqual(
+            sents[:6], ["El", "aumento", "del", "índice", "de", "desempleo"]
+        )
+
+    def test_parsed_sents(self):
+
+        parsed_sents = conll2007.parsed_sents("esp.train")[0]
+
+        self.assertEqual(
+            parsed_sents.tree(),
+            Tree(
+                "fortaleció",
+                [
+                    Tree(
+                        "aumento",
+                        [
+                            "El",
+                            Tree(
+                                "del",
+                                [
+                                    Tree(
+                                        "índice",
+                                        [
+                                            Tree(
+                                                "de",
+                                                [Tree("desempleo", ["estadounidense"])],
+                                            )
+                                        ],
+                                    )
+                                ],
+                            ),
+                        ],
+                    ),
+                    "hoy",
+                    "considerablemente",
+                    Tree(
+                        "al",
+                        [
+                            Tree(
+                                "euro",
+                                [
+                                    Tree(
+                                        "cotizaba",
+                                        [
+                                            ",",
+                                            "que",
+                                            Tree("a", [Tree("15.35", ["las", "GMT"])]),
+                                            "se",
+                                            Tree(
+                                                "en",
+                                                [
+                                                    Tree(
+                                                        "mercado",
+                                                        [
+                                                            "el",
+                                                            Tree("de", ["divisas"]),
+                                                            Tree("de", ["Fráncfort"]),
+                                                        ],
+                                                    )
+                                                ],
+                                            ),
+                                            Tree("a", ["0,9452_dólares"]),
+                                            Tree(
+                                                "frente_a",
+                                                [
+                                                    ",",
+                                                    Tree(
+                                                        "0,9349_dólares",
+                                                        [
+                                                            "los",
+                                                            Tree(
+                                                                "de",
+                                                                [
+                                                                    Tree(
+                                                                        "mañana",
+                                                                        ["esta"],
+                                                                    )
+                                                                ],
+                                                            ),
+                                                        ],
+                                                    ),
+                                                ],
+                                            ),
+                                        ],
+                                    )
+                                ],
+                            )
+                        ],
+                    ),
+                    ".",
+                ],
+            ),
+        )
+
+
+@pytest.mark.skipif(
+    not ptb.fileids(),
+    reason="A full installation of the Penn Treebank is not available",
+)
+class TestPTB(unittest.TestCase):
+    def test_fileids(self):
+        self.assertEqual(
+            ptb.fileids()[:4],
+            [
+                "BROWN/CF/CF01.MRG",
+                "BROWN/CF/CF02.MRG",
+                "BROWN/CF/CF03.MRG",
+                "BROWN/CF/CF04.MRG",
+            ],
+        )
+
+    def test_words(self):
+        self.assertEqual(
+            ptb.words("WSJ/00/WSJ_0003.MRG")[:7],
+            ["A", "form", "of", "asbestos", "once", "used", "*"],
+        )
+
+    def test_tagged_words(self):
+        self.assertEqual(
+            ptb.tagged_words("WSJ/00/WSJ_0003.MRG")[:3],
+            [("A", "DT"), ("form", "NN"), ("of", "IN")],
+        )
+
+    def test_categories(self):
+        self.assertEqual(
+            ptb.categories(),
+            [
+                "adventure",
+                "belles_lettres",
+                "fiction",
+                "humor",
+                "lore",
+                "mystery",
+                "news",
+                "romance",
+                "science_fiction",
+            ],
+        )
+
+    def test_news_fileids(self):
+        self.assertEqual(
+            ptb.fileids("news")[:3],
+            ["WSJ/00/WSJ_0001.MRG", "WSJ/00/WSJ_0002.MRG", "WSJ/00/WSJ_0003.MRG"],
+        )
+
+    def test_category_words(self):
+        self.assertEqual(
+            ptb.words(categories=["humor", "fiction"])[:6],
+            ["Thirty-three", "Scotty", "did", "not", "go", "back"],
+        )
+
+
+@pytest.mark.skip("Skipping test for mwa_ppdb.")
+class TestMWAPPDB(unittest.TestCase):
+    def test_fileids(self):
+        self.assertEqual(
+            mwa_ppdb.fileids(), ["ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"]
+        )
+
+    def test_entries(self):
+        self.assertEqual(
+            mwa_ppdb.entries()[:10],
+            [
+                ("10/17/01", "17/10/2001"),
+                ("102,70", "102.70"),
+                ("13,53", "13.53"),
+                ("3.2.5.3.2.1", "3.2.5.3.2.1."),
+                ("53,76", "53.76"),
+                ("6.9.5", "6.9.5."),
+                ("7.7.6.3", "7.7.6.3."),
+                ("76,20", "76.20"),
+                ("79,85", "79.85"),
+                ("93,65", "93.65"),
+            ],
+        )
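
These assertions also double as a compact reference for the corpus reader API. A minimal interactive sketch (assuming the corresponding corpora have already been fetched with the NLTK downloader; otherwise the readers raise `LookupError`):

    from nltk.corpus import conll2007, indian, udhr

    print(indian.tagged_words()[:3])       # [('মহিষের', 'NN'), ('সন্তান', 'NN'), (':', 'SYM')]
    print(udhr.raw("Polish-Latin2")[:36])  # 'POWSZECHNA DEKLARACJA PRAW CZŁOWIEKA'
    conll2007.parsed_sents("esp.train")[0].tree().pretty_print()
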
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py ADDED
@@ -0,0 +1,48 @@
+"""
+Corpus View Regression Tests
+"""
+import unittest
+
+import nltk.data
+from nltk.corpus.reader.util import (
+    StreamBackedCorpusView,
+    read_line_block,
+    read_whitespace_block,
+)
+
+
+class TestCorpusViews(unittest.TestCase):
+
+    linetok = nltk.LineTokenizer(blanklines="keep")
+    names = [
+        "corpora/inaugural/README",  # A very short file (160 chars)
+        "corpora/inaugural/1793-Washington.txt",  # A relatively short file (791 chars)
+        "corpora/inaugural/1909-Taft.txt",  # A longer file (32k chars)
+    ]
+
+    def data(self):
+        for name in self.names:
+            f = nltk.data.find(name)
+            with f.open() as fp:
+                file_data = fp.read().decode("utf8")
+            yield f, file_data
+
+    def test_correct_values(self):
+        # Check that corpus views produce the correct sequence of values.
+
+        for f, file_data in self.data():
+            v = StreamBackedCorpusView(f, read_whitespace_block)
+            self.assertEqual(list(v), file_data.split())
+
+            v = StreamBackedCorpusView(f, read_line_block)
+            self.assertEqual(list(v), self.linetok.tokenize(file_data))
+
+    def test_correct_length(self):
+        # Check that the corpus views report the correct lengths:
+
+        for f, file_data in self.data():
+            v = StreamBackedCorpusView(f, read_whitespace_block)
+            self.assertEqual(len(v), len(file_data.split()))
+
+            v = StreamBackedCorpusView(f, read_line_block)
+            self.assertEqual(len(v), len(self.linetok.tokenize(file_data)))
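
Outside the test harness, the same lazy view can be built directly from a path returned by `nltk.data.find`. A small sketch, assuming the `inaugural` corpus is installed locally:

    import nltk.data
    from nltk.corpus.reader.util import StreamBackedCorpusView, read_whitespace_block

    path = nltk.data.find("corpora/inaugural/1793-Washington.txt")
    view = StreamBackedCorpusView(path, read_whitespace_block)
    # The view is lazy: tokens are read from disk block by block on demand.
    print(len(view), view[:5])
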
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/test_data.py ADDED
@@ -0,0 +1,15 @@
+import pytest
+
+import nltk.data
+
+
+def test_find_raises_exception():
+    with pytest.raises(LookupError):
+        nltk.data.find("no_such_resource/foo")
+
+
+def test_find_raises_exception_with_full_resource_name():
+    no_such_thing = "no_such_thing/bar"
+    with pytest.raises(LookupError) as exc:
+        nltk.data.find(no_such_thing)
+    assert no_such_thing in str(exc)
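
The `LookupError` exercised here is the same error end users see for any missing resource. A common handling pattern (a sketch, not part of this test file; `punkt` is just a familiar example resource):

    import nltk
    import nltk.data

    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        # Resource missing: fetch it with the downloader, then retry.
        nltk.download("punkt")
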