applied-ai-018 committed
Commit 8777447 · verified · 1 Parent(s): 34ff5a6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/17.attention.dense.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/nltk/chat/__init__.py +48 -0
  6. venv/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/nltk/chat/eliza.py +337 -0
  14. venv/lib/python3.10/site-packages/nltk/chat/iesha.py +160 -0
  15. venv/lib/python3.10/site-packages/nltk/chat/rude.py +125 -0
  16. venv/lib/python3.10/site-packages/nltk/chat/suntsu.py +140 -0
  17. venv/lib/python3.10/site-packages/nltk/chat/util.py +124 -0
  18. venv/lib/python3.10/site-packages/nltk/chat/zen.py +329 -0
  19. venv/lib/python3.10/site-packages/nltk/classify/__init__.py +101 -0
  20. venv/lib/python3.10/site-packages/nltk/classify/api.py +195 -0
  21. venv/lib/python3.10/site-packages/nltk/classify/decisiontree.py +349 -0
  22. venv/lib/python3.10/site-packages/nltk/classify/maxent.py +1569 -0
  23. venv/lib/python3.10/site-packages/nltk/classify/megam.py +184 -0
  24. venv/lib/python3.10/site-packages/nltk/classify/naivebayes.py +260 -0
  25. venv/lib/python3.10/site-packages/nltk/classify/positivenaivebayes.py +180 -0
  26. venv/lib/python3.10/site-packages/nltk/classify/rte_classify.py +183 -0
  27. venv/lib/python3.10/site-packages/nltk/classify/scikitlearn.py +143 -0
  28. venv/lib/python3.10/site-packages/nltk/classify/senna.py +176 -0
  29. venv/lib/python3.10/site-packages/nltk/classify/svm.py +17 -0
  30. venv/lib/python3.10/site-packages/nltk/classify/tadm.py +122 -0
  31. venv/lib/python3.10/site-packages/nltk/classify/textcat.py +197 -0
  32. venv/lib/python3.10/site-packages/nltk/classify/util.py +346 -0
  33. venv/lib/python3.10/site-packages/nltk/classify/weka.py +377 -0
  34. venv/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/nltk/stem/__init__.py +34 -0
  40. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91fbdb66a9e4da6e4b659c893ed62fe150b3b2fcb87a69e4e49e4ed1325ac032
+ size 16778396
ckpts/universal/global_step40/zero/17.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ba9630f2a533f3ac28f6e10d9492ebec38926e4f4d8cf4741efe3f441ade36b
+ size 16778317
ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:996520494b7c161147d9550c30ae99f763cc6a7301debafb1dd60c41e0b6e3d9
+ size 33555612
ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:085aff1a306423579c8d3cd7f2d2390fad5beb8dd1ab99a49ea246b2814aad75
+ size 33555533
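
The four checkpoint tensors above are committed as Git LFS pointer files rather than as the weights themselves: each pointer records only the LFS spec version, the sha256 oid of the real payload, and its size in bytes. The sketch below shows one way such a pointer could be checked against a downloaded payload; it is illustrative only, and the two file paths are hypothetical placeholders rather than paths from this commit.

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, payload_path: str) -> bool:
    """Check a downloaded file against the oid and size recorded in a Git LFS pointer."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    payload = Path(payload_path)
    if payload.stat().st_size != expected_size:
        return False
    return hashlib.sha256(payload.read_bytes()).hexdigest() == expected_oid

# Hypothetical paths -- adjust to wherever the pointer and the payload actually live.
# print(verify_lfs_pointer("exp_avg.pt.pointer", "exp_avg.pt"))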
venv/lib/python3.10/site-packages/nltk/chat/__init__.py ADDED
@@ -0,0 +1,48 @@
+ # Natural Language Toolkit: Chatbots
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Authors: Steven Bird <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ # Based on an Eliza implementation by Joe Strout <[email protected]>,
+ # Jeff Epler <[email protected]> and Jez Higgins <[email protected]>.
+
+ """
+ A class for simple chatbots. These perform simple pattern matching on sentences
+ typed by users, and respond with automatically generated sentences.
+
+ These chatbots may not work using the windows command line or the
+ windows IDLE GUI.
+ """
+
+ from nltk.chat.eliza import eliza_chat
+ from nltk.chat.iesha import iesha_chat
+ from nltk.chat.rude import rude_chat
+ from nltk.chat.suntsu import suntsu_chat
+ from nltk.chat.util import Chat
+ from nltk.chat.zen import zen_chat
+
+ bots = [
+     (eliza_chat, "Eliza (psycho-babble)"),
+     (iesha_chat, "Iesha (teen anime junky)"),
+     (rude_chat, "Rude (abusive bot)"),
+     (suntsu_chat, "Suntsu (Chinese sayings)"),
+     (zen_chat, "Zen (gems of wisdom)"),
+ ]
+
+
+ def chatbots():
+     print("Which chatbot would you like to talk to?")
+     botcount = len(bots)
+     for i in range(botcount):
+         print(" %d: %s" % (i + 1, bots[i][1]))
+     while True:
+         choice = input(f"\nEnter a number in the range 1-{botcount}: ").strip()
+         if choice.isdigit() and (int(choice) - 1) in range(botcount):
+             break
+         else:
+             print(" Error: bad chatbot number")
+
+     chatbot = bots[int(choice) - 1][0]
+     chatbot()
venv/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.36 kB).
 
venv/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc ADDED
Binary file (5.88 kB).
 
venv/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc ADDED
Binary file (3.32 kB).
 
venv/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc ADDED
Binary file (2.21 kB).
 
venv/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc ADDED
Binary file (5.96 kB).
 
venv/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc ADDED
Binary file (3.75 kB).
 
venv/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc ADDED
Binary file (6.51 kB).
 
venv/lib/python3.10/site-packages/nltk/chat/eliza.py ADDED
@@ -0,0 +1,337 @@
1
+ # Natural Language Toolkit: Eliza
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ # Based on an Eliza implementation by Joe Strout <[email protected]>,
10
+ # Jeff Epler <[email protected]> and Jez Higgins <mailto:[email protected]>.
11
+
12
+ # a translation table used to convert things you say into things the
13
+ # computer says back, e.g. "I am" --> "you are"
14
+
15
+ from nltk.chat.util import Chat, reflections
16
+
17
+ # a table of response pairs, where each pair consists of a
18
+ # regular expression, and a list of possible responses,
19
+ # with group-macros labelled as %1, %2.
20
+
21
+ pairs = (
22
+ (
23
+ r"I need (.*)",
24
+ (
25
+ "Why do you need %1?",
26
+ "Would it really help you to get %1?",
27
+ "Are you sure you need %1?",
28
+ ),
29
+ ),
30
+ (
31
+ r"Why don\'t you (.*)",
32
+ (
33
+ "Do you really think I don't %1?",
34
+ "Perhaps eventually I will %1.",
35
+ "Do you really want me to %1?",
36
+ ),
37
+ ),
38
+ (
39
+ r"Why can\'t I (.*)",
40
+ (
41
+ "Do you think you should be able to %1?",
42
+ "If you could %1, what would you do?",
43
+ "I don't know -- why can't you %1?",
44
+ "Have you really tried?",
45
+ ),
46
+ ),
47
+ (
48
+ r"I can\'t (.*)",
49
+ (
50
+ "How do you know you can't %1?",
51
+ "Perhaps you could %1 if you tried.",
52
+ "What would it take for you to %1?",
53
+ ),
54
+ ),
55
+ (
56
+ r"I am (.*)",
57
+ (
58
+ "Did you come to me because you are %1?",
59
+ "How long have you been %1?",
60
+ "How do you feel about being %1?",
61
+ ),
62
+ ),
63
+ (
64
+ r"I\'m (.*)",
65
+ (
66
+ "How does being %1 make you feel?",
67
+ "Do you enjoy being %1?",
68
+ "Why do you tell me you're %1?",
69
+ "Why do you think you're %1?",
70
+ ),
71
+ ),
72
+ (
73
+ r"Are you (.*)",
74
+ (
75
+ "Why does it matter whether I am %1?",
76
+ "Would you prefer it if I were not %1?",
77
+ "Perhaps you believe I am %1.",
78
+ "I may be %1 -- what do you think?",
79
+ ),
80
+ ),
81
+ (
82
+ r"What (.*)",
83
+ (
84
+ "Why do you ask?",
85
+ "How would an answer to that help you?",
86
+ "What do you think?",
87
+ ),
88
+ ),
89
+ (
90
+ r"How (.*)",
91
+ (
92
+ "How do you suppose?",
93
+ "Perhaps you can answer your own question.",
94
+ "What is it you're really asking?",
95
+ ),
96
+ ),
97
+ (
98
+ r"Because (.*)",
99
+ (
100
+ "Is that the real reason?",
101
+ "What other reasons come to mind?",
102
+ "Does that reason apply to anything else?",
103
+ "If %1, what else must be true?",
104
+ ),
105
+ ),
106
+ (
107
+ r"(.*) sorry (.*)",
108
+ (
109
+ "There are many times when no apology is needed.",
110
+ "What feelings do you have when you apologize?",
111
+ ),
112
+ ),
113
+ (
114
+ r"Hello(.*)",
115
+ (
116
+ "Hello... I'm glad you could drop by today.",
117
+ "Hi there... how are you today?",
118
+ "Hello, how are you feeling today?",
119
+ ),
120
+ ),
121
+ (
122
+ r"I think (.*)",
123
+ ("Do you doubt %1?", "Do you really think so?", "But you're not sure %1?"),
124
+ ),
125
+ (
126
+ r"(.*) friend (.*)",
127
+ (
128
+ "Tell me more about your friends.",
129
+ "When you think of a friend, what comes to mind?",
130
+ "Why don't you tell me about a childhood friend?",
131
+ ),
132
+ ),
133
+ (r"Yes", ("You seem quite sure.", "OK, but can you elaborate a bit?")),
134
+ (
135
+ r"(.*) computer(.*)",
136
+ (
137
+ "Are you really talking about me?",
138
+ "Does it seem strange to talk to a computer?",
139
+ "How do computers make you feel?",
140
+ "Do you feel threatened by computers?",
141
+ ),
142
+ ),
143
+ (
144
+ r"Is it (.*)",
145
+ (
146
+ "Do you think it is %1?",
147
+ "Perhaps it's %1 -- what do you think?",
148
+ "If it were %1, what would you do?",
149
+ "It could well be that %1.",
150
+ ),
151
+ ),
152
+ (
153
+ r"It is (.*)",
154
+ (
155
+ "You seem very certain.",
156
+ "If I told you that it probably isn't %1, what would you feel?",
157
+ ),
158
+ ),
159
+ (
160
+ r"Can you (.*)",
161
+ (
162
+ "What makes you think I can't %1?",
163
+ "If I could %1, then what?",
164
+ "Why do you ask if I can %1?",
165
+ ),
166
+ ),
167
+ (
168
+ r"Can I (.*)",
169
+ (
170
+ "Perhaps you don't want to %1.",
171
+ "Do you want to be able to %1?",
172
+ "If you could %1, would you?",
173
+ ),
174
+ ),
175
+ (
176
+ r"You are (.*)",
177
+ (
178
+ "Why do you think I am %1?",
179
+ "Does it please you to think that I'm %1?",
180
+ "Perhaps you would like me to be %1.",
181
+ "Perhaps you're really talking about yourself?",
182
+ ),
183
+ ),
184
+ (
185
+ r"You\'re (.*)",
186
+ (
187
+ "Why do you say I am %1?",
188
+ "Why do you think I am %1?",
189
+ "Are we talking about you, or me?",
190
+ ),
191
+ ),
192
+ (
193
+ r"I don\'t (.*)",
194
+ ("Don't you really %1?", "Why don't you %1?", "Do you want to %1?"),
195
+ ),
196
+ (
197
+ r"I feel (.*)",
198
+ (
199
+ "Good, tell me more about these feelings.",
200
+ "Do you often feel %1?",
201
+ "When do you usually feel %1?",
202
+ "When you feel %1, what do you do?",
203
+ ),
204
+ ),
205
+ (
206
+ r"I have (.*)",
207
+ (
208
+ "Why do you tell me that you've %1?",
209
+ "Have you really %1?",
210
+ "Now that you have %1, what will you do next?",
211
+ ),
212
+ ),
213
+ (
214
+ r"I would (.*)",
215
+ (
216
+ "Could you explain why you would %1?",
217
+ "Why would you %1?",
218
+ "Who else knows that you would %1?",
219
+ ),
220
+ ),
221
+ (
222
+ r"Is there (.*)",
223
+ (
224
+ "Do you think there is %1?",
225
+ "It's likely that there is %1.",
226
+ "Would you like there to be %1?",
227
+ ),
228
+ ),
229
+ (
230
+ r"My (.*)",
231
+ (
232
+ "I see, your %1.",
233
+ "Why do you say that your %1?",
234
+ "When your %1, how do you feel?",
235
+ ),
236
+ ),
237
+ (
238
+ r"You (.*)",
239
+ (
240
+ "We should be discussing you, not me.",
241
+ "Why do you say that about me?",
242
+ "Why do you care whether I %1?",
243
+ ),
244
+ ),
245
+ (r"Why (.*)", ("Why don't you tell me the reason why %1?", "Why do you think %1?")),
246
+ (
247
+ r"I want (.*)",
248
+ (
249
+ "What would it mean to you if you got %1?",
250
+ "Why do you want %1?",
251
+ "What would you do if you got %1?",
252
+ "If you got %1, then what would you do?",
253
+ ),
254
+ ),
255
+ (
256
+ r"(.*) mother(.*)",
257
+ (
258
+ "Tell me more about your mother.",
259
+ "What was your relationship with your mother like?",
260
+ "How do you feel about your mother?",
261
+ "How does this relate to your feelings today?",
262
+ "Good family relations are important.",
263
+ ),
264
+ ),
265
+ (
266
+ r"(.*) father(.*)",
267
+ (
268
+ "Tell me more about your father.",
269
+ "How did your father make you feel?",
270
+ "How do you feel about your father?",
271
+ "Does your relationship with your father relate to your feelings today?",
272
+ "Do you have trouble showing affection with your family?",
273
+ ),
274
+ ),
275
+ (
276
+ r"(.*) child(.*)",
277
+ (
278
+ "Did you have close friends as a child?",
279
+ "What is your favorite childhood memory?",
280
+ "Do you remember any dreams or nightmares from childhood?",
281
+ "Did the other children sometimes tease you?",
282
+ "How do you think your childhood experiences relate to your feelings today?",
283
+ ),
284
+ ),
285
+ (
286
+ r"(.*)\?",
287
+ (
288
+ "Why do you ask that?",
289
+ "Please consider whether you can answer your own question.",
290
+ "Perhaps the answer lies within yourself?",
291
+ "Why don't you tell me?",
292
+ ),
293
+ ),
294
+ (
295
+ r"quit",
296
+ (
297
+ "Thank you for talking with me.",
298
+ "Good-bye.",
299
+ "Thank you, that will be $150. Have a good day!",
300
+ ),
301
+ ),
302
+ (
303
+ r"(.*)",
304
+ (
305
+ "Please tell me more.",
306
+ "Let's change focus a bit... Tell me about your family.",
307
+ "Can you elaborate on that?",
308
+ "Why do you say that %1?",
309
+ "I see.",
310
+ "Very interesting.",
311
+ "%1.",
312
+ "I see. And what does that tell you?",
313
+ "How does that make you feel?",
314
+ "How do you feel when you say that?",
315
+ ),
316
+ ),
317
+ )
318
+
319
+ eliza_chatbot = Chat(pairs, reflections)
320
+
321
+
322
+ def eliza_chat():
323
+ print("Therapist\n---------")
324
+ print("Talk to the program by typing in plain English, using normal upper-")
325
+ print('and lower-case letters and punctuation. Enter "quit" when done.')
326
+ print("=" * 72)
327
+ print("Hello. How are you feeling today?")
328
+
329
+ eliza_chatbot.converse()
330
+
331
+
332
+ def demo():
333
+ eliza_chat()
334
+
335
+
336
+ if __name__ == "__main__":
337
+ demo()
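
eliza.py builds a module-level Chat instance (eliza_chatbot) from the pairs table and the shared reflections map, so besides the interactive eliza_chat() loop it can also be queried directly through respond(). A small usage sketch follows; the exact wording varies because respond() picks randomly among the responses listed for the matching pattern.

from nltk.chat.eliza import eliza_chatbot

# Matches r"I need (.*)"; the captured text replaces %1 in one of the three
# "I need" responses, e.g. "Why do you need a holiday?"
print(eliza_chatbot.respond("I need a holiday"))

# No specific pattern applies here, so the catch-all r"(.*)" rule at the end of
# the table guarantees some reply, e.g. "Please tell me more."
print(eliza_chatbot.respond("The weather has been strange lately"))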
venv/lib/python3.10/site-packages/nltk/chat/iesha.py ADDED
@@ -0,0 +1,160 @@
1
+ # Natural Language Toolkit: Teen Chatbot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Selina Dennis <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ This chatbot is a tongue-in-cheek take on the average teen
10
+ anime junky that frequents YahooMessenger or MSNM.
11
+ All spelling mistakes and flawed grammar are intentional.
12
+ """
13
+
14
+ from nltk.chat.util import Chat
15
+
16
+ reflections = {
17
+ "am": "r",
18
+ "was": "were",
19
+ "i": "u",
20
+ "i'd": "u'd",
21
+ "i've": "u'v",
22
+ "ive": "u'v",
23
+ "i'll": "u'll",
24
+ "my": "ur",
25
+ "are": "am",
26
+ "you're": "im",
27
+ "you've": "ive",
28
+ "you'll": "i'll",
29
+ "your": "my",
30
+ "yours": "mine",
31
+ "you": "me",
32
+ "u": "me",
33
+ "ur": "my",
34
+ "urs": "mine",
35
+ "me": "u",
36
+ }
37
+
38
+ # Note: %1/2/etc are used without spaces prior as the chat bot seems
39
+ # to add a superfluous space when matching.
40
+
41
+ pairs = (
42
+ (
43
+ r"I\'m (.*)",
44
+ (
45
+ "ur%1?? that's so cool! kekekekeke ^_^ tell me more!",
46
+ "ur%1? neat!! kekeke >_<",
47
+ ),
48
+ ),
49
+ (
50
+ r"(.*) don\'t you (.*)",
51
+ (
52
+ r"u think I can%2??! really?? kekeke \<_\<",
53
+ "what do u mean%2??!",
54
+ "i could if i wanted, don't you think!! kekeke",
55
+ ),
56
+ ),
57
+ (r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")),
58
+ (
59
+ r"do (you|u) (.*)\??",
60
+ ("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"),
61
+ ),
62
+ (
63
+ r"(.*)\?",
64
+ (
65
+ "man u ask lots of questions!",
66
+ "booooring! how old r u??",
67
+ "boooooring!! ur not very fun",
68
+ ),
69
+ ),
70
+ (
71
+ r"(cos|because) (.*)",
72
+ ("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"),
73
+ ),
74
+ (
75
+ r"why can\'t [iI] (.*)",
76
+ (
77
+ "i dunno! y u askin me for!",
78
+ "try harder, silly! hee! ^_^",
79
+ "i dunno! but when i can't%1 i jump up and down!",
80
+ ),
81
+ ),
82
+ (
83
+ r"I can\'t (.*)",
84
+ (
85
+ "u can't what??! >_<",
86
+ "that's ok! i can't%1 either! kekekekeke ^_^",
87
+ "try harder, silly! hee! ^&^",
88
+ ),
89
+ ),
90
+ (
91
+ r"(.*) (like|love|watch) anime",
92
+ (
93
+ "omg i love anime!! do u like sailor moon??! ^&^",
94
+ "anime yay! anime rocks sooooo much!",
95
+ "oooh anime! i love anime more than anything!",
96
+ "anime is the bestest evar! evangelion is the best!",
97
+ "hee anime is the best! do you have ur fav??",
98
+ ),
99
+ ),
100
+ (
101
+ r"I (like|love|watch|play) (.*)",
102
+ ("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"),
103
+ ),
104
+ (
105
+ r"anime sucks|(.*) (hate|detest) anime",
106
+ (
107
+ "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*",
108
+ "no way! anime is the best ever!",
109
+ "nuh-uh, anime is the best!",
110
+ ),
111
+ ),
112
+ (
113
+ r"(are|r) (you|u) (.*)",
114
+ ("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"),
115
+ ),
116
+ (
117
+ r"what (.*)",
118
+ ("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"),
119
+ ),
120
+ (r"how (.*)", ("not tellin!! kekekekekeke ^_^",)),
121
+ (r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)),
122
+ (
123
+ r"quit",
124
+ (
125
+ "mom says i have to go eat dinner now :,( bye!!",
126
+ "awww u have to go?? see u next time!!",
127
+ "how to see u again soon! ^_^",
128
+ ),
129
+ ),
130
+ (
131
+ r"(.*)",
132
+ (
133
+ "ur funny! kekeke",
134
+ "boooooring! talk about something else! tell me wat u like!",
135
+ "do u like anime??",
136
+ "do u watch anime? i like sailor moon! ^_^",
137
+ "i wish i was a kitty!! kekekeke ^_^",
138
+ ),
139
+ ),
140
+ )
141
+
142
+ iesha_chatbot = Chat(pairs, reflections)
143
+
144
+
145
+ def iesha_chat():
146
+ print("Iesha the TeenBoT\n---------")
147
+ print("Talk to the program by typing in plain English, using normal upper-")
148
+ print('and lower-case letters and punctuation. Enter "quit" when done.')
149
+ print("=" * 72)
150
+ print("hi!! i'm iesha! who r u??!")
151
+
152
+ iesha_chatbot.converse()
153
+
154
+
155
+ def demo():
156
+ iesha_chat()
157
+
158
+
159
+ if __name__ == "__main__":
160
+ demo()
venv/lib/python3.10/site-packages/nltk/chat/rude.py ADDED
@@ -0,0 +1,125 @@
1
+ # Natural Language Toolkit: Rude Chatbot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Peter Spiller <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.chat.util import Chat, reflections
9
+
10
+ pairs = (
11
+ (
12
+ r"We (.*)",
13
+ (
14
+ "What do you mean, 'we'?",
15
+ "Don't include me in that!",
16
+ "I wouldn't be so sure about that.",
17
+ ),
18
+ ),
19
+ (
20
+ r"You should (.*)",
21
+ ("Don't tell me what to do, buddy.", "Really? I should, should I?"),
22
+ ),
23
+ (
24
+ r"You\'re(.*)",
25
+ (
26
+ "More like YOU'RE %1!",
27
+ "Hah! Look who's talking.",
28
+ "Come over here and tell me I'm %1.",
29
+ ),
30
+ ),
31
+ (
32
+ r"You are(.*)",
33
+ (
34
+ "More like YOU'RE %1!",
35
+ "Hah! Look who's talking.",
36
+ "Come over here and tell me I'm %1.",
37
+ ),
38
+ ),
39
+ (
40
+ r"I can\'t(.*)",
41
+ (
42
+ "You do sound like the type who can't %1.",
43
+ "Hear that splashing sound? That's my heart bleeding for you.",
44
+ "Tell somebody who might actually care.",
45
+ ),
46
+ ),
47
+ (
48
+ r"I think (.*)",
49
+ (
50
+ "I wouldn't think too hard if I were you.",
51
+ "You actually think? I'd never have guessed...",
52
+ ),
53
+ ),
54
+ (
55
+ r"I (.*)",
56
+ (
57
+ "I'm getting a bit tired of hearing about you.",
58
+ "How about we talk about me instead?",
59
+ "Me, me, me... Frankly, I don't care.",
60
+ ),
61
+ ),
62
+ (
63
+ r"How (.*)",
64
+ (
65
+ "How do you think?",
66
+ "Take a wild guess.",
67
+ "I'm not even going to dignify that with an answer.",
68
+ ),
69
+ ),
70
+ (r"What (.*)", ("Do I look like an encyclopedia?", "Figure it out yourself.")),
71
+ (
72
+ r"Why (.*)",
73
+ (
74
+ "Why not?",
75
+ "That's so obvious I thought even you'd have already figured it out.",
76
+ ),
77
+ ),
78
+ (
79
+ r"(.*)shut up(.*)",
80
+ (
81
+ "Make me.",
82
+ "Getting angry at a feeble NLP assignment? Somebody's losing it.",
83
+ "Say that again, I dare you.",
84
+ ),
85
+ ),
86
+ (
87
+ r"Shut up(.*)",
88
+ (
89
+ "Make me.",
90
+ "Getting angry at a feeble NLP assignment? Somebody's losing it.",
91
+ "Say that again, I dare you.",
92
+ ),
93
+ ),
94
+ (
95
+ r"Hello(.*)",
96
+ ("Oh good, somebody else to talk to. Joy.", "'Hello'? How original..."),
97
+ ),
98
+ (
99
+ r"(.*)",
100
+ (
101
+ "I'm getting bored here. Become more interesting.",
102
+ "Either become more thrilling or get lost, buddy.",
103
+ "Change the subject before I die of fatal boredom.",
104
+ ),
105
+ ),
106
+ )
107
+
108
+ rude_chatbot = Chat(pairs, reflections)
109
+
110
+
111
+ def rude_chat():
112
+ print("Talk to the program by typing in plain English, using normal upper-")
113
+ print('and lower-case letters and punctuation. Enter "quit" when done.')
114
+ print("=" * 72)
115
+ print("I suppose I should say hello.")
116
+
117
+ rude_chatbot.converse()
118
+
119
+
120
+ def demo():
121
+ rude_chat()
122
+
123
+
124
+ if __name__ == "__main__":
125
+ demo()
venv/lib/python3.10/site-packages/nltk/chat/suntsu.py ADDED
@@ -0,0 +1,140 @@
1
+ # Natural Language Toolkit: Sun Tsu-Bot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Sam Huston 2007
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Tsu bot responds to all queries with a Sun Tsu sayings
10
+
11
+ Quoted from Sun Tsu's The Art of War
12
+ Translated by LIONEL GILES, M.A. 1910
13
+ Hosted by the Gutenberg Project
14
+ https://www.gutenberg.org/
15
+ """
16
+
17
+ from nltk.chat.util import Chat, reflections
18
+
19
+ pairs = (
20
+ (r"quit", ("Good-bye.", "Plan well", "May victory be your future")),
21
+ (
22
+ r"[^\?]*\?",
23
+ (
24
+ "Please consider whether you can answer your own question.",
25
+ "Ask me no questions!",
26
+ ),
27
+ ),
28
+ (
29
+ r"[0-9]+(.*)",
30
+ (
31
+ "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
32
+ "There are five essentials for victory",
33
+ ),
34
+ ),
35
+ (
36
+ r"[A-Ca-c](.*)",
37
+ (
38
+ "The art of war is of vital importance to the State.",
39
+ "All warfare is based on deception.",
40
+ "If your opponent is secure at all points, be prepared for him. If he is in superior strength, evade him.",
41
+ "If the campaign is protracted, the resources of the State will not be equal to the strain.",
42
+ "Attack him where he is unprepared, appear where you are not expected.",
43
+ "There is no instance of a country having benefited from prolonged warfare.",
44
+ ),
45
+ ),
46
+ (
47
+ r"[D-Fd-f](.*)",
48
+ (
49
+ "The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.",
50
+ "Bring war material with you from home, but forage on the enemy.",
51
+ "In war, then, let your great object be victory, not lengthy campaigns.",
52
+ "To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.",
53
+ ),
54
+ ),
55
+ (
56
+ r"[G-Ig-i](.*)",
57
+ (
58
+ "Heaven signifies night and day, cold and heat, times and seasons.",
59
+ "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
60
+ "The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.",
61
+ "One may know how to conquer without being able to do it.",
62
+ ),
63
+ ),
64
+ (
65
+ r"[J-Lj-l](.*)",
66
+ (
67
+ "There are three ways in which a ruler can bring misfortune upon his army.",
68
+ "By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the army.",
69
+ "By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.",
70
+ "By employing the officers of his army without discrimination, through ignorance of the military principle of adaptation to circumstances. This shakes the confidence of the soldiers.",
71
+ "There are five essentials for victory",
72
+ "He will win who knows when to fight and when not to fight.",
73
+ "He will win who knows how to handle both superior and inferior forces.",
74
+ "He will win whose army is animated by the same spirit throughout all its ranks.",
75
+ "He will win who, prepared himself, waits to take the enemy unprepared.",
76
+ "He will win who has military capacity and is not interfered with by the sovereign.",
77
+ ),
78
+ ),
79
+ (
80
+ r"[M-Om-o](.*)",
81
+ (
82
+ "If you know the enemy and know yourself, you need not fear the result of a hundred battles.",
83
+ "If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.",
84
+ "If you know neither the enemy nor yourself, you will succumb in every battle.",
85
+ "The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.",
86
+ ),
87
+ ),
88
+ (
89
+ r"[P-Rp-r](.*)",
90
+ (
91
+ "Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.",
92
+ "Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.",
93
+ "He wins his battles by making no mistakes. Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.",
94
+ "A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.",
95
+ "The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.",
96
+ ),
97
+ ),
98
+ (
99
+ r"[S-Us-u](.*)",
100
+ (
101
+ "What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.",
102
+ "Hence his victories bring him neither reputation for wisdom nor credit for courage.",
103
+ "Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.",
104
+ "In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.",
105
+ "There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.",
106
+ "Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.",
107
+ ),
108
+ ),
109
+ (
110
+ r"[V-Zv-z](.*)",
111
+ (
112
+ "It is a matter of life and death, a road either to safety or to ruin.",
113
+ "Hold out baits to entice the enemy. Feign disorder, and crush him.",
114
+ "All men can see the tactics whereby I conquer, but what none can see is the strategy out of which victory is evolved.",
115
+ "Do not repeat the tactics which have gained you one victory, but let your methods be regulated by the infinite variety of circumstances.",
116
+ "So in war, the way is to avoid what is strong and to strike at what is weak.",
117
+ "Just as water retains no constant shape, so in warfare there are no constant conditions.",
118
+ ),
119
+ ),
120
+ (r"(.*)", ("Your statement insults me.", "")),
121
+ )
122
+
123
+ suntsu_chatbot = Chat(pairs, reflections)
124
+
125
+
126
+ def suntsu_chat():
127
+ print("Talk to the program by typing in plain English, using normal upper-")
128
+ print('and lower-case letters and punctuation. Enter "quit" when done.')
129
+ print("=" * 72)
130
+ print("You seek enlightenment?")
131
+
132
+ suntsu_chatbot.converse()
133
+
134
+
135
+ def demo():
136
+ suntsu_chat()
137
+
138
+
139
+ if __name__ == "__main__":
140
+ demo()
venv/lib/python3.10/site-packages/nltk/chat/util.py ADDED
@@ -0,0 +1,124 @@
+ # Natural Language Toolkit: Chatbot Utilities
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Authors: Steven Bird <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ # Based on an Eliza implementation by Joe Strout <[email protected]>,
+ # Jeff Epler <[email protected]> and Jez Higgins <[email protected]>.
+
+ import random
+ import re
+
+ reflections = {
+     "i am": "you are",
+     "i was": "you were",
+     "i": "you",
+     "i'm": "you are",
+     "i'd": "you would",
+     "i've": "you have",
+     "i'll": "you will",
+     "my": "your",
+     "you are": "I am",
+     "you were": "I was",
+     "you've": "I have",
+     "you'll": "I will",
+     "your": "my",
+     "yours": "mine",
+     "you": "me",
+     "me": "you",
+ }
+
+
+ class Chat:
+     def __init__(self, pairs, reflections={}):
+         """
+         Initialize the chatbot. Pairs is a list of patterns and responses. Each
+         pattern is a regular expression matching the user's statement or question,
+         e.g. r'I like (.*)'. For each such pattern a list of possible responses
+         is given, e.g. ['Why do you like %1', 'Did you ever dislike %1']. Material
+         which is matched by parenthesized sections of the patterns (e.g. .*) is mapped to
+         the numbered positions in the responses, e.g. %1.
+
+         :type pairs: list of tuple
+         :param pairs: The patterns and responses
+         :type reflections: dict
+         :param reflections: A mapping between first and second person expressions
+         :rtype: None
+         """
+
+         self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs]
+         self._reflections = reflections
+         self._regex = self._compile_reflections()
+
+     def _compile_reflections(self):
+         sorted_refl = sorted(self._reflections, key=len, reverse=True)
+         return re.compile(
+             r"\b({})\b".format("|".join(map(re.escape, sorted_refl))), re.IGNORECASE
+         )
+
+     def _substitute(self, str):
+         """
+         Substitute words in the string, according to the specified reflections,
+         e.g. "I'm" -> "you are"
+
+         :type str: str
+         :param str: The string to be mapped
+         :rtype: str
+         """
+
+         return self._regex.sub(
+             lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower()
+         )
+
+     def _wildcards(self, response, match):
+         pos = response.find("%")
+         while pos >= 0:
+             num = int(response[pos + 1 : pos + 2])
+             response = (
+                 response[:pos]
+                 + self._substitute(match.group(num))
+                 + response[pos + 2 :]
+             )
+             pos = response.find("%")
+         return response
+
+     def respond(self, str):
+         """
+         Generate a response to the user input.
+
+         :type str: str
+         :param str: The string to be mapped
+         :rtype: str
+         """
+
+         # check each pattern
+         for (pattern, response) in self._pairs:
+             match = pattern.match(str)
+
+             # did the pattern match?
+             if match:
+                 resp = random.choice(response)  # pick a random response
+                 resp = self._wildcards(resp, match)  # process wildcards
+
+                 # fix munged punctuation at the end
+                 if resp[-2:] == "?.":
+                     resp = resp[:-2] + "."
+                 if resp[-2:] == "??":
+                     resp = resp[:-2] + "?"
+                 return resp
+
+     # Hold a conversation with a chatbot
+     def converse(self, quit="quit"):
+         user_input = ""
+         while user_input != quit:
+             user_input = quit
+             try:
+                 user_input = input(">")
+             except EOFError:
+                 print(user_input)
+             if user_input:
+                 while user_input[-1] in "!.":
+                     user_input = user_input[:-1]
+                 print(self.respond(user_input))
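
util.py is the engine behind every bot in this package: Chat tries the patterns top to bottom, the first match wins, %N placeholders are filled from the regex groups, and the reflections table flips first and second person in the substituted text. Below is a minimal self-contained sketch with an invented two-rule table (not part of NLTK itself); each response tuple has a single entry so the output is deterministic.

from nltk.chat.util import Chat, reflections

# Invented toy rule table: one specific pattern plus a catch-all fallback.
toy_pairs = (
    (r"I like (.*)", ("Why do you like %1?",)),
    (r"(.*)", ("Tell me more.",)),
)

toy_bot = Chat(toy_pairs, reflections)

# "my dog" is reflected to "your dog" before being substituted for %1.
print(toy_bot.respond("I like my dog"))  # Why do you like your dog?
print(toy_bot.respond("Nothing much"))   # Tell me more.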
venv/lib/python3.10/site-packages/nltk/chat/zen.py ADDED
@@ -0,0 +1,329 @@
1
+ # Natural Language Toolkit: Zen Chatbot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Amy Holland <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Zen Chatbot talks in gems of Zen wisdom.
10
+
11
+ This is a sample conversation with Zen Chatbot:
12
+ ZC: Welcome, my child.
13
+ me: Good afternoon.
14
+ ZC: Ask the question you have come to ask.
15
+ me: How can I achieve enlightenment?
16
+ ZC: How do you suppose?
17
+ me: Through meditation.
18
+ ZC: Form is emptiness, and emptiness form.
19
+ me: How can I empty my mind of worldly troubles?
20
+ ZC: Will an answer to that really help in your search for enlightenment?
21
+ me: Yes.
22
+ ZC: It is better to be right than to be certain.
23
+ me: I seek truth and wisdom.
24
+ ZC: The search for truth is a long journey.
25
+ me: Are you sure?
26
+ ZC: Maybe sure, maybe not sure.
27
+
28
+
29
+ The chatbot structure is based on that of chat.eliza. Thus, it uses
30
+ a translation table to convert from question to response
31
+ i.e. "I am" --> "you are"
32
+
33
+ Of course, since Zen Chatbot does not understand the meaning of any words,
34
+ responses are very limited. Zen Chatbot will usually answer very vaguely, or
35
+ respond to a question by asking a different question, in much the same way
36
+ as Eliza.
37
+ """
38
+
39
+ from nltk.chat.util import Chat, reflections
40
+
41
+ # responses are matched top to bottom, so non-specific matches occur later
42
+ # for each match, a list of possible responses is provided
43
+ responses = (
44
+ # Zen Chatbot opens with the line "Welcome, my child." The usual
45
+ # response will be a greeting problem: 'good' matches "good morning",
46
+ # "good day" etc, but also "good grief!" and other sentences starting
47
+ # with the word 'good' that may not be a greeting
48
+ (
49
+ r"(hello(.*))|(good [a-zA-Z]+)",
50
+ (
51
+ "The path to enlightenment is often difficult to see.",
52
+ "Greetings. I sense your mind is troubled. Tell me of your troubles.",
53
+ "Ask the question you have come to ask.",
54
+ "Hello. Do you seek englightenment?",
55
+ ),
56
+ ),
57
+ # "I need" and "I want" can be followed by a thing (eg 'help')
58
+ # or an action (eg 'to see you')
59
+ #
60
+ # This is a problem with this style of response -
61
+ # person: "I need you"
62
+ # chatbot: "me can be achieved by hard work and dedication of the mind"
63
+ # i.e. 'you' is not really a thing that can be mapped this way, so this
64
+ # interpretation only makes sense for some inputs
65
+ #
66
+ (
67
+ r"i need (.*)",
68
+ (
69
+ "%1 can be achieved by hard work and dedication of the mind.",
70
+ "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
71
+ "Focus your mind on%1, and you will find what you need.",
72
+ ),
73
+ ),
74
+ (
75
+ r"i want (.*)",
76
+ (
77
+ "Desires of the heart will distract you from the path to enlightenment.",
78
+ "Will%1 help you attain enlightenment?",
79
+ "Is%1 a desire of the mind, or of the heart?",
80
+ ),
81
+ ),
82
+ # why questions are separated into three types:
83
+ # "why..I" e.g. "why am I here?" "Why do I like cake?"
84
+ # "why..you" e.g. "why are you here?" "Why won't you tell me?"
85
+ # "why..." e.g. "Why is the sky blue?"
86
+ # problems:
87
+ # person: "Why can't you tell me?"
88
+ # chatbot: "Are you sure I tell you?"
89
+ # - this style works for positives (e.g. "why do you like cake?")
90
+ # but does not work for negatives (e.g. "why don't you like cake?")
91
+ (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")),
92
+ (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")),
93
+ (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")),
94
+ # e.g. "are you listening?", "are you a duck"
95
+ (
96
+ r"are you (.*)\?",
97
+ ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."),
98
+ ),
99
+ # e.g. "am I a duck?", "am I going to die?"
100
+ (
101
+ r"am i (.*)\?",
102
+ ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."),
103
+ ),
104
+ # what questions, e.g. "what time is it?"
105
+ # problems:
106
+ # person: "What do you want?"
107
+ # chatbot: "Seek truth, not what do me want."
108
+ (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")),
109
+ # how questions, e.g. "how do you do?"
110
+ (
111
+ r"how (.*)\?",
112
+ (
113
+ "How do you suppose?",
114
+ "Will an answer to that really help in your search for enlightenment?",
115
+ "Ask yourself not how, but why.",
116
+ ),
117
+ ),
118
+ # can questions, e.g. "can you run?", "can you come over here please?"
119
+ (
120
+ r"can you (.*)\?",
121
+ (
122
+ "I probably can, but I may not.",
123
+ "Maybe I can%1, and maybe I cannot.",
124
+ "I can do all, and I can do nothing.",
125
+ ),
126
+ ),
127
+ # can questions, e.g. "can I have some cake?", "can I know truth?"
128
+ (
129
+ r"can i (.*)\?",
130
+ (
131
+ "You can%1 if you believe you can%1, and have a pure spirit.",
132
+ "Seek truth and you will know if you can%1.",
133
+ ),
134
+ ),
135
+ # e.g. "It is raining" - implies the speaker is certain of a fact
136
+ (
137
+ r"it is (.*)",
138
+ (
139
+ "How can you be certain that%1, when you do not even know yourself?",
140
+ "Whether it is%1 or not does not change the way the world is.",
141
+ ),
142
+ ),
143
+ # e.g. "is there a doctor in the house?"
144
+ (
145
+ r"is there (.*)\?",
146
+ ("There is%1 if you believe there is.", "It is possible that there is%1."),
147
+ ),
148
+ # e.g. "is it possible?", "is this true?"
149
+ (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")),
150
+ # non-specific question
151
+ (
152
+ r"(.*)\?",
153
+ (
154
+ "Do you think %1?",
155
+ "You seek the truth. Does the truth seek you?",
156
+ "If you intentionally pursue the answers to your questions, the answers become hard to see.",
157
+ "The answer to your question cannot be told. It must be experienced.",
158
+ ),
159
+ ),
160
+ # expression of hate of form "I hate you" or "Kelly hates cheese"
161
+ (
162
+ r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)",
163
+ (
164
+ "Perhaps it is not about hating %2, but about hate from within.",
165
+ "Weeds only grow when we dislike them",
166
+ "Hate is a very strong emotion.",
167
+ ),
168
+ ),
169
+ # statement containing the word 'truth'
170
+ (
171
+ r"(.*) truth(.*)",
172
+ (
173
+ "Seek truth, and truth will seek you.",
174
+ "Remember, it is not the spoon which bends - only yourself.",
175
+ "The search for truth is a long journey.",
176
+ ),
177
+ ),
178
+ # desire to do an action
179
+ # e.g. "I want to go shopping"
180
+ (
181
+ r"i want to (.*)",
182
+ ("You may %1 if your heart truly desires to.", "You may have to %1."),
183
+ ),
184
+ # desire for an object
185
+ # e.g. "I want a pony"
186
+ (
187
+ r"i want (.*)",
188
+ (
189
+ "Does your heart truly desire %1?",
190
+ "Is this a desire of the heart, or of the mind?",
191
+ ),
192
+ ),
193
+ # e.g. "I can't wait" or "I can't do this"
194
+ (
195
+ r"i can\'t (.*)",
196
+ (
197
+ "What we can and can't do is a limitation of the mind.",
198
+ "There are limitations of the body, and limitations of the mind.",
199
+ "Have you tried to%1 with a clear mind?",
200
+ ),
201
+ ),
202
+ # "I think.." indicates uncertainty. e.g. "I think so."
203
+ # problem: exceptions...
204
+ # e.g. "I think, therefore I am"
205
+ (
206
+ r"i think (.*)",
207
+ (
208
+ "Uncertainty in an uncertain world.",
209
+ "Indeed, how can we be certain of anything in such uncertain times.",
210
+ "Are you not, in fact, certain that%1?",
211
+ ),
212
+ ),
213
+ # "I feel...emotions/sick/light-headed..."
214
+ (
215
+ r"i feel (.*)",
216
+ (
217
+ "Your body and your emotions are both symptoms of your mind."
218
+ "What do you believe is the root of such feelings?",
219
+ "Feeling%1 can be a sign of your state-of-mind.",
220
+ ),
221
+ ),
222
+ # exclaimation mark indicating emotion
223
+ # e.g. "Wow!" or "No!"
224
+ (
225
+ r"(.*)!",
226
+ (
227
+ "I sense that you are feeling emotional today.",
228
+ "You need to calm your emotions.",
229
+ ),
230
+ ),
231
+ # because [statement]
232
+ # e.g. "because I said so"
233
+ (
234
+ r"because (.*)",
235
+ (
236
+ "Does knowning the reasons behind things help you to understand"
237
+ " the things themselves?",
238
+ "If%1, what else must be true?",
239
+ ),
240
+ ),
241
+ # yes or no - raise an issue of certainty/correctness
242
+ (
243
+ r"(yes)|(no)",
244
+ (
245
+ "Is there certainty in an uncertain world?",
246
+ "It is better to be right than to be certain.",
247
+ ),
248
+ ),
249
+ # sentence containing word 'love'
250
+ (
251
+ r"(.*)love(.*)",
252
+ (
253
+ "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
254
+ "Free love!",
255
+ ),
256
+ ),
257
+ # sentence containing word 'understand' - r
258
+ (
259
+ r"(.*)understand(.*)",
260
+ (
261
+ "If you understand, things are just as they are;"
262
+ " if you do not understand, things are just as they are.",
263
+ "Imagination is more important than knowledge.",
264
+ ),
265
+ ),
266
+ # 'I', 'me', 'my' - person is talking about themself.
267
+ # this breaks down when words contain these - eg 'Thyme', 'Irish'
268
+ (
269
+ r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)",
270
+ (
271
+ "'I', 'me', 'my'... these are selfish expressions.",
272
+ "Have you ever considered that you might be a selfish person?",
273
+ "Try to consider others, not just yourself.",
274
+ "Think not just of yourself, but of others.",
275
+ ),
276
+ ),
277
+ # 'you' starting a sentence
278
+ # e.g. "you stink!"
279
+ (
280
+ r"you (.*)",
281
+ ("My path is not of concern to you.", "I am but one, and you but one more."),
282
+ ),
283
+ # say goodbye with some extra Zen wisdom.
284
+ (
285
+ r"exit",
286
+ (
287
+ "Farewell. The obstacle is the path.",
288
+ "Farewell. Life is a journey, not a destination.",
289
+ "Good bye. We are cups, constantly and quietly being filled."
290
+ "\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.",
291
+ ),
292
+ ),
293
+ # fall through case -
294
+ # when stumped, respond with generic zen wisdom
295
+ #
296
+ (
297
+ r"(.*)",
298
+ (
299
+ "When you're enlightened, every word is wisdom.",
300
+ "Random talk is useless.",
301
+ "The reverse side also has a reverse side.",
302
+ "Form is emptiness, and emptiness is form.",
303
+ "I pour out a cup of water. Is the cup empty?",
304
+ ),
305
+ ),
306
+ )
307
+
308
+ zen_chatbot = Chat(responses, reflections)
309
+
310
+
311
+ def zen_chat():
312
+ print("*" * 75)
313
+ print("Zen Chatbot!".center(75))
314
+ print("*" * 75)
315
+ print('"Look beyond mere words and letters - look into your mind"'.center(75))
316
+ print("* Talk your way to truth with Zen Chatbot.")
317
+ print("* Type 'quit' when you have had enough.")
318
+ print("*" * 75)
319
+ print("Welcome, my child.")
320
+
321
+ zen_chatbot.converse()
322
+
323
+
324
+ def demo():
325
+ zen_chat()
326
+
327
+
328
+ if __name__ == "__main__":
329
+ demo()
venv/lib/python3.10/site-packages/nltk/classify/__init__.py ADDED
@@ -0,0 +1,101 @@
+ # Natural Language Toolkit: Classifiers
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Edward Loper <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ """
+ Classes and interfaces for labeling tokens with category labels (or
+ "class labels"). Typically, labels are represented with strings
+ (such as ``'health'`` or ``'sports'``). Classifiers can be used to
+ perform a wide range of classification tasks. For example,
+ classifiers can be used...
+
+ - to classify documents by topic
+ - to classify ambiguous words by which word sense is intended
+ - to classify acoustic signals by which phoneme they represent
+ - to classify sentences by their author
+
+ Features
+ ========
+ In order to decide which category label is appropriate for a given
+ token, classifiers examine one or more 'features' of the token. These
+ "features" are typically chosen by hand, and indicate which aspects
+ of the token are relevant to the classification decision. For
+ example, a document classifier might use a separate feature for each
+ word, recording how often that word occurred in the document.
+
+ Featuresets
+ ===========
+ The features describing a token are encoded using a "featureset",
+ which is a dictionary that maps from "feature names" to "feature
+ values". Feature names are unique strings that indicate what aspect
+ of the token is encoded by the feature. Examples include
+ ``'prevword'``, for a feature whose value is the previous word; and
+ ``'contains-word(library)'`` for a feature that is true when a document
+ contains the word ``'library'``. Feature values are typically
+ booleans, numbers, or strings, depending on which feature they
+ describe.
+
+ Featuresets are typically constructed using a "feature detector"
+ (also known as a "feature extractor"). A feature detector is a
+ function that takes a token (and sometimes information about its
+ context) as its input, and returns a featureset describing that token.
+ For example, the following feature detector converts a document
+ (stored as a list of words) to a featureset describing the set of
+ words included in the document:
+
+     >>> # Define a feature detector function.
+     >>> def document_features(document):
+     ...     return dict([('contains-word(%s)' % w, True) for w in document])
+
+ Feature detectors are typically applied to each token before it is fed
+ to the classifier:
+
+     >>> # Classify each Gutenberg document.
+     >>> from nltk.corpus import gutenberg
+     >>> for fileid in gutenberg.fileids(): # doctest: +SKIP
+     ...     doc = gutenberg.words(fileid) # doctest: +SKIP
+     ...     print(fileid, classifier.classify(document_features(doc))) # doctest: +SKIP
+
+ The parameters that a feature detector expects will vary, depending on
+ the task and the needs of the feature detector. For example, a
+ feature detector for word sense disambiguation (WSD) might take as its
+ input a sentence, and the index of a word that should be classified,
+ and return a featureset for that word. The following feature detector
+ for WSD includes features describing the left and right contexts of
+ the target word:
+
+     >>> def wsd_features(sentence, index):
+     ...     featureset = {}
+     ...     for i in range(max(0, index-3), index):
+     ...         featureset['left-context(%s)' % sentence[i]] = True
+     ...     for i in range(index, max(index+3, len(sentence))):
+     ...         featureset['right-context(%s)' % sentence[i]] = True
+     ...     return featureset
+
+ Training Classifiers
+ ====================
+ Most classifiers are built by training them on a list of hand-labeled
+ examples, known as the "training set". Training sets are represented
+ as lists of ``(featuredict, label)`` tuples.
+ """
+
+ from nltk.classify.api import ClassifierI, MultiClassifierI
+ from nltk.classify.decisiontree import DecisionTreeClassifier
+ from nltk.classify.maxent import (
+     BinaryMaxentFeatureEncoding,
+     ConditionalExponentialClassifier,
+     MaxentClassifier,
+     TypedMaxentFeatureEncoding,
+ )
+ from nltk.classify.megam import call_megam, config_megam
+ from nltk.classify.naivebayes import NaiveBayesClassifier
+ from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier
+ from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features
+ from nltk.classify.scikitlearn import SklearnClassifier
+ from nltk.classify.senna import Senna
+ from nltk.classify.textcat import TextCat
+ from nltk.classify.util import accuracy, apply_features, log_likelihood
+ from nltk.classify.weka import WekaClassifier, config_weka
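
The module docstring above ends with the training-set convention: a training set is a list of (featuredict, label) tuples, and each featuredict comes from a feature detector such as document_features(). As a rough end-to-end sketch using the NaiveBayesClassifier re-exported by this module (the toy documents and labels are invented purely for illustration):

from nltk.classify import NaiveBayesClassifier, accuracy

def document_features(document):
    # Same shape as the feature detector shown in the docstring above.
    return {"contains-word(%s)" % w: True for w in document}

# Invented toy corpus: (list of words, label) pairs.
labeled_docs = [
    (["the", "striker", "scored", "a", "goal"], "sports"),
    (["the", "team", "won", "the", "match"], "sports"),
    (["the", "vaccine", "reduces", "infection", "risk"], "health"),
    (["a", "balanced", "diet", "improves", "wellbeing"], "health"),
]
train_set = [(document_features(doc), label) for doc, label in labeled_docs]

classifier = NaiveBayesClassifier.train(train_set)
print(classifier.classify(document_features(["the", "striker", "won"])))
print(accuracy(classifier, train_set))  # accuracy on the (tiny) training set itself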
venv/lib/python3.10/site-packages/nltk/classify/api.py ADDED
@@ -0,0 +1,195 @@
1
+ # Natural Language Toolkit: Classifier Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Interfaces for labeling tokens with category labels (or "class labels").
11
+
12
+ ``ClassifierI`` is a standard interface for "single-category
13
+ classification", in which the set of categories is known, the number
14
+ of categories is finite, and each text belongs to exactly one
15
+ category.
16
+
17
+ ``MultiClassifierI`` is a standard interface for "multi-category
18
+ classification", which is like single-category classification except
19
+ that each text belongs to zero or more categories.
20
+ """
21
+ from nltk.internals import overridden
22
+
23
+ ##//////////////////////////////////////////////////////
24
+ # { Classification Interfaces
25
+ ##//////////////////////////////////////////////////////
26
+
27
+
28
+ class ClassifierI:
29
+ """
30
+ A processing interface for labeling tokens with a single category
31
+ label (or "class"). Labels are typically strs or
32
+ ints, but can be any immutable type. The set of labels
33
+ that the classifier chooses from must be fixed and finite.
34
+
35
+ Subclasses must define:
36
+ - ``labels()``
37
+ - either ``classify()`` or ``classify_many()`` (or both)
38
+
39
+ Subclasses may define:
40
+ - either ``prob_classify()`` or ``prob_classify_many()`` (or both)
41
+ """
42
+
43
+ def labels(self):
44
+ """
45
+ :return: the list of category labels used by this classifier.
46
+ :rtype: list of (immutable)
47
+ """
48
+ raise NotImplementedError()
49
+
50
+ def classify(self, featureset):
51
+ """
52
+ :return: the most appropriate label for the given featureset.
53
+ :rtype: label
54
+ """
55
+ if overridden(self.classify_many):
56
+ return self.classify_many([featureset])[0]
57
+ else:
58
+ raise NotImplementedError()
59
+
60
+ def prob_classify(self, featureset):
61
+ """
62
+ :return: a probability distribution over labels for the given
63
+ featureset.
64
+ :rtype: ProbDistI
65
+ """
66
+ if overridden(self.prob_classify_many):
67
+ return self.prob_classify_many([featureset])[0]
68
+ else:
69
+ raise NotImplementedError()
70
+
71
+ def classify_many(self, featuresets):
72
+ """
73
+ Apply ``self.classify()`` to each element of ``featuresets``. I.e.:
74
+
75
+ return [self.classify(fs) for fs in featuresets]
76
+
77
+ :rtype: list(label)
78
+ """
79
+ return [self.classify(fs) for fs in featuresets]
80
+
81
+ def prob_classify_many(self, featuresets):
82
+ """
83
+ Apply ``self.prob_classify()`` to each element of ``featuresets``. I.e.:
84
+
85
+ return [self.prob_classify(fs) for fs in featuresets]
86
+
87
+ :rtype: list(ProbDistI)
88
+ """
89
+ return [self.prob_classify(fs) for fs in featuresets]
90
+
91
+
92
+ class MultiClassifierI:
93
+ """
94
+ A processing interface for labeling tokens with zero or more
95
+ category labels (or "labels"). Labels are typically strs
96
+ or ints, but can be any immutable type. The set of labels
97
+ that the multi-classifier chooses from must be fixed and finite.
98
+
99
+ Subclasses must define:
100
+ - ``labels()``
101
+ - either ``classify()`` or ``classify_many()`` (or both)
102
+
103
+ Subclasses may define:
104
+ - either ``prob_classify()`` or ``prob_classify_many()`` (or both)
105
+ """
106
+
107
+ def labels(self):
108
+ """
109
+ :return: the list of category labels used by this classifier.
110
+ :rtype: list of (immutable)
111
+ """
112
+ raise NotImplementedError()
113
+
114
+ def classify(self, featureset):
115
+ """
116
+ :return: the most appropriate set of labels for the given featureset.
117
+ :rtype: set(label)
118
+ """
119
+ if overridden(self.classify_many):
120
+ return self.classify_many([featureset])[0]
121
+ else:
122
+ raise NotImplementedError()
123
+
124
+ def prob_classify(self, featureset):
125
+ """
126
+ :return: a probability distribution over sets of labels for the
127
+ given featureset.
128
+ :rtype: ProbDistI
129
+ """
130
+ if overridden(self.prob_classify_many):
131
+ return self.prob_classify_many([featureset])[0]
132
+ else:
133
+ raise NotImplementedError()
134
+
135
+ def classify_many(self, featuresets):
136
+ """
137
+ Apply ``self.classify()`` to each element of ``featuresets``. I.e.:
138
+
139
+ return [self.classify(fs) for fs in featuresets]
140
+
141
+ :rtype: list(set(label))
142
+ """
143
+ return [self.classify(fs) for fs in featuresets]
144
+
145
+ def prob_classify_many(self, featuresets):
146
+ """
147
+ Apply ``self.prob_classify()`` to each element of ``featuresets``. I.e.:
148
+
149
+ return [self.prob_classify(fs) for fs in featuresets]
150
+
151
+ :rtype: list(ProbDistI)
152
+ """
153
+ return [self.prob_classify(fs) for fs in featuresets]
154
+
155
+
156
+ # # [XX] IN PROGRESS:
157
+ # class SequenceClassifierI:
158
+ # """
159
+ # A processing interface for labeling sequences of tokens with a
160
+ # single category label (or "class"). Labels are typically
161
+ # strs or ints, but can be any immutable type. The set
162
+ # of labels that the classifier chooses from must be fixed and
163
+ # finite.
164
+ # """
165
+ # def labels(self):
166
+ # """
167
+ # :return: the list of category labels used by this classifier.
168
+ # :rtype: list of (immutable)
169
+ # """
170
+ # raise NotImplementedError()
171
+
172
+ # def prob_classify(self, featureset):
173
+ # """
174
+ # Return a probability distribution over labels for the given
175
+ # featureset.
176
+
177
+ # If ``featureset`` is a list of featuresets, then return a
178
+ # corresponding list containing the probability distribution
179
+ # over labels for each of the given featuresets, where the
180
+ # *i*\ th element of this list is the most appropriate label for
181
+ # the *i*\ th element of ``featuresets``.
182
+ # """
183
+ # raise NotImplementedError()
184
+
185
+ # def classify(self, featureset):
186
+ # """
187
+ # Return the most appropriate label for the given featureset.
188
+
189
+ # If ``featureset`` is a list of featuresets, then return a
190
+ # corresponding list containing the most appropriate label for
191
+ # each of the given featuresets, where the *i*\ th element of
192
+ # this list is the most appropriate label for the *i*\ th element
193
+ # of ``featuresets``.
194
+ # """
195
+ # raise NotImplementedError()
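A minimal usage sketch of the ``ClassifierI`` interface defined above (the ``MajorityClassifier`` class and its toy data are hypothetical, not part of NLTK): a subclass only needs ``labels()`` and ``classify()``, and the inherited ``classify_many()`` falls back to calling ``classify()`` on each featureset.

    from collections import Counter
    from nltk.classify.api import ClassifierI

    class MajorityClassifier(ClassifierI):
        """Toy classifier that always predicts the most frequent training label."""

        def __init__(self, labeled_featuresets):
            self._label = Counter(label for _, label in labeled_featuresets).most_common(1)[0][0]

        def labels(self):
            return [self._label]

        def classify(self, featureset):
            return self._label

    toy_train = [({"word": "good"}, "pos"), ({"word": "bad"}, "neg"), ({"word": "fine"}, "pos")]
    clf = MajorityClassifier(toy_train)
    print(clf.classify({"word": "great"}))         # -> 'pos'
    print(clf.classify_many([{"word": "x"}] * 2))  # default implementation calls classify() per item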
venv/lib/python3.10/site-packages/nltk/classify/decisiontree.py ADDED
@@ -0,0 +1,349 @@
1
+ # Natural Language Toolkit: Decision Tree Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A classifier model that decides which label to assign to a token on
10
+ the basis of a tree structure, where branches correspond to conditions
11
+ on feature values, and leaves correspond to label assignments.
12
+ """
13
+
14
+ from collections import defaultdict
15
+
16
+ from nltk.classify.api import ClassifierI
17
+ from nltk.probability import FreqDist, MLEProbDist, entropy
18
+
19
+
20
+ class DecisionTreeClassifier(ClassifierI):
21
+ def __init__(self, label, feature_name=None, decisions=None, default=None):
22
+ """
23
+ :param label: The most likely label for tokens that reach
24
+ this node in the decision tree. If this decision tree
25
+ has no children, then this label will be assigned to
26
+ any token that reaches this decision tree.
27
+ :param feature_name: The name of the feature that this
28
+ decision tree selects for.
29
+ :param decisions: A dictionary mapping from feature values
30
+ for the feature identified by ``feature_name`` to
31
+ child decision trees.
32
+ :param default: The child that will be used if the value of
33
+ feature ``feature_name`` does not match any of the keys in
34
+ ``decisions``. This is used when constructing binary
35
+ decision trees.
36
+ """
37
+ self._label = label
38
+ self._fname = feature_name
39
+ self._decisions = decisions
40
+ self._default = default
41
+
42
+ def labels(self):
43
+ labels = [self._label]
44
+ if self._decisions is not None:
45
+ for dt in self._decisions.values():
46
+ labels.extend(dt.labels())
47
+ if self._default is not None:
48
+ labels.extend(self._default.labels())
49
+ return list(set(labels))
50
+
51
+ def classify(self, featureset):
52
+ # Decision leaf:
53
+ if self._fname is None:
54
+ return self._label
55
+
56
+ # Decision tree:
57
+ fval = featureset.get(self._fname)
58
+ if fval in self._decisions:
59
+ return self._decisions[fval].classify(featureset)
60
+ elif self._default is not None:
61
+ return self._default.classify(featureset)
62
+ else:
63
+ return self._label
64
+
65
+ def error(self, labeled_featuresets):
66
+ errors = 0
67
+ for featureset, label in labeled_featuresets:
68
+ if self.classify(featureset) != label:
69
+ errors += 1
70
+ return errors / len(labeled_featuresets)
71
+
72
+ def pretty_format(self, width=70, prefix="", depth=4):
73
+ """
74
+ Return a string containing a pretty-printed version of this
75
+ decision tree. Each line in this string corresponds to a
76
+ single decision tree node or leaf, and indentation is used to
77
+ display the structure of the decision tree.
78
+ """
79
+ # [xx] display default!!
80
+ if self._fname is None:
81
+ n = width - len(prefix) - 15
82
+ return "{}{} {}\n".format(prefix, "." * n, self._label)
83
+ s = ""
84
+ for i, (fval, result) in enumerate(
85
+ sorted(
86
+ self._decisions.items(),
87
+ key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()),
88
+ )
89
+ ):
90
+ hdr = f"{prefix}{self._fname}={fval}? "
91
+ n = width - 15 - len(hdr)
92
+ s += "{}{} {}\n".format(hdr, "." * (n), result._label)
93
+ if result._fname is not None and depth > 1:
94
+ s += result.pretty_format(width, prefix + " ", depth - 1)
95
+ if self._default is not None:
96
+ n = width - len(prefix) - 21
97
+ s += "{}else: {} {}\n".format(prefix, "." * n, self._default._label)
98
+ if self._default._fname is not None and depth > 1:
99
+ s += self._default.pretty_format(width, prefix + " ", depth - 1)
100
+ return s
101
+
102
+ def pseudocode(self, prefix="", depth=4):
103
+ """
104
+ Return a string representation of this decision tree that
105
+ expresses the decisions it makes as a nested set of pseudocode
106
+ if statements.
107
+ """
108
+ if self._fname is None:
109
+ return f"{prefix}return {self._label!r}\n"
110
+ s = ""
111
+ for (fval, result) in sorted(
112
+ self._decisions.items(),
113
+ key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()),
114
+ ):
115
+ s += f"{prefix}if {self._fname} == {fval!r}: "
116
+ if result._fname is not None and depth > 1:
117
+ s += "\n" + result.pseudocode(prefix + " ", depth - 1)
118
+ else:
119
+ s += f"return {result._label!r}\n"
120
+ if self._default is not None:
121
+ if len(self._decisions) == 1:
122
+ s += "{}if {} != {!r}: ".format(
123
+ prefix, self._fname, list(self._decisions.keys())[0]
124
+ )
125
+ else:
126
+ s += f"{prefix}else: "
127
+ if self._default._fname is not None and depth > 1:
128
+ s += "\n" + self._default.pseudocode(prefix + " ", depth - 1)
129
+ else:
130
+ s += f"return {self._default._label!r}\n"
131
+ return s
132
+
133
+ def __str__(self):
134
+ return self.pretty_format()
135
+
136
+ @staticmethod
137
+ def train(
138
+ labeled_featuresets,
139
+ entropy_cutoff=0.05,
140
+ depth_cutoff=100,
141
+ support_cutoff=10,
142
+ binary=False,
143
+ feature_values=None,
144
+ verbose=False,
145
+ ):
146
+ """
147
+ :param binary: If true, then treat all feature/value pairs as
148
+ individual binary features, rather than using a single n-way
149
+ branch for each feature.
150
+ """
151
+ # Collect a list of all feature names.
152
+ feature_names = set()
153
+ for featureset, label in labeled_featuresets:
154
+ for fname in featureset:
155
+ feature_names.add(fname)
156
+
157
+ # Collect a list of the values each feature can take.
158
+ if feature_values is None and binary:
159
+ feature_values = defaultdict(set)
160
+ for featureset, label in labeled_featuresets:
161
+ for fname, fval in featureset.items():
162
+ feature_values[fname].add(fval)
163
+
164
+ # Start with a stump.
165
+ if not binary:
166
+ tree = DecisionTreeClassifier.best_stump(
167
+ feature_names, labeled_featuresets, verbose
168
+ )
169
+ else:
170
+ tree = DecisionTreeClassifier.best_binary_stump(
171
+ feature_names, labeled_featuresets, feature_values, verbose
172
+ )
173
+
174
+ # Refine the stump.
175
+ tree.refine(
176
+ labeled_featuresets,
177
+ entropy_cutoff,
178
+ depth_cutoff - 1,
179
+ support_cutoff,
180
+ binary,
181
+ feature_values,
182
+ verbose,
183
+ )
184
+
185
+ # Return it
186
+ return tree
187
+
188
+ @staticmethod
189
+ def leaf(labeled_featuresets):
190
+ label = FreqDist(label for (featureset, label) in labeled_featuresets).max()
191
+ return DecisionTreeClassifier(label)
192
+
193
+ @staticmethod
194
+ def stump(feature_name, labeled_featuresets):
195
+ label = FreqDist(label for (featureset, label) in labeled_featuresets).max()
196
+
197
+ # Find the best label for each value.
198
+ freqs = defaultdict(FreqDist) # freq(label|value)
199
+ for featureset, label in labeled_featuresets:
200
+ feature_value = featureset.get(feature_name)
201
+ freqs[feature_value][label] += 1
202
+
203
+ decisions = {val: DecisionTreeClassifier(freqs[val].max()) for val in freqs}
204
+ return DecisionTreeClassifier(label, feature_name, decisions)
205
+
206
+ def refine(
207
+ self,
208
+ labeled_featuresets,
209
+ entropy_cutoff,
210
+ depth_cutoff,
211
+ support_cutoff,
212
+ binary=False,
213
+ feature_values=None,
214
+ verbose=False,
215
+ ):
216
+ if len(labeled_featuresets) <= support_cutoff:
217
+ return
218
+ if self._fname is None:
219
+ return
220
+ if depth_cutoff <= 0:
221
+ return
222
+ for fval in self._decisions:
223
+ fval_featuresets = [
224
+ (featureset, label)
225
+ for (featureset, label) in labeled_featuresets
226
+ if featureset.get(self._fname) == fval
227
+ ]
228
+
229
+ label_freqs = FreqDist(label for (featureset, label) in fval_featuresets)
230
+ if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
231
+ self._decisions[fval] = DecisionTreeClassifier.train(
232
+ fval_featuresets,
233
+ entropy_cutoff,
234
+ depth_cutoff,
235
+ support_cutoff,
236
+ binary,
237
+ feature_values,
238
+ verbose,
239
+ )
240
+ if self._default is not None:
241
+ default_featuresets = [
242
+ (featureset, label)
243
+ for (featureset, label) in labeled_featuresets
244
+ if featureset.get(self._fname) not in self._decisions
245
+ ]
246
+ label_freqs = FreqDist(label for (featureset, label) in default_featuresets)
247
+ if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
248
+ self._default = DecisionTreeClassifier.train(
249
+ default_featuresets,
250
+ entropy_cutoff,
251
+ depth_cutoff,
252
+ support_cutoff,
253
+ binary,
254
+ feature_values,
255
+ verbose,
256
+ )
257
+
258
+ @staticmethod
259
+ def best_stump(feature_names, labeled_featuresets, verbose=False):
260
+ best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
261
+ best_error = best_stump.error(labeled_featuresets)
262
+ for fname in feature_names:
263
+ stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)
264
+ stump_error = stump.error(labeled_featuresets)
265
+ if stump_error < best_error:
266
+ best_error = stump_error
267
+ best_stump = stump
268
+ if verbose:
269
+ print(
270
+ "best stump for {:6d} toks uses {:20} err={:6.4f}".format(
271
+ len(labeled_featuresets), best_stump._fname, best_error
272
+ )
273
+ )
274
+ return best_stump
275
+
276
+ @staticmethod
277
+ def binary_stump(feature_name, feature_value, labeled_featuresets):
278
+ label = FreqDist(label for (featureset, label) in labeled_featuresets).max()
279
+
280
+ # Find the best label for each value.
281
+ pos_fdist = FreqDist()
282
+ neg_fdist = FreqDist()
283
+ for featureset, label in labeled_featuresets:
284
+ if featureset.get(feature_name) == feature_value:
285
+ pos_fdist[label] += 1
286
+ else:
287
+ neg_fdist[label] += 1
288
+
289
+ decisions = {}
290
+ default = label
291
+ # But hopefully we have observations!
292
+ if pos_fdist.N() > 0:
293
+ decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())}
294
+ if neg_fdist.N() > 0:
295
+ default = DecisionTreeClassifier(neg_fdist.max())
296
+
297
+ return DecisionTreeClassifier(label, feature_name, decisions, default)
298
+
299
+ @staticmethod
300
+ def best_binary_stump(
301
+ feature_names, labeled_featuresets, feature_values, verbose=False
302
+ ):
303
+ best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
304
+ best_error = best_stump.error(labeled_featuresets)
305
+ for fname in feature_names:
306
+ for fval in feature_values[fname]:
307
+ stump = DecisionTreeClassifier.binary_stump(
308
+ fname, fval, labeled_featuresets
309
+ )
310
+ stump_error = stump.error(labeled_featuresets)
311
+ if stump_error < best_error:
312
+ best_error = stump_error
313
+ best_stump = stump
314
+ if verbose:
315
+ if best_stump._decisions:
316
+ descr = "{}={}".format(
317
+ best_stump._fname, list(best_stump._decisions.keys())[0]
318
+ )
319
+ else:
320
+ descr = "(default)"
321
+ print(
322
+ "best stump for {:6d} toks uses {:20} err={:6.4f}".format(
323
+ len(labeled_featuresets), descr, best_error
324
+ )
325
+ )
326
+ return best_stump
327
+
328
+
329
+ ##//////////////////////////////////////////////////////
330
+ ## Demo
331
+ ##//////////////////////////////////////////////////////
332
+
333
+
334
+ def f(x):
335
+ return DecisionTreeClassifier.train(x, binary=True, verbose=True)
336
+
337
+
338
+ def demo():
339
+ from nltk.classify.util import binary_names_demo_features, names_demo
340
+
341
+ classifier = names_demo(
342
+ f, binary_names_demo_features # DecisionTreeClassifier.train,
343
+ )
344
+ print(classifier.pretty_format(depth=7))
345
+ print(classifier.pseudocode(depth=7))
346
+
347
+
348
+ if __name__ == "__main__":
349
+ demo()
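Below is a hedged sketch of how the ``DecisionTreeClassifier.train`` entry point above can be exercised on a toy dataset (feature names and labels are invented; the cutoffs are relaxed so the small sample is actually refined):

    from nltk.classify.decisiontree import DecisionTreeClassifier

    toy_train = [
        ({"outlook": "sunny", "windy": False}, "play"),
        ({"outlook": "sunny", "windy": True}, "stay"),
        ({"outlook": "rain", "windy": False}, "stay"),
        ({"outlook": "overcast", "windy": False}, "play"),
    ] * 5  # repeat so the sample is larger than the support cutoff

    tree = DecisionTreeClassifier.train(toy_train, entropy_cutoff=0.0, support_cutoff=1)
    print(tree.pretty_format())                    # indented view of the learned splits
    print(tree.pseudocode(depth=4))                # the same tree as nested if-statements
    print(tree.classify({"outlook": "sunny", "windy": True}))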
venv/lib/python3.10/site-packages/nltk/classify/maxent.py ADDED
@@ -0,0 +1,1569 @@
1
+ # Natural Language Toolkit: Maximum Entropy Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Dmitry Chichkov <[email protected]> (TypedMaxentFeatureEncoding)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A classifier model based on maximum entropy modeling framework. This
11
+ framework considers all of the probability distributions that are
12
+ empirically consistent with the training data; and chooses the
13
+ distribution with the highest entropy. A probability distribution is
14
+ "empirically consistent" with a set of training data if its estimated
15
+ frequency with which a class and a feature vector value co-occur is
16
+ equal to the actual frequency in the data.
17
+
18
+ Terminology: 'feature'
19
+ ======================
20
+ The term *feature* is usually used to refer to some property of an
21
+ unlabeled token. For example, when performing word sense
22
+ disambiguation, we might define a ``'prevword'`` feature whose value is
23
+ the word preceding the target word. However, in the context of
24
+ maxent modeling, the term *feature* is typically used to refer to a
25
+ property of a "labeled" token. In order to prevent confusion, we
26
+ will introduce two distinct terms to disambiguate these two different
27
+ concepts:
28
+
29
+ - An "input-feature" is a property of an unlabeled token.
30
+ - A "joint-feature" is a property of a labeled token.
31
+
32
+ In the rest of the ``nltk.classify`` module, the term "features" is
33
+ used to refer to what we will call "input-features" in this module.
34
+
35
+ In literature that describes and discusses maximum entropy models,
36
+ input-features are typically called "contexts", and joint-features
37
+ are simply referred to as "features".
38
+
39
+ Converting Input-Features to Joint-Features
40
+ -------------------------------------------
41
+ In maximum entropy models, joint-features are required to have numeric
42
+ values. Typically, each input-feature ``input_feat`` is mapped to a
43
+ set of joint-features of the form:
44
+
45
+ | joint_feat(token, label) = { 1 if input_feat(token) == feat_val
46
+ | { and label == some_label
47
+ | {
48
+ | { 0 otherwise
49
+
50
+ For all values of ``feat_val`` and ``some_label``. This mapping is
51
+ performed by classes that implement the ``MaxentFeatureEncodingI``
52
+ interface.
53
+ """
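As a concrete (purely illustrative) instance of the indicator mapping described above, a joint-feature derived from the input-feature ``prevword`` might look like this; the feature and label names are made up for the example:

    def joint_feat_prevword_the_noun(token_features, label):
        # 1 iff the input-feature 'prevword' has the value 'the' AND the
        # candidate label is 'NOUN'; 0 otherwise.
        return int(token_features.get("prevword") == "the" and label == "NOUN")

    print(joint_feat_prevword_the_noun({"prevword": "the"}, "NOUN"))  # 1
    print(joint_feat_prevword_the_noun({"prevword": "the"}, "VERB"))  # 0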
54
+ try:
55
+ import numpy
56
+ except ImportError:
57
+ pass
58
+
59
+ import os
60
+ import tempfile
61
+ from collections import defaultdict
62
+
63
+ from nltk.classify.api import ClassifierI
64
+ from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file
65
+ from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file
66
+ from nltk.classify.util import CutoffChecker, accuracy, log_likelihood
67
+ from nltk.data import gzip_open_unicode
68
+ from nltk.probability import DictionaryProbDist
69
+ from nltk.util import OrderedDict
70
+
71
+ __docformat__ = "epytext en"
72
+
73
+ ######################################################################
74
+ # { Classifier Model
75
+ ######################################################################
76
+
77
+
78
+ class MaxentClassifier(ClassifierI):
79
+ """
80
+ A maximum entropy classifier (also known as a "conditional
81
+ exponential classifier"). This classifier is parameterized by a
82
+ set of "weights", which are used to combine the joint-features
83
+ that are generated from a featureset by an "encoding". In
84
+ particular, the encoding maps each ``(featureset, label)`` pair to
85
+ a vector. The probability of each label is then computed using
86
+ the following equation::
87
+
88
+ dotprod(weights, encode(fs,label))
89
+ prob(label|fs) = ---------------------------------------------------
90
+ sum(dotprod(weights, encode(fs,l)) for l in labels)
91
+
92
+ Where ``dotprod`` is the dot product::
93
+
94
+ dotprod(a,b) = sum(x*y for (x,y) in zip(a,b))
95
+ """
96
+
97
+ def __init__(self, encoding, weights, logarithmic=True):
98
+ """
99
+ Construct a new maxent classifier model. Typically, new
100
+ classifier models are created using the ``train()`` method.
101
+
102
+ :type encoding: MaxentFeatureEncodingI
103
+ :param encoding: An encoding that is used to convert the
104
+ featuresets that are given to the ``classify`` method into
105
+ joint-feature vectors, which are used by the maxent
106
+ classifier model.
107
+
108
+ :type weights: list of float
109
+ :param weights: The feature weight vector for this classifier.
110
+
111
+ :type logarithmic: bool
112
+ :param logarithmic: If false, then use non-logarithmic weights.
113
+ """
114
+ self._encoding = encoding
115
+ self._weights = weights
116
+ self._logarithmic = logarithmic
117
+ # self._logarithmic = False
118
+ assert encoding.length() == len(weights)
119
+
120
+ def labels(self):
121
+ return self._encoding.labels()
122
+
123
+ def set_weights(self, new_weights):
124
+ """
125
+ Set the feature weight vector for this classifier.
126
+ :param new_weights: The new feature weight vector.
127
+ :type new_weights: list of float
128
+ """
129
+ self._weights = new_weights
130
+ assert self._encoding.length() == len(new_weights)
131
+
132
+ def weights(self):
133
+ """
134
+ :return: The feature weight vector for this classifier.
135
+ :rtype: list of float
136
+ """
137
+ return self._weights
138
+
139
+ def classify(self, featureset):
140
+ return self.prob_classify(featureset).max()
141
+
142
+ def prob_classify(self, featureset):
143
+ prob_dict = {}
144
+ for label in self._encoding.labels():
145
+ feature_vector = self._encoding.encode(featureset, label)
146
+
147
+ if self._logarithmic:
148
+ total = 0.0
149
+ for (f_id, f_val) in feature_vector:
150
+ total += self._weights[f_id] * f_val
151
+ prob_dict[label] = total
152
+
153
+ else:
154
+ prod = 1.0
155
+ for (f_id, f_val) in feature_vector:
156
+ prod *= self._weights[f_id] ** f_val
157
+ prob_dict[label] = prod
158
+
159
+ # Normalize the dictionary to give a probability distribution
160
+ return DictionaryProbDist(prob_dict, log=self._logarithmic, normalize=True)
161
+
162
+ def explain(self, featureset, columns=4):
163
+ """
164
+ Print a table showing the effect of each of the features in
165
+ the given feature set, and how they combine to determine the
166
+ probabilities of each label for that featureset.
167
+ """
168
+ descr_width = 50
169
+ TEMPLATE = " %-" + str(descr_width - 2) + "s%s%8.3f"
170
+
171
+ pdist = self.prob_classify(featureset)
172
+ labels = sorted(pdist.samples(), key=pdist.prob, reverse=True)
173
+ labels = labels[:columns]
174
+ print(
175
+ " Feature".ljust(descr_width)
176
+ + "".join("%8s" % (("%s" % l)[:7]) for l in labels)
177
+ )
178
+ print(" " + "-" * (descr_width - 2 + 8 * len(labels)))
179
+ sums = defaultdict(int)
180
+ for i, label in enumerate(labels):
181
+ feature_vector = self._encoding.encode(featureset, label)
182
+ feature_vector.sort(
183
+ key=lambda fid__: abs(self._weights[fid__[0]]), reverse=True
184
+ )
185
+ for (f_id, f_val) in feature_vector:
186
+ if self._logarithmic:
187
+ score = self._weights[f_id] * f_val
188
+ else:
189
+ score = self._weights[f_id] ** f_val
190
+ descr = self._encoding.describe(f_id)
191
+ descr = descr.split(" and label is ")[0] # hack
192
+ descr += " (%s)" % f_val # hack
193
+ if len(descr) > 47:
194
+ descr = descr[:44] + "..."
195
+ print(TEMPLATE % (descr, i * 8 * " ", score))
196
+ sums[label] += score
197
+ print(" " + "-" * (descr_width - 1 + 8 * len(labels)))
198
+ print(
199
+ " TOTAL:".ljust(descr_width) + "".join("%8.3f" % sums[l] for l in labels)
200
+ )
201
+ print(
202
+ " PROBS:".ljust(descr_width)
203
+ + "".join("%8.3f" % pdist.prob(l) for l in labels)
204
+ )
205
+
206
+ def most_informative_features(self, n=10):
207
+ """
208
+ Generates the ranked list of informative features from most to least.
209
+ """
210
+ if hasattr(self, "_most_informative_features"):
211
+ return self._most_informative_features[:n]
212
+ else:
213
+ self._most_informative_features = sorted(
214
+ list(range(len(self._weights))),
215
+ key=lambda fid: abs(self._weights[fid]),
216
+ reverse=True,
217
+ )
218
+ return self._most_informative_features[:n]
219
+
220
+ def show_most_informative_features(self, n=10, show="all"):
221
+ """
222
+ :param show: all, neg, or pos (for negative-only or positive-only)
223
+ :type show: str
224
+ :param n: The number of top features to show
225
+ :type n: int
226
+ """
227
+ # Use None to get the full list of ranked features.
228
+ fids = self.most_informative_features(None)
229
+ if show == "pos":
230
+ fids = [fid for fid in fids if self._weights[fid] > 0]
231
+ elif show == "neg":
232
+ fids = [fid for fid in fids if self._weights[fid] < 0]
233
+ for fid in fids[:n]:
234
+ print(f"{self._weights[fid]:8.3f} {self._encoding.describe(fid)}")
235
+
236
+ def __repr__(self):
237
+ return "<ConditionalExponentialClassifier: %d labels, %d features>" % (
238
+ len(self._encoding.labels()),
239
+ self._encoding.length(),
240
+ )
241
+
242
+ #: A list of the algorithm names that are accepted for the
243
+ #: ``train()`` method's ``algorithm`` parameter.
244
+ ALGORITHMS = ["GIS", "IIS", "MEGAM", "TADM"]
245
+
246
+ @classmethod
247
+ def train(
248
+ cls,
249
+ train_toks,
250
+ algorithm=None,
251
+ trace=3,
252
+ encoding=None,
253
+ labels=None,
254
+ gaussian_prior_sigma=0,
255
+ **cutoffs,
256
+ ):
257
+ """
258
+ Train a new maxent classifier based on the given corpus of
259
+ training samples. This classifier will have its weights
260
+ chosen to maximize entropy while remaining empirically
261
+ consistent with the training corpus.
262
+
263
+ :rtype: MaxentClassifier
264
+ :return: The new maxent classifier
265
+
266
+ :type train_toks: list
267
+ :param train_toks: Training data, represented as a list of
268
+ pairs, the first member of which is a featureset,
269
+ and the second of which is a classification label.
270
+
271
+ :type algorithm: str
272
+ :param algorithm: A case-insensitive string, specifying which
273
+ algorithm should be used to train the classifier. The
274
+ following algorithms are currently available.
275
+
276
+ - Iterative Scaling Methods: Generalized Iterative Scaling (``'GIS'``),
277
+ Improved Iterative Scaling (``'IIS'``)
278
+ - External Libraries (requiring megam):
279
+ LM-BFGS algorithm, with training performed by Megam (``'megam'``)
280
+
281
+ The default algorithm is ``'IIS'``.
282
+
283
+ :type trace: int
284
+ :param trace: The level of diagnostic tracing output to produce.
285
+ Higher values produce more verbose output.
286
+ :type encoding: MaxentFeatureEncodingI
287
+ :param encoding: A feature encoding, used to convert featuresets
288
+ into feature vectors. If none is specified, then a
289
+ ``BinaryMaxentFeatureEncoding`` will be built based on the
290
+ features that are attested in the training corpus.
291
+ :type labels: list(str)
292
+ :param labels: The set of possible labels. If none is given, then
293
+ the set of all labels attested in the training data will be
294
+ used instead.
295
+ :param gaussian_prior_sigma: The sigma value for a gaussian
296
+ prior on model weights. Currently, this is supported by
297
+ ``megam``. For other algorithms, its value is ignored.
298
+ :param cutoffs: Arguments specifying various conditions under
299
+ which the training should be halted. (Some of the cutoff
300
+ conditions are not supported by some algorithms.)
301
+
302
+ - ``max_iter=v``: Terminate after ``v`` iterations.
303
+ - ``min_ll=v``: Terminate after the negative average
304
+ log-likelihood drops under ``v``.
305
+ - ``min_lldelta=v``: Terminate if a single iteration improves
306
+ log likelihood by less than ``v``.
307
+ """
308
+ if algorithm is None:
309
+ algorithm = "iis"
310
+ for key in cutoffs:
311
+ if key not in (
312
+ "max_iter",
313
+ "min_ll",
314
+ "min_lldelta",
315
+ "max_acc",
316
+ "min_accdelta",
317
+ "count_cutoff",
318
+ "norm",
319
+ "explicit",
320
+ "bernoulli",
321
+ ):
322
+ raise TypeError("Unexpected keyword arg %r" % key)
323
+ algorithm = algorithm.lower()
324
+ if algorithm == "iis":
325
+ return train_maxent_classifier_with_iis(
326
+ train_toks, trace, encoding, labels, **cutoffs
327
+ )
328
+ elif algorithm == "gis":
329
+ return train_maxent_classifier_with_gis(
330
+ train_toks, trace, encoding, labels, **cutoffs
331
+ )
332
+ elif algorithm == "megam":
333
+ return train_maxent_classifier_with_megam(
334
+ train_toks, trace, encoding, labels, gaussian_prior_sigma, **cutoffs
335
+ )
336
+ elif algorithm == "tadm":
337
+ kwargs = cutoffs
338
+ kwargs["trace"] = trace
339
+ kwargs["encoding"] = encoding
340
+ kwargs["labels"] = labels
341
+ kwargs["gaussian_prior_sigma"] = gaussian_prior_sigma
342
+ return TadmMaxentClassifier.train(train_toks, **kwargs)
343
+ else:
344
+ raise ValueError("Unknown algorithm %s" % algorithm)
345
+
346
+
347
+ #: Alias for MaxentClassifier.
348
+ ConditionalExponentialClassifier = MaxentClassifier
349
+
350
+
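A usage sketch for the ``train()`` method documented above (the featuresets are invented for illustration; the ``'iis'`` and ``'gis'`` trainers need numpy installed):

    from nltk.classify import MaxentClassifier

    train_toks = [
        ({"contains(excellent)": True}, "pos"),
        ({"contains(boring)": True}, "neg"),
        ({"contains(excellent)": True, "contains(plot)": True}, "pos"),
        ({"contains(boring)": True, "contains(plot)": True}, "neg"),
    ]

    # max_iter is one of the cutoff keyword arguments accepted by train().
    clf = MaxentClassifier.train(train_toks, algorithm="iis", trace=0, max_iter=10)
    print(clf.classify({"contains(excellent)": True}))
    pdist = clf.prob_classify({"contains(boring)": True})
    print({label: round(pdist.prob(label), 3) for label in clf.labels()})
    clf.show_most_informative_features(5)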
351
+ ######################################################################
352
+ # { Feature Encodings
353
+ ######################################################################
354
+
355
+
356
+ class MaxentFeatureEncodingI:
357
+ """
358
+ A mapping that converts a set of input-feature values to a vector
359
+ of joint-feature values, given a label. This conversion is
360
+ necessary to translate featuresets into a format that can be used
361
+ by maximum entropy models.
362
+
363
+ The set of joint-features used by a given encoding is fixed, and
364
+ each index in the generated joint-feature vectors corresponds to a
365
+ single joint-feature. The length of the generated joint-feature
366
+ vectors is therefore constant (for a given encoding).
367
+
368
+ Because the joint-feature vectors generated by
369
+ ``MaxentFeatureEncodingI`` are typically very sparse, they are
370
+ represented as a list of ``(index, value)`` tuples, specifying the
371
+ value of each non-zero joint-feature.
372
+
373
+ Feature encodings are generally created using the ``train()``
374
+ method, which generates an appropriate encoding based on the
375
+ input-feature values and labels that are present in a given
376
+ corpus.
377
+ """
378
+
379
+ def encode(self, featureset, label):
380
+ """
381
+ Given a (featureset, label) pair, return the corresponding
382
+ vector of joint-feature values. This vector is represented as
383
+ a list of ``(index, value)`` tuples, specifying the value of
384
+ each non-zero joint-feature.
385
+
386
+ :type featureset: dict
387
+ :rtype: list(tuple(int, int))
388
+ """
389
+ raise NotImplementedError()
390
+
391
+ def length(self):
392
+ """
393
+ :return: The size of the fixed-length joint-feature vectors
394
+ that are generated by this encoding.
395
+ :rtype: int
396
+ """
397
+ raise NotImplementedError()
398
+
399
+ def labels(self):
400
+ """
401
+ :return: A list of the \"known labels\" -- i.e., all labels
402
+ ``l`` such that ``self.encode(fs,l)`` can be a nonzero
403
+ joint-feature vector for some value of ``fs``.
404
+ :rtype: list
405
+ """
406
+ raise NotImplementedError()
407
+
408
+ def describe(self, fid):
409
+ """
410
+ :return: A string describing the value of the joint-feature
411
+ whose index in the generated feature vectors is ``fid``.
412
+ :rtype: str
413
+ """
414
+ raise NotImplementedError()
415
+
416
+ def train(cls, train_toks):
417
+ """
418
+ Construct and return new feature encoding, based on a given
419
+ training corpus ``train_toks``.
420
+
421
+ :type train_toks: list(tuple(dict, str))
422
+ :param train_toks: Training data, represented as a list of
423
+ pairs, the first member of which is a feature dictionary,
424
+ and the second of which is a classification label.
425
+ """
426
+ raise NotImplementedError()
427
+
428
+
429
+ class FunctionBackedMaxentFeatureEncoding(MaxentFeatureEncodingI):
430
+ """
431
+ A feature encoding that calls a user-supplied function to map a
432
+ given featureset/label pair to a sparse joint-feature vector.
433
+ """
434
+
435
+ def __init__(self, func, length, labels):
436
+ """
437
+ Construct a new feature encoding based on the given function.
438
+
439
+ :type func: (callable)
440
+ :param func: A function that takes two arguments, a featureset
441
+ and a label, and returns the sparse joint feature vector
442
+ that encodes them::
443
+
444
+ func(featureset, label) -> feature_vector
445
+
446
+ This sparse joint feature vector (``feature_vector``) is a
447
+ list of ``(index,value)`` tuples.
448
+
449
+ :type length: int
450
+ :param length: The size of the fixed-length joint-feature
451
+ vectors that are generated by this encoding.
452
+
453
+ :type labels: list
454
+ :param labels: A list of the \"known labels\" for this
455
+ encoding -- i.e., all labels ``l`` such that
456
+ ``self.encode(fs,l)`` can be a nonzero joint-feature vector
457
+ for some value of ``fs``.
458
+ """
459
+ self._length = length
460
+ self._func = func
461
+ self._labels = labels
462
+
463
+ def encode(self, featureset, label):
464
+ return self._func(featureset, label)
465
+
466
+ def length(self):
467
+ return self._length
468
+
469
+ def labels(self):
470
+ return self._labels
471
+
472
+ def describe(self, fid):
473
+ return "no description available"
474
+
475
+
476
+ class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI):
477
+ """
478
+ A feature encoding that generates vectors containing a binary
479
+ joint-features of the form:
480
+
481
+ | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
482
+ | {
483
+ | { 0 otherwise
484
+
485
+ Where ``fname`` is the name of an input-feature, ``fval`` is a value
486
+ for that input-feature, and ``label`` is a label.
487
+
488
+ Typically, these features are constructed based on a training
489
+ corpus, using the ``train()`` method. This method will create one
490
+ feature for each combination of ``fname``, ``fval``, and ``label``
491
+ that occurs at least once in the training corpus.
492
+
493
+ The ``unseen_features`` parameter can be used to add "unseen-value
494
+ features", which are used whenever an input feature has a value
495
+ that was not encountered in the training corpus. These features
496
+ have the form:
497
+
498
+ | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
499
+ | { and l == label
500
+ | {
501
+ | { 0 otherwise
502
+
503
+ Where ``is_unseen(fname, fval)`` is true if the encoding does not
504
+ contain any joint features that are true when ``fs[fname]==fval``.
505
+
506
+ The ``alwayson_features`` parameter can be used to add "always-on
507
+ features", which have the form::
508
+
509
+ | joint_feat(fs, l) = { 1 if (l == label)
510
+ | {
511
+ | { 0 otherwise
512
+
513
+ These always-on features allow the maxent model to directly model
514
+ the prior probabilities of each label.
515
+ """
516
+
517
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
518
+ """
519
+ :param labels: A list of the \"known labels\" for this encoding.
520
+
521
+ :param mapping: A dictionary mapping from ``(fname,fval,label)``
522
+ tuples to corresponding joint-feature indexes. These
523
+ indexes must be the set of integers from 0...len(mapping).
524
+ If ``mapping[fname,fval,label]=id``, then
525
+ ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
526
+ otherwise, it is 0.
527
+
528
+ :param unseen_features: If true, then include unseen value
529
+ features in the generated joint-feature vectors.
530
+
531
+ :param alwayson_features: If true, then include always-on
532
+ features in the generated joint-feature vectors.
533
+ """
534
+ if set(mapping.values()) != set(range(len(mapping))):
535
+ raise ValueError(
536
+ "Mapping values must be exactly the "
537
+ "set of integers from 0...len(mapping)"
538
+ )
539
+
540
+ self._labels = list(labels)
541
+ """A list of attested labels."""
542
+
543
+ self._mapping = mapping
544
+ """dict mapping from (fname,fval,label) -> fid"""
545
+
546
+ self._length = len(mapping)
547
+ """The length of generated joint feature vectors."""
548
+
549
+ self._alwayson = None
550
+ """dict mapping from label -> fid"""
551
+
552
+ self._unseen = None
553
+ """dict mapping from fname -> fid"""
554
+
555
+ if alwayson_features:
556
+ self._alwayson = {
557
+ label: i + self._length for (i, label) in enumerate(labels)
558
+ }
559
+ self._length += len(self._alwayson)
560
+
561
+ if unseen_features:
562
+ fnames = {fname for (fname, fval, label) in mapping}
563
+ self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
564
+ self._length += len(fnames)
565
+
566
+ def encode(self, featureset, label):
567
+ # Inherit docs.
568
+ encoding = []
569
+
570
+ # Convert input-features to joint-features:
571
+ for fname, fval in featureset.items():
572
+ # Known feature name & value:
573
+ if (fname, fval, label) in self._mapping:
574
+ encoding.append((self._mapping[fname, fval, label], 1))
575
+
576
+ # Otherwise, we might want to fire an "unseen-value feature".
577
+ elif self._unseen:
578
+ # Have we seen this fname/fval combination with any label?
579
+ for label2 in self._labels:
580
+ if (fname, fval, label2) in self._mapping:
581
+ break # we've seen this fname/fval combo
582
+ # We haven't -- fire the unseen-value feature
583
+ else:
584
+ if fname in self._unseen:
585
+ encoding.append((self._unseen[fname], 1))
586
+
587
+ # Add always-on features:
588
+ if self._alwayson and label in self._alwayson:
589
+ encoding.append((self._alwayson[label], 1))
590
+
591
+ return encoding
592
+
593
+ def describe(self, f_id):
594
+ # Inherit docs.
595
+ if not isinstance(f_id, int):
596
+ raise TypeError("describe() expected an int")
597
+ try:
598
+ self._inv_mapping
599
+ except AttributeError:
600
+ self._inv_mapping = [-1] * len(self._mapping)
601
+ for (info, i) in self._mapping.items():
602
+ self._inv_mapping[i] = info
603
+
604
+ if f_id < len(self._mapping):
605
+ (fname, fval, label) = self._inv_mapping[f_id]
606
+ return f"{fname}=={fval!r} and label is {label!r}"
607
+ elif self._alwayson and f_id in self._alwayson.values():
608
+ for (label, f_id2) in self._alwayson.items():
609
+ if f_id == f_id2:
610
+ return "label is %r" % label
611
+ elif self._unseen and f_id in self._unseen.values():
612
+ for (fname, f_id2) in self._unseen.items():
613
+ if f_id == f_id2:
614
+ return "%s is unseen" % fname
615
+ else:
616
+ raise ValueError("Bad feature id")
617
+
618
+ def labels(self):
619
+ # Inherit docs.
620
+ return self._labels
621
+
622
+ def length(self):
623
+ # Inherit docs.
624
+ return self._length
625
+
626
+ @classmethod
627
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
628
+ """
629
+ Construct and return new feature encoding, based on a given
630
+ training corpus ``train_toks``. See the class description
631
+ ``BinaryMaxentFeatureEncoding`` for a description of the
632
+ joint-features that will be included in this encoding.
633
+
634
+ :type train_toks: list(tuple(dict, str))
635
+ :param train_toks: Training data, represented as a list of
636
+ pairs, the first member of which is a feature dictionary,
637
+ and the second of which is a classification label.
638
+
639
+ :type count_cutoff: int
640
+ :param count_cutoff: A cutoff value that is used to discard
641
+ rare joint-features. If a joint-feature's value is 1
642
+ for fewer than ``count_cutoff`` tokens in the training corpus,
643
+ then that joint-feature is not included in the generated
644
+ encoding.
645
+
646
+ :type labels: list
647
+ :param labels: A list of labels that should be used by the
648
+ classifier. If not specified, then the set of labels
649
+ attested in ``train_toks`` will be used.
650
+
651
+ :param options: Extra parameters for the constructor, such as
652
+ ``unseen_features`` and ``alwayson_features``.
653
+ """
654
+ mapping = {} # maps (fname, fval, label) -> fid
655
+ seen_labels = set() # The set of labels we've encountered
656
+ count = defaultdict(int) # maps (fname, fval) -> count
657
+
658
+ for (tok, label) in train_toks:
659
+ if labels and label not in labels:
660
+ raise ValueError("Unexpected label %s" % label)
661
+ seen_labels.add(label)
662
+
663
+ # Record each of the features.
664
+ for (fname, fval) in tok.items():
665
+
666
+ # If a count cutoff is given, then only add a joint
667
+ # feature once the corresponding (fname, fval, label)
668
+ # tuple exceeds that cutoff.
669
+ count[fname, fval] += 1
670
+ if count[fname, fval] >= count_cutoff:
671
+ if (fname, fval, label) not in mapping:
672
+ mapping[fname, fval, label] = len(mapping)
673
+
674
+ if labels is None:
675
+ labels = seen_labels
676
+ return cls(labels, mapping, **options)
677
+
678
+
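To make the sparse ``(index, value)`` representation concrete, here is a small sketch of ``BinaryMaxentFeatureEncoding.train`` and ``encode`` on invented featuresets (the exact feature ids depend on label iteration order):

    from nltk.classify.maxent import BinaryMaxentFeatureEncoding

    train_toks = [({"suffix": "ly"}, "ADV"), ({"suffix": "ed"}, "VERB")]
    enc = BinaryMaxentFeatureEncoding.train(train_toks, alwayson_features=True)

    print(enc.length())                         # 2 seen (fname, fval, label) triples + 2 always-on features
    print(enc.encode({"suffix": "ly"}, "ADV"))  # sparse vector, e.g. [(0, 1), (2, 1)]
    for fid in range(enc.length()):
        print(fid, enc.describe(fid))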
679
+ class GISEncoding(BinaryMaxentFeatureEncoding):
680
+ """
681
+ A binary feature encoding which adds one new joint-feature to the
682
+ joint-features defined by ``BinaryMaxentFeatureEncoding``: a
683
+ correction feature, whose value is chosen to ensure that the
684
+ sparse vector always sums to a constant non-negative number. This
685
+ new feature is used to ensure two preconditions for the GIS
686
+ training algorithm:
687
+
688
+ - At least one feature vector index must be nonzero for every
689
+ token.
690
+ - The feature vector must sum to a constant non-negative number
691
+ for every token.
692
+ """
693
+
694
+ def __init__(
695
+ self, labels, mapping, unseen_features=False, alwayson_features=False, C=None
696
+ ):
697
+ """
698
+ :param C: The correction constant. The value of the correction
699
+ feature is based on this value. In particular, its value is
700
+ ``C - sum([v for (f,v) in encoding])``.
701
+ :seealso: ``BinaryMaxentFeatureEncoding.__init__``
702
+ """
703
+ BinaryMaxentFeatureEncoding.__init__(
704
+ self, labels, mapping, unseen_features, alwayson_features
705
+ )
706
+ if C is None:
707
+ C = len({fname for (fname, fval, label) in mapping}) + 1
708
+ self._C = C
709
+
710
+ @property
711
+ def C(self):
712
+ """The non-negative constant that all encoded feature vectors
713
+ will sum to."""
714
+ return self._C
715
+
716
+ def encode(self, featureset, label):
717
+ # Get the basic encoding.
718
+ encoding = BinaryMaxentFeatureEncoding.encode(self, featureset, label)
719
+ base_length = BinaryMaxentFeatureEncoding.length(self)
720
+
721
+ # Add a correction feature.
722
+ total = sum(v for (f, v) in encoding)
723
+ if total >= self._C:
724
+ raise ValueError("Correction feature is not high enough!")
725
+ encoding.append((base_length, self._C - total))
726
+
727
+ # Return the result
728
+ return encoding
729
+
730
+ def length(self):
731
+ return BinaryMaxentFeatureEncoding.length(self) + 1
732
+
733
+ def describe(self, f_id):
734
+ if f_id == BinaryMaxentFeatureEncoding.length(self):
735
+ return "Correction feature (%s)" % self._C
736
+ else:
737
+ return BinaryMaxentFeatureEncoding.describe(self, f_id)
738
+
739
+
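A quick check of the GIS precondition described above, namely that every encoded vector sums to the constant ``C`` (toy featuresets, for illustration only):

    from nltk.classify.maxent import GISEncoding

    train_toks = [({"a": 1, "b": 2}, "x"), ({"a": 1}, "y")]
    enc = GISEncoding.train(train_toks)

    for fs, label in train_toks:
        vec = enc.encode(fs, label)
        # The final (index, value) pair is the correction feature that tops the sum up to C.
        assert sum(v for _, v in vec) == enc.C
    print(enc.C)  # here: number of distinct feature names + 1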
740
+ class TadmEventMaxentFeatureEncoding(BinaryMaxentFeatureEncoding):
741
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
742
+ self._mapping = OrderedDict(mapping)
743
+ self._label_mapping = OrderedDict()
744
+ BinaryMaxentFeatureEncoding.__init__(
745
+ self, labels, self._mapping, unseen_features, alwayson_features
746
+ )
747
+
748
+ def encode(self, featureset, label):
749
+ encoding = []
750
+ for feature, value in featureset.items():
751
+ if (feature, label) not in self._mapping:
752
+ self._mapping[(feature, label)] = len(self._mapping)
753
+ if value not in self._label_mapping:
754
+ if not isinstance(value, int):
755
+ self._label_mapping[value] = len(self._label_mapping)
756
+ else:
757
+ self._label_mapping[value] = value
758
+ encoding.append(
759
+ (self._mapping[(feature, label)], self._label_mapping[value])
760
+ )
761
+ return encoding
762
+
763
+ def labels(self):
764
+ return self._labels
765
+
766
+ def describe(self, fid):
767
+ for (feature, label) in self._mapping:
768
+ if self._mapping[(feature, label)] == fid:
769
+ return (feature, label)
770
+
771
+ def length(self):
772
+ return len(self._mapping)
773
+
774
+ @classmethod
775
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
776
+ mapping = OrderedDict()
777
+ if not labels:
778
+ labels = []
779
+
780
+ # This gets read twice, so compute the values in case it's lazy.
781
+ train_toks = list(train_toks)
782
+
783
+ for (featureset, label) in train_toks:
784
+ if label not in labels:
785
+ labels.append(label)
786
+
787
+ for (featureset, label) in train_toks:
788
+ for label in labels:
789
+ for feature in featureset:
790
+ if (feature, label) not in mapping:
791
+ mapping[(feature, label)] = len(mapping)
792
+
793
+ return cls(labels, mapping, **options)
794
+
795
+
796
+ class TypedMaxentFeatureEncoding(MaxentFeatureEncodingI):
797
+ """
798
+ A feature encoding that generates vectors containing integer,
799
+ float and binary joint-features of the form:
800
+
801
+ Binary (for string and boolean features):
802
+
803
+ | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
804
+ | {
805
+ | { 0 otherwise
806
+
807
+ Value (for integer and float features):
808
+
809
+ | joint_feat(fs, l) = { fval if (fs[fname] == type(fval))
810
+ | { and (l == label)
811
+ | {
812
+ | { not encoded otherwise
813
+
814
+ Where ``fname`` is the name of an input-feature, ``fval`` is a value
815
+ for that input-feature, and ``label`` is a label.
816
+
817
+ Typically, these features are constructed based on a training
818
+ corpus, using the ``train()`` method.
819
+
820
+ For string and boolean features [type(fval) not in (int, float)]
821
+ this method will create one feature for each combination of
822
+ ``fname``, ``fval``, and ``label`` that occurs at least once in the
823
+ training corpus.
824
+
825
+ For integer and float features [type(fval) in (int, float)] this
826
+ method will create one feature for each combination of ``fname``
827
+ and ``label`` that occurs at least once in the training corpus.
828
+
829
+ For binary features the ``unseen_features`` parameter can be used
830
+ to add "unseen-value features", which are used whenever an input
831
+ feature has a value that was not encountered in the training
832
+ corpus. These features have the form:
833
+
834
+ | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
835
+ | { and l == label
836
+ | {
837
+ | { 0 otherwise
838
+
839
+ Where ``is_unseen(fname, fval)`` is true if the encoding does not
840
+ contain any joint features that are true when ``fs[fname]==fval``.
841
+
842
+ The ``alwayson_features`` parameter can be used to add "always-on
843
+ features", which have the form:
844
+
845
+ | joint_feat(fs, l) = { 1 if (l == label)
846
+ | {
847
+ | { 0 otherwise
848
+
849
+ These always-on features allow the maxent model to directly model
850
+ the prior probabilities of each label.
851
+ """
852
+
853
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
854
+ """
855
+ :param labels: A list of the \"known labels\" for this encoding.
856
+
857
+ :param mapping: A dictionary mapping from ``(fname,fval,label)``
858
+ tuples to corresponding joint-feature indexes. These
859
+ indexes must be the set of integers from 0...len(mapping).
860
+ If ``mapping[fname,fval,label]=id``, then
861
+ ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
862
+ otherwise, it is 0.
863
+
864
+ :param unseen_features: If true, then include unseen value
865
+ features in the generated joint-feature vectors.
866
+
867
+ :param alwayson_features: If true, then include always-on
868
+ features in the generated joint-feature vectors.
869
+ """
870
+ if set(mapping.values()) != set(range(len(mapping))):
871
+ raise ValueError(
872
+ "Mapping values must be exactly the "
873
+ "set of integers from 0...len(mapping)"
874
+ )
875
+
876
+ self._labels = list(labels)
877
+ """A list of attested labels."""
878
+
879
+ self._mapping = mapping
880
+ """dict mapping from (fname,fval,label) -> fid"""
881
+
882
+ self._length = len(mapping)
883
+ """The length of generated joint feature vectors."""
884
+
885
+ self._alwayson = None
886
+ """dict mapping from label -> fid"""
887
+
888
+ self._unseen = None
889
+ """dict mapping from fname -> fid"""
890
+
891
+ if alwayson_features:
892
+ self._alwayson = {
893
+ label: i + self._length for (i, label) in enumerate(labels)
894
+ }
895
+ self._length += len(self._alwayson)
896
+
897
+ if unseen_features:
898
+ fnames = {fname for (fname, fval, label) in mapping}
899
+ self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
900
+ self._length += len(fnames)
901
+
902
+ def encode(self, featureset, label):
903
+ # Inherit docs.
904
+ encoding = []
905
+
906
+ # Convert input-features to joint-features:
907
+ for fname, fval in featureset.items():
908
+ if isinstance(fval, (int, float)):
909
+ # Known feature name & value:
910
+ if (fname, type(fval), label) in self._mapping:
911
+ encoding.append((self._mapping[fname, type(fval), label], fval))
912
+ else:
913
+ # Known feature name & value:
914
+ if (fname, fval, label) in self._mapping:
915
+ encoding.append((self._mapping[fname, fval, label], 1))
916
+
917
+ # Otherwise, we might want to fire an "unseen-value feature".
918
+ elif self._unseen:
919
+ # Have we seen this fname/fval combination with any label?
920
+ for label2 in self._labels:
921
+ if (fname, fval, label2) in self._mapping:
922
+ break # we've seen this fname/fval combo
923
+ # We haven't -- fire the unseen-value feature
924
+ else:
925
+ if fname in self._unseen:
926
+ encoding.append((self._unseen[fname], 1))
927
+
928
+ # Add always-on features:
929
+ if self._alwayson and label in self._alwayson:
930
+ encoding.append((self._alwayson[label], 1))
931
+
932
+ return encoding
933
+
934
+ def describe(self, f_id):
935
+ # Inherit docs.
936
+ if not isinstance(f_id, int):
937
+ raise TypeError("describe() expected an int")
938
+ try:
939
+ self._inv_mapping
940
+ except AttributeError:
941
+ self._inv_mapping = [-1] * len(self._mapping)
942
+ for (info, i) in self._mapping.items():
943
+ self._inv_mapping[i] = info
944
+
945
+ if f_id < len(self._mapping):
946
+ (fname, fval, label) = self._inv_mapping[f_id]
947
+ return f"{fname}=={fval!r} and label is {label!r}"
948
+ elif self._alwayson and f_id in self._alwayson.values():
949
+ for (label, f_id2) in self._alwayson.items():
950
+ if f_id == f_id2:
951
+ return "label is %r" % label
952
+ elif self._unseen and f_id in self._unseen.values():
953
+ for (fname, f_id2) in self._unseen.items():
954
+ if f_id == f_id2:
955
+ return "%s is unseen" % fname
956
+ else:
957
+ raise ValueError("Bad feature id")
958
+
959
+ def labels(self):
960
+ # Inherit docs.
961
+ return self._labels
962
+
963
+ def length(self):
964
+ # Inherit docs.
965
+ return self._length
966
+
967
+ @classmethod
968
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
969
+ """
970
+ Construct and return new feature encoding, based on a given
971
+ training corpus ``train_toks``. See the class description
972
+ ``TypedMaxentFeatureEncoding`` for a description of the
973
+ joint-features that will be included in this encoding.
974
+
975
+ Note: recognized feature value types are (int, float); other
976
+ types are interpreted as regular binary features.
977
+
978
+ :type train_toks: list(tuple(dict, str))
979
+ :param train_toks: Training data, represented as a list of
980
+ pairs, the first member of which is a feature dictionary,
981
+ and the second of which is a classification label.
982
+
983
+ :type count_cutoff: int
984
+ :param count_cutoff: A cutoff value that is used to discard
985
+ rare joint-features. If a joint-feature's value is 1
986
+ for fewer than ``count_cutoff`` tokens in the training corpus,
987
+ then that joint-feature is not included in the generated
988
+ encoding.
989
+
990
+ :type labels: list
991
+ :param labels: A list of labels that should be used by the
992
+ classifier. If not specified, then the set of labels
993
+ attested in ``train_toks`` will be used.
994
+
995
+ :param options: Extra parameters for the constructor, such as
996
+ ``unseen_features`` and ``alwayson_features``.
997
+ """
998
+ mapping = {} # maps (fname, fval, label) -> fid
999
+ seen_labels = set() # The set of labels we've encountered
1000
+ count = defaultdict(int) # maps (fname, fval) -> count
1001
+
1002
+ for (tok, label) in train_toks:
1003
+ if labels and label not in labels:
1004
+ raise ValueError("Unexpected label %s" % label)
1005
+ seen_labels.add(label)
1006
+
1007
+ # Record each of the features.
1008
+ for (fname, fval) in tok.items():
1009
+ if type(fval) in (int, float):
1010
+ fval = type(fval)
1011
+ # If a count cutoff is given, then only add a joint
1012
+ # feature once the corresponding (fname, fval, label)
1013
+ # tuple exceeds that cutoff.
1014
+ count[fname, fval] += 1
1015
+ if count[fname, fval] >= count_cutoff:
1016
+ if (fname, fval, label) not in mapping:
1017
+ mapping[fname, fval, label] = len(mapping)
1018
+
1019
+ if labels is None:
1020
+ labels = seen_labels
1021
+ return cls(labels, mapping, **options)
1022
+
1023
+
1024
+ ######################################################################
1025
+ # { Classifier Trainer: Generalized Iterative Scaling
1026
+ ######################################################################
1027
+
1028
+
1029
+ def train_maxent_classifier_with_gis(
1030
+ train_toks, trace=3, encoding=None, labels=None, **cutoffs
1031
+ ):
1032
+ """
1033
+ Train a new ``ConditionalExponentialClassifier``, using the given
1034
+ training samples, using the Generalized Iterative Scaling
1035
+ algorithm. This ``ConditionalExponentialClassifier`` will encode
1036
+ the model that maximizes entropy from all the models that are
1037
+ empirically consistent with ``train_toks``.
1038
+
1039
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1040
+ """
1041
+ cutoffs.setdefault("max_iter", 100)
1042
+ cutoffchecker = CutoffChecker(cutoffs)
1043
+
1044
+ # Construct an encoding from the training data.
1045
+ if encoding is None:
1046
+ encoding = GISEncoding.train(train_toks, labels=labels)
1047
+
1048
+ if not hasattr(encoding, "C"):
1049
+ raise TypeError(
1050
+ "The GIS algorithm requires an encoding that "
1051
+ "defines C (e.g., GISEncoding)."
1052
+ )
1053
+
1054
+ # Cinv is the inverse of the sum of each joint feature vector.
1055
+ # This controls the learning rate: higher Cinv (or lower C) gives
1056
+ # faster learning.
1057
+ Cinv = 1.0 / encoding.C
1058
+
1059
+ # Count how many times each feature occurs in the training data.
1060
+ empirical_fcount = calculate_empirical_fcount(train_toks, encoding)
1061
+
1062
+ # Check for any features that are not attested in train_toks.
1063
+ unattested = set(numpy.nonzero(empirical_fcount == 0)[0])
1064
+
1065
+ # Build the classifier. Start with weight=0 for each attested
1066
+ # feature, and weight=-infinity for each unattested feature.
1067
+ weights = numpy.zeros(len(empirical_fcount), "d")
1068
+ for fid in unattested:
1069
+ weights[fid] = numpy.NINF
1070
+ classifier = ConditionalExponentialClassifier(encoding, weights)
1071
+
1072
+ # Take the log of the empirical fcount.
1073
+ log_empirical_fcount = numpy.log2(empirical_fcount)
1074
+ del empirical_fcount
1075
+
1076
+ if trace > 0:
1077
+ print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
1078
+ if trace > 2:
1079
+ print()
1080
+ print(" Iteration Log Likelihood Accuracy")
1081
+ print(" ---------------------------------------")
1082
+
1083
+ # Train the classifier.
1084
+ try:
1085
+ while True:
1086
+ if trace > 2:
1087
+ ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
1088
+ acc = cutoffchecker.acc or accuracy(classifier, train_toks)
1089
+ iternum = cutoffchecker.iter
1090
+ print(" %9d %14.5f %9.3f" % (iternum, ll, acc))
1091
+
1092
+ # Use the model to estimate the number of times each
1093
+ # feature should occur in the training data.
1094
+ estimated_fcount = calculate_estimated_fcount(
1095
+ classifier, train_toks, encoding
1096
+ )
1097
+
1098
+ # Take the log of estimated fcount (avoid taking log(0).)
1099
+ for fid in unattested:
1100
+ estimated_fcount[fid] += 1
1101
+ log_estimated_fcount = numpy.log2(estimated_fcount)
1102
+ del estimated_fcount
1103
+
1104
+ # Update the classifier weights
1105
+ weights = classifier.weights()
1106
+ weights += (log_empirical_fcount - log_estimated_fcount) * Cinv
1107
+ classifier.set_weights(weights)
1108
+
1109
+ # Check the log-likelihood & accuracy cutoffs.
1110
+ if cutoffchecker.check(classifier, train_toks):
1111
+ break
1112
+
1113
+ except KeyboardInterrupt:
1114
+ print(" Training stopped: keyboard interrupt")
1115
+ except:
1116
+ raise
1117
+
1118
+ if trace > 2:
1119
+ ll = log_likelihood(classifier, train_toks)
1120
+ acc = accuracy(classifier, train_toks)
1121
+ print(f" Final {ll:14.5f} {acc:9.3f}")
1122
+
1123
+ # Return the classifier.
1124
+ return classifier
1125
+
1126
+
1127
+ def calculate_empirical_fcount(train_toks, encoding):
1128
+ fcount = numpy.zeros(encoding.length(), "d")
1129
+
1130
+ for tok, label in train_toks:
1131
+ for (index, val) in encoding.encode(tok, label):
1132
+ fcount[index] += val
1133
+
1134
+ return fcount
1135
+
1136
+
1137
+ def calculate_estimated_fcount(classifier, train_toks, encoding):
1138
+ fcount = numpy.zeros(encoding.length(), "d")
1139
+
1140
+ for tok, label in train_toks:
1141
+ pdist = classifier.prob_classify(tok)
1142
+ for label in pdist.samples():
1143
+ prob = pdist.prob(label)
1144
+ for (fid, fval) in encoding.encode(tok, label):
1145
+ fcount[fid] += prob * fval
1146
+
1147
+ return fcount
1148
+
1149
+
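The GIS trainer above is normally reached through ``MaxentClassifier.train``; a minimal hedged sketch with invented training data (requires numpy; a small ``max_iter`` keeps it fast):

    from nltk.classify import MaxentClassifier

    train_toks = [({"a": True}, "x"), ({"b": True}, "y"), ({"a": True}, "x")]
    # algorithm="GIS" dispatches to train_maxent_classifier_with_gis().
    clf = MaxentClassifier.train(train_toks, algorithm="GIS", trace=0, max_iter=10)
    print(clf.classify({"a": True}))   # most likely 'x' for this toy data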
1150
+ ######################################################################
1151
+ # { Classifier Trainer: Improved Iterative Scaling
1152
+ ######################################################################
1153
+
1154
+
1155
+ def train_maxent_classifier_with_iis(
1156
+ train_toks, trace=3, encoding=None, labels=None, **cutoffs
1157
+ ):
1158
+ """
1159
+ Train a new ``ConditionalExponentialClassifier``, using the given
1160
+ training samples, using the Improved Iterative Scaling algorithm.
1161
+ This ``ConditionalExponentialClassifier`` will encode the model
1162
+ that maximizes entropy from all the models that are empirically
1163
+ consistent with ``train_toks``.
1164
+
1165
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1166
+ """
1167
+ cutoffs.setdefault("max_iter", 100)
1168
+ cutoffchecker = CutoffChecker(cutoffs)
1169
+
1170
+ # Construct an encoding from the training data.
1171
+ if encoding is None:
1172
+ encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)
1173
+
1174
+ # Count how many times each feature occurs in the training data.
1175
+ empirical_ffreq = calculate_empirical_fcount(train_toks, encoding) / len(train_toks)
1176
+
1177
+ # Find the nf map, and related variables nfarray and nfident.
1178
+ # nf is the sum of the features for a given labeled text.
1179
+ # nfmap compresses this sparse set of values to a dense list.
1180
+ # nfarray performs the reverse operation. nfident is
1181
+ # nfarray multiplied by an identity matrix.
1182
+ nfmap = calculate_nfmap(train_toks, encoding)
1183
+ nfarray = numpy.array(sorted(nfmap, key=nfmap.__getitem__), "d")
1184
+ nftranspose = numpy.reshape(nfarray, (len(nfarray), 1))
1185
+
1186
+ # Check for any features that are not attested in train_toks.
1187
+ unattested = set(numpy.nonzero(empirical_ffreq == 0)[0])
1188
+
1189
+ # Build the classifier. Start with weight=0 for each attested
1190
+ # feature, and weight=-infinity for each unattested feature.
1191
+ weights = numpy.zeros(len(empirical_ffreq), "d")
1192
+ for fid in unattested:
1193
+ weights[fid] = numpy.NINF
1194
+ classifier = ConditionalExponentialClassifier(encoding, weights)
1195
+
1196
+ if trace > 0:
1197
+ print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
1198
+ if trace > 2:
1199
+ print()
1200
+ print(" Iteration Log Likelihood Accuracy")
1201
+ print(" ---------------------------------------")
1202
+
1203
+ # Train the classifier.
1204
+ try:
1205
+ while True:
1206
+ if trace > 2:
1207
+ ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
1208
+ acc = cutoffchecker.acc or accuracy(classifier, train_toks)
1209
+ iternum = cutoffchecker.iter
1210
+ print(" %9d %14.5f %9.3f" % (iternum, ll, acc))
1211
+
1212
+ # Calculate the deltas for this iteration, using Newton's method.
1213
+ deltas = calculate_deltas(
1214
+ train_toks,
1215
+ classifier,
1216
+ unattested,
1217
+ empirical_ffreq,
1218
+ nfmap,
1219
+ nfarray,
1220
+ nftranspose,
1221
+ encoding,
1222
+ )
1223
+
1224
+ # Use the deltas to update our weights.
1225
+ weights = classifier.weights()
1226
+ weights += deltas
1227
+ classifier.set_weights(weights)
1228
+
1229
+ # Check the log-likelihood & accuracy cutoffs.
1230
+ if cutoffchecker.check(classifier, train_toks):
1231
+ break
1232
+
1233
+ except KeyboardInterrupt:
1234
+ print(" Training stopped: keyboard interrupt")
1235
+ except:
1236
+ raise
1237
+
1238
+ if trace > 2:
1239
+ ll = log_likelihood(classifier, train_toks)
1240
+ acc = accuracy(classifier, train_toks)
1241
+ print(f" Final {ll:14.5f} {acc:9.3f}")
1242
+
1243
+ # Return the classifier.
1244
+ return classifier
1245
+
1246
+
1247
+ def calculate_nfmap(train_toks, encoding):
1248
+ """
1249
+ Construct a map that can be used to compress ``nf`` (which is
1250
+ typically sparse).
1251
+
1252
+ *nf(feature_vector)* is the sum of the feature values for
1253
+ *feature_vector*.
1254
+
1255
+ This represents the number of features that are active for a
1256
+ given labeled text. This method finds all values of *nf(t)*
1257
+ that are attested for at least one token in the given list of
1258
+ training tokens; and constructs a dictionary mapping these
1259
+ attested values to a continuous range *0...N*. For example,
1260
+ if the only values of *nf()* that were attested were 3, 5, and
1261
+ 7, then ``_nfmap`` might return the dictionary ``{3:0, 5:1, 7:2}``.
1262
+
1263
+ :return: A map that can be used to compress ``nf`` to a dense
1264
+ vector.
1265
+ :rtype: dict(int -> int)
1266
+ """
1267
+ # Map from nf to indices. This allows us to use smaller arrays.
1268
+ nfset = set()
1269
+ for tok, _ in train_toks:
1270
+ for label in encoding.labels():
1271
+ nfset.add(sum(val for (id, val) in encoding.encode(tok, label)))
1272
+ return {nf: i for (i, nf) in enumerate(nfset)}
1273
+
1274
+
1275
+ def calculate_deltas(
1276
+ train_toks,
1277
+ classifier,
1278
+ unattested,
1279
+ ffreq_empirical,
1280
+ nfmap,
1281
+ nfarray,
1282
+ nftranspose,
1283
+ encoding,
1284
+ ):
1285
+ r"""
1286
+ Calculate the update values for the classifier weights for
1287
+ this iteration of IIS. These update weights are the value of
1288
+ ``delta`` that solves the equation::
1289
+
1290
+ ffreq_empirical[i]
1291
+ =
1292
+ SUM[fs,l] (classifier.prob_classify(fs).prob(l) *
1293
+ feature_vector(fs,l)[i] *
1294
+ exp(delta[i] * nf(feature_vector(fs,l))))
1295
+
1296
+ Where:
1297
+ - *(fs,l)* is a (featureset, label) tuple from ``train_toks``
1298
+ - *feature_vector(fs,l)* = ``encoding.encode(fs,l)``
1299
+ - *nf(vector)* = ``sum([val for (id,val) in vector])``
1300
+
1301
+ This method uses Newton's method to solve this equation for
1302
+ *delta[i]*. In particular, it starts with a guess of
1303
+ ``delta[i]`` = 1; and iteratively updates ``delta`` with:
1304
+
1305
+ | delta[i] -= (ffreq_empirical[i] - sum1[i])/(-sum2[i])
1306
+
1307
+ until convergence, where *sum1* and *sum2* are defined as:
1308
+
1309
+ | sum1[i](delta) = SUM[fs,l] f[i](fs,l,delta)
1310
+ | sum2[i](delta) = SUM[fs,l] (f[i](fs,l,delta).nf(feature_vector(fs,l)))
1311
+ | f[i](fs,l,delta) = (classifier.prob_classify(fs).prob(l) .
1312
+ | feature_vector(fs,l)[i] .
1313
+ | exp(delta[i] . nf(feature_vector(fs,l))))
1314
+
1315
+ Note that *sum1* and *sum2* depend on ``delta``; so they need
1316
+ to be re-computed each iteration.
1317
+
1318
+ The variables ``nfmap``, ``nfarray``, and ``nftranspose`` are
1319
+ used to generate a dense encoding for *nf(ltext)*. This
1320
+ allows ``_deltas`` to calculate *sum1* and *sum2* using
1321
+ matrices, which yields a significant performance improvement.
1322
+
1323
+ :param train_toks: The set of training tokens.
1324
+ :type train_toks: list(tuple(dict, str))
1325
+ :param classifier: The current classifier.
1326
+ :type classifier: ClassifierI
1327
+ :param ffreq_empirical: An array containing the empirical
1328
+ frequency for each feature. The *i*\ th element of this
1329
+ array is the empirical frequency for feature *i*.
1330
+ :type ffreq_empirical: sequence of float
1331
+ :param unattested: An array that is 1 for features that are
1332
+ not attested in the training data; and 0 for features that
1333
+ are attested. In other words, ``unattested[i]==0`` iff
1334
+ ``ffreq_empirical[i]==0``.
1335
+ :type unattested: sequence of int
1336
+ :param nfmap: A map that can be used to compress ``nf`` to a dense
1337
+ vector.
1338
+ :type nfmap: dict(int -> int)
1339
+ :param nfarray: An array that can be used to uncompress ``nf``
1340
+ from a dense vector.
1341
+ :type nfarray: array(float)
1342
+ :param nftranspose: The transpose of ``nfarray``
1343
+ :type nftranspose: array(float)
1344
+ """
1345
+ # These parameters control when we decide that we've
1346
+ # converged. It probably should be possible to set these
1347
+ # manually, via keyword arguments to train.
1348
+ NEWTON_CONVERGE = 1e-12
1349
+ MAX_NEWTON = 300
1350
+
1351
+ deltas = numpy.ones(encoding.length(), "d")
1352
+
1353
+ # Precompute the A matrix:
1354
+ # A[nf][id] = sum ( p(fs) * p(label|fs) * f(fs,label) )
1355
+ # over all label,fs s.t. num_features[label,fs]=nf
1356
+ A = numpy.zeros((len(nfmap), encoding.length()), "d")
1357
+
1358
+ for tok, label in train_toks:
1359
+ dist = classifier.prob_classify(tok)
1360
+
1361
+ for label in encoding.labels():
1362
+ # Generate the feature vector
1363
+ feature_vector = encoding.encode(tok, label)
1364
+ # Find the number of active features
1365
+ nf = sum(val for (id, val) in feature_vector)
1366
+ # Update the A matrix
1367
+ for (id, val) in feature_vector:
1368
+ A[nfmap[nf], id] += dist.prob(label) * val
1369
+ A /= len(train_toks)
1370
+
1371
+ # Iteratively solve for delta. Use the following variables:
1372
+ # - nf_delta[x][y] = nfarray[x] * delta[y]
1373
+ # - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
1374
+ # - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
1375
+ # - sum1[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
1376
+ # exp(delta[i]nf)
1377
+ # - sum2[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
1378
+ # nf exp(delta[i]nf)
1379
+ for rangenum in range(MAX_NEWTON):
1380
+ nf_delta = numpy.outer(nfarray, deltas)
1381
+ exp_nf_delta = 2**nf_delta
1382
+ nf_exp_nf_delta = nftranspose * exp_nf_delta
1383
+ sum1 = numpy.sum(exp_nf_delta * A, axis=0)
1384
+ sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0)
1385
+
1386
+ # Avoid division by zero.
1387
+ for fid in unattested:
1388
+ sum2[fid] += 1
1389
+
1390
+ # Update the deltas.
1391
+ deltas -= (ffreq_empirical - sum1) / -sum2
1392
+
1393
+ # We can stop once we converge.
1394
+ n_error = numpy.sum(abs(ffreq_empirical - sum1)) / numpy.sum(abs(deltas))
1395
+ if n_error < NEWTON_CONVERGE:
1396
+ return deltas
1397
+
1398
+ return deltas
1399
+
1400
+
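A toy numeric illustration of the Newton update performed inside the loop above; the arrays are invented stand-ins for ``ffreq_empirical``, ``sum1`` and ``sum2``:

    import numpy

    ffreq_empirical = numpy.array([0.40, 0.10])  # observed feature frequencies
    sum1 = numpy.array([0.50, 0.05])             # model expectation at the current deltas
    sum2 = numpy.array([1.20, 0.30])             # derivative term
    deltas = numpy.ones(2)

    # One Newton step, exactly as in the loop: delta -= (empirical - sum1) / (-sum2)
    deltas -= (ffreq_empirical - sum1) / -sum2
    print(deltas)   # approximately [0.9167, 1.1667]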
1401
+ ######################################################################
1402
+ # { Classifier Trainer: megam
1403
+ ######################################################################
1404
+
1405
+ # [xx] possible extension: add support for using implicit file format;
1406
+ # this would need to put requirements on what encoding is used. But
1407
+ # we may need this for other maxent classifier trainers that require
1408
+ # implicit formats anyway.
1409
+ def train_maxent_classifier_with_megam(
1410
+ train_toks, trace=3, encoding=None, labels=None, gaussian_prior_sigma=0, **kwargs
1411
+ ):
1412
+ """
1413
+ Train a new ``ConditionalExponentialClassifier``, using the given
1414
+ training samples, using the external ``megam`` library. This
1415
+ ``ConditionalExponentialClassifier`` will encode the model that
1416
+ maximizes entropy from all the models that are empirically
1417
+ consistent with ``train_toks``.
1418
+
1419
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1420
+ :see: ``nltk.classify.megam``
1421
+ """
1422
+
1423
+ explicit = True
1424
+ bernoulli = True
1425
+ if "explicit" in kwargs:
1426
+ explicit = kwargs["explicit"]
1427
+ if "bernoulli" in kwargs:
1428
+ bernoulli = kwargs["bernoulli"]
1429
+
1430
+ # Construct an encoding from the training data.
1431
+ if encoding is None:
1432
+ # Count cutoff can also be controlled by megam with the -minfc
1433
+ # option. Not sure where the best place for it is.
1434
+ count_cutoff = kwargs.get("count_cutoff", 0)
1435
+ encoding = BinaryMaxentFeatureEncoding.train(
1436
+ train_toks, count_cutoff, labels=labels, alwayson_features=True
1437
+ )
1438
+ elif labels is not None:
1439
+ raise ValueError("Specify encoding or labels, not both")
1440
+
1441
+ # Write a training file for megam.
1442
+ try:
1443
+ fd, trainfile_name = tempfile.mkstemp(prefix="nltk-")
1444
+ with open(trainfile_name, "w") as trainfile:
1445
+ write_megam_file(
1446
+ train_toks, encoding, trainfile, explicit=explicit, bernoulli=bernoulli
1447
+ )
1448
+ os.close(fd)
1449
+ except (OSError, ValueError) as e:
1450
+ raise ValueError("Error while creating megam training file: %s" % e) from e
1451
+
1452
+ # Run megam on the training file.
1453
+ options = []
1454
+ options += ["-nobias", "-repeat", "10"]
1455
+ if explicit:
1456
+ options += ["-explicit"]
1457
+ if not bernoulli:
1458
+ options += ["-fvals"]
1459
+ if gaussian_prior_sigma:
1460
+ # Lambda is just the precision of the Gaussian prior, i.e. it's the
1461
+ # inverse variance, so the parameter conversion is 1.0/sigma**2.
1462
+ # See https://users.umiacs.umd.edu/~hal/docs/daume04cg-bfgs.pdf
1463
+ inv_variance = 1.0 / gaussian_prior_sigma**2
1464
+ else:
1465
+ inv_variance = 0
1466
+ options += ["-lambda", "%.2f" % inv_variance, "-tune"]
1467
+ if trace < 3:
1468
+ options += ["-quiet"]
1469
+ if "max_iter" in kwargs:
1470
+ options += ["-maxi", "%s" % kwargs["max_iter"]]
1471
+ if "ll_delta" in kwargs:
1472
+ # [xx] this is actually a perplexity delta, not a log
1473
+ # likelihood delta
1474
+ options += ["-dpp", "%s" % abs(kwargs["ll_delta"])]
1475
+ if hasattr(encoding, "cost"):
1476
+ options += ["-multilabel"] # each possible la
1477
+ options += ["multiclass", trainfile_name]
1478
+ stdout = call_megam(options)
1479
+ # print('./megam_i686.opt ', ' '.join(options))
1480
+ # Delete the training file
1481
+ try:
1482
+ os.remove(trainfile_name)
1483
+ except OSError as e:
1484
+ print(f"Warning: unable to delete {trainfile_name}: {e}")
1485
+
1486
+ # Parse the generated weight vector.
1487
+ weights = parse_megam_weights(stdout, encoding.length(), explicit)
1488
+
1489
+ # Convert from base-e to base-2 weights.
1490
+ weights *= numpy.log2(numpy.e)
1491
+
1492
+ # Build the classifier
1493
+ return MaxentClassifier(encoding, weights)
1494
+
1495
+
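A hedged sketch of reaching this trainer through the public API; it only works if the external ``megam`` binary is installed and discoverable (the path shown is hypothetical):

    from nltk.classify import MaxentClassifier, config_megam

    config_megam()   # or config_megam("/usr/local/bin/megam") if megam is not on PATH
    train_toks = [({"a": True}, "x"), ({"b": True}, "y")]
    clf = MaxentClassifier.train(train_toks, algorithm="megam",
                                 gaussian_prior_sigma=1.0, trace=0)
    print(clf.classify({"a": True}))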
1496
+ ######################################################################
1497
+ # { Classifier Trainer: tadm
1498
+ ######################################################################
1499
+
1500
+
1501
+ class TadmMaxentClassifier(MaxentClassifier):
1502
+ @classmethod
1503
+ def train(cls, train_toks, **kwargs):
1504
+ algorithm = kwargs.get("algorithm", "tao_lmvm")
1505
+ trace = kwargs.get("trace", 3)
1506
+ encoding = kwargs.get("encoding", None)
1507
+ labels = kwargs.get("labels", None)
1508
+ sigma = kwargs.get("gaussian_prior_sigma", 0)
1509
+ count_cutoff = kwargs.get("count_cutoff", 0)
1510
+ max_iter = kwargs.get("max_iter")
1511
+ ll_delta = kwargs.get("min_lldelta")
1512
+
1513
+ # Construct an encoding from the training data.
1514
+ if not encoding:
1515
+ encoding = TadmEventMaxentFeatureEncoding.train(
1516
+ train_toks, count_cutoff, labels=labels
1517
+ )
1518
+
1519
+ trainfile_fd, trainfile_name = tempfile.mkstemp(
1520
+ prefix="nltk-tadm-events-", suffix=".gz"
1521
+ )
1522
+ weightfile_fd, weightfile_name = tempfile.mkstemp(prefix="nltk-tadm-weights-")
1523
+
1524
+ trainfile = gzip_open_unicode(trainfile_name, "w")
1525
+ write_tadm_file(train_toks, encoding, trainfile)
1526
+ trainfile.close()
1527
+
1528
+ options = []
1529
+ options.extend(["-monitor"])
1530
+ options.extend(["-method", algorithm])
1531
+ if sigma:
1532
+ options.extend(["-l2", "%.6f" % sigma**2])
1533
+ if max_iter:
1534
+ options.extend(["-max_it", "%d" % max_iter])
1535
+ if ll_delta:
1536
+ options.extend(["-fatol", "%.6f" % abs(ll_delta)])
1537
+ options.extend(["-events_in", trainfile_name])
1538
+ options.extend(["-params_out", weightfile_name])
1539
+ if trace < 3:
1540
+ options.extend(["2>&1"])
1541
+ else:
1542
+ options.extend(["-summary"])
1543
+
1544
+ call_tadm(options)
1545
+
1546
+ with open(weightfile_name) as weightfile:
1547
+ weights = parse_tadm_weights(weightfile)
1548
+
1549
+ os.remove(trainfile_name)
1550
+ os.remove(weightfile_name)
1551
+
1552
+ # Convert from base-e to base-2 weights.
1553
+ weights *= numpy.log2(numpy.e)
1554
+
1555
+ # Build the classifier
1556
+ return cls(encoding, weights)
1557
+
1558
+
1559
+ ######################################################################
1560
+ # { Demo
1561
+ ######################################################################
1562
+ def demo():
1563
+ from nltk.classify.util import names_demo
1564
+
1565
+ classifier = names_demo(MaxentClassifier.train)
1566
+
1567
+
1568
+ if __name__ == "__main__":
1569
+ demo()
venv/lib/python3.10/site-packages/nltk/classify/megam.py ADDED
@@ -0,0 +1,184 @@
1
+ # Natural Language Toolkit: Interface to Megam Classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A set of functions used to interface with the external megam_ maxent
10
+ optimization package. Before megam can be used, you should tell NLTK where it
11
+ can find the megam binary, using the ``config_megam()`` function. Typical
12
+ usage:
13
+
14
+ >>> from nltk.classify import megam
15
+ >>> megam.config_megam() # pass path to megam if not found in PATH # doctest: +SKIP
16
+ [Found megam: ...]
17
+
18
+ Use with MaxentClassifier. Example below, see MaxentClassifier documentation
19
+ for details.
20
+
21
+ nltk.classify.MaxentClassifier.train(corpus, 'megam')
22
+
23
+ .. _megam: https://www.umiacs.umd.edu/~hal/megam/index.html
24
+ """
25
+ import subprocess
26
+
27
+ from nltk.internals import find_binary
28
+
29
+ try:
30
+ import numpy
31
+ except ImportError:
32
+ numpy = None
33
+
34
+ ######################################################################
35
+ # { Configuration
36
+ ######################################################################
37
+
38
+ _megam_bin = None
39
+
40
+
41
+ def config_megam(bin=None):
42
+ """
43
+ Configure NLTK's interface to the ``megam`` maxent optimization
44
+ package.
45
+
46
+ :param bin: The full path to the ``megam`` binary. If not specified,
47
+ then nltk will search the system for a ``megam`` binary; and if
48
+ one is not found, it will raise a ``LookupError`` exception.
49
+ :type bin: str
50
+ """
51
+ global _megam_bin
52
+ _megam_bin = find_binary(
53
+ "megam",
54
+ bin,
55
+ env_vars=["MEGAM"],
56
+ binary_names=["megam.opt", "megam", "megam_686", "megam_i686.opt"],
57
+ url="https://www.umiacs.umd.edu/~hal/megam/index.html",
58
+ )
59
+
60
+
61
+ ######################################################################
62
+ # { Megam Interface Functions
63
+ ######################################################################
64
+
65
+
66
+ def write_megam_file(train_toks, encoding, stream, bernoulli=True, explicit=True):
67
+ """
68
+ Generate an input file for ``megam`` based on the given corpus of
69
+ classified tokens.
70
+
71
+ :type train_toks: list(tuple(dict, str))
72
+ :param train_toks: Training data, represented as a list of
73
+ pairs, the first member of which is a feature dictionary,
74
+ and the second of which is a classification label.
75
+
76
+ :type encoding: MaxentFeatureEncodingI
77
+ :param encoding: A feature encoding, used to convert featuresets
78
+ into feature vectors. May optionally implement a cost() method
79
+ in order to assign different costs to different class predictions.
80
+
81
+ :type stream: stream
82
+ :param stream: The stream to which the megam input file should be
83
+ written.
84
+
85
+ :param bernoulli: If true, then use the 'bernoulli' format. I.e.,
86
+ all joint features have binary values, and are listed iff they
87
+ are true. Otherwise, list feature values explicitly. If
88
+ ``bernoulli=False``, then you must call ``megam`` with the
89
+ ``-fvals`` option.
90
+
91
+ :param explicit: If true, then use the 'explicit' format. I.e.,
92
+ list the features that would fire for any of the possible
93
+ labels, for each token. If ``explicit=True``, then you must
94
+ call ``megam`` with the ``-explicit`` option.
95
+ """
96
+ # Look up the set of labels.
97
+ labels = encoding.labels()
98
+ labelnum = {label: i for (i, label) in enumerate(labels)}
99
+
100
+ # Write the file, which contains one line per instance.
101
+ for featureset, label in train_toks:
102
+ # First, the instance number (or, in the weighted multiclass case, the cost of each label).
103
+ if hasattr(encoding, "cost"):
104
+ stream.write(
105
+ ":".join(str(encoding.cost(featureset, label, l)) for l in labels)
106
+ )
107
+ else:
108
+ stream.write("%d" % labelnum[label])
109
+
110
+ # For implicit file formats, just list the features that fire
111
+ # for this instance's actual label.
112
+ if not explicit:
113
+ _write_megam_features(encoding.encode(featureset, label), stream, bernoulli)
114
+
115
+ # For explicit formats, list the features that would fire for
116
+ # any of the possible labels.
117
+ else:
118
+ for l in labels:
119
+ stream.write(" #")
120
+ _write_megam_features(encoding.encode(featureset, l), stream, bernoulli)
121
+
122
+ # End of the instance.
123
+ stream.write("\n")
124
+
125
+
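To see the layout produced by ``write_megam_file``, the hedged sketch below writes a two-instance toy corpus to an in-memory stream (the feature names and labels are invented):

    import io

    from nltk.classify.maxent import BinaryMaxentFeatureEncoding
    from nltk.classify.megam import write_megam_file

    toks = [({"f1": True}, "A"), ({"f2": True}, "B")]
    # alwayson_features=True guarantees every encoded vector is non-empty,
    # which _write_megam_features() requires.
    enc = BinaryMaxentFeatureEncoding.train(toks, alwayson_features=True)

    buf = io.StringIO()
    write_megam_file(toks, enc, buf, explicit=True, bernoulli=True)
    print(buf.getvalue())   # one line per instance: label index, then a '#'-separated feature-id list per label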
126
+ def parse_megam_weights(s, features_count, explicit=True):
127
+ """
128
+ Given the stdout output generated by ``megam`` when training a
129
+ model, return a ``numpy`` array containing the corresponding weight
130
+ vector. This function does not currently handle bias features.
131
+ """
132
+ if numpy is None:
133
+ raise ValueError("This function requires that numpy be installed")
134
+ assert explicit, "non-explicit not supported yet"
135
+ lines = s.strip().split("\n")
136
+ weights = numpy.zeros(features_count, "d")
137
+ for line in lines:
138
+ if line.strip():
139
+ fid, weight = line.split()
140
+ weights[int(fid)] = float(weight)
141
+ return weights
142
+
143
+
144
+ def _write_megam_features(vector, stream, bernoulli):
145
+ if not vector:
146
+ raise ValueError(
147
+ "MEGAM classifier requires the use of an " "always-on feature."
148
+ )
149
+ for (fid, fval) in vector:
150
+ if bernoulli:
151
+ if fval == 1:
152
+ stream.write(" %s" % fid)
153
+ elif fval != 0:
154
+ raise ValueError(
155
+ "If bernoulli=True, then all" "features must be binary."
156
+ )
157
+ else:
158
+ stream.write(f" {fid} {fval}")
159
+
160
+
161
+ def call_megam(args):
162
+ """
163
+ Call the ``megam`` binary with the given arguments.
164
+ """
165
+ if isinstance(args, str):
166
+ raise TypeError("args should be a list of strings")
167
+ if _megam_bin is None:
168
+ config_megam()
169
+
170
+ # Call megam via a subprocess
171
+ cmd = [_megam_bin] + args
172
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
173
+ (stdout, stderr) = p.communicate()
174
+
175
+ # Check the return code.
176
+ if p.returncode != 0:
177
+ print()
178
+ print(stderr)
179
+ raise OSError("megam command failed!")
180
+
181
+ if isinstance(stdout, str):
182
+ return stdout
183
+ else:
184
+ return stdout.decode("utf-8")
venv/lib/python3.10/site-packages/nltk/classify/naivebayes.py ADDED
@@ -0,0 +1,260 @@
1
+ # Natural Language Toolkit: Naive Bayes Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A classifier based on the Naive Bayes algorithm. In order to find the
10
+ probability for a label, this algorithm first uses the Bayes rule to
11
+ express P(label|features) in terms of P(label) and P(features|label):
12
+
13
+ | P(label) * P(features|label)
14
+ | P(label|features) = ------------------------------
15
+ | P(features)
16
+
17
+ The algorithm then makes the 'naive' assumption that all features are
18
+ independent, given the label:
19
+
20
+ | P(label) * P(f1|label) * ... * P(fn|label)
21
+ | P(label|features) = --------------------------------------------
22
+ | P(features)
23
+
24
+ Rather than computing P(features) explicitly, the algorithm just
25
+ calculates the numerator for each label, and normalizes them so they
26
+ sum to one:
27
+
28
+ | P(label) * P(f1|label) * ... * P(fn|label)
29
+ | P(label|features) = --------------------------------------------
30
+ | SUM[l]( P(l) * P(f1|l) * ... * P(fn|l) )
31
+ """
32
+
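A toy numeric illustration of the normalization step described above (the numerators are invented):

    # Unnormalized numerators P(label) * P(f1|label) * ... * P(fn|label) for two labels.
    numerators = {"spam": 0.4 * 0.20, "ham": 0.6 * 0.05}
    total = sum(numerators.values())
    posterior = {label: value / total for label, value in numerators.items()}
    print(posterior)   # {'spam': 0.727..., 'ham': 0.272...} -- sums to one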
33
+ from collections import defaultdict
34
+
35
+ from nltk.classify.api import ClassifierI
36
+ from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist, sum_logs
37
+
38
+ ##//////////////////////////////////////////////////////
39
+ ## Naive Bayes Classifier
40
+ ##//////////////////////////////////////////////////////
41
+
42
+
43
+ class NaiveBayesClassifier(ClassifierI):
44
+ """
45
+ A Naive Bayes classifier. Naive Bayes classifiers are
46
+ parameterized by two probability distributions:
47
+
48
+ - P(label) gives the probability that an input will receive each
49
+ label, given no information about the input's features.
50
+
51
+ - P(fname=fval|label) gives the probability that a given feature
52
+ (fname) will receive a given value (fval), given that the
53
+ label (label).
54
+
55
+ If the classifier encounters an input with a feature that has
56
+ never been seen with any label, then rather than assigning a
57
+ probability of 0 to all labels, it will ignore that feature.
58
+
59
+ The feature value 'None' is reserved for unseen feature values;
60
+ you generally should not use 'None' as a feature value for one of
61
+ your own features.
62
+ """
63
+
64
+ def __init__(self, label_probdist, feature_probdist):
65
+ """
66
+ :param label_probdist: P(label), the probability distribution
67
+ over labels. It is expressed as a ``ProbDistI`` whose
68
+ samples are labels. I.e., P(label) =
69
+ ``label_probdist.prob(label)``.
70
+
71
+ :param feature_probdist: P(fname=fval|label), the probability
72
+ distribution for feature values, given labels. It is
73
+ expressed as a dictionary whose keys are ``(label, fname)``
74
+ pairs and whose values are ``ProbDistI`` objects over feature
75
+ values. I.e., P(fname=fval|label) =
76
+ ``feature_probdist[label,fname].prob(fval)``. If a given
77
+ ``(label,fname)`` is not a key in ``feature_probdist``, then
78
+ it is assumed that the corresponding P(fname=fval|label)
79
+ is 0 for all values of ``fval``.
80
+ """
81
+ self._label_probdist = label_probdist
82
+ self._feature_probdist = feature_probdist
83
+ self._labels = list(label_probdist.samples())
84
+
85
+ def labels(self):
86
+ return self._labels
87
+
88
+ def classify(self, featureset):
89
+ return self.prob_classify(featureset).max()
90
+
91
+ def prob_classify(self, featureset):
92
+ # Discard any feature names that we've never seen before.
93
+ # Otherwise, we'll just assign a probability of 0 to
94
+ # everything.
95
+ featureset = featureset.copy()
96
+ for fname in list(featureset.keys()):
97
+ for label in self._labels:
98
+ if (label, fname) in self._feature_probdist:
99
+ break
100
+ else:
101
+ # print('Ignoring unseen feature %s' % fname)
102
+ del featureset[fname]
103
+
104
+ # Find the log probability of each label, given the features.
105
+ # Start with the log probability of the label itself.
106
+ logprob = {}
107
+ for label in self._labels:
108
+ logprob[label] = self._label_probdist.logprob(label)
109
+
110
+ # Then add in the log probability of features given labels.
111
+ for label in self._labels:
112
+ for (fname, fval) in featureset.items():
113
+ if (label, fname) in self._feature_probdist:
114
+ feature_probs = self._feature_probdist[label, fname]
115
+ logprob[label] += feature_probs.logprob(fval)
116
+ else:
117
+ # nb: This case will never come up if the
118
+ # classifier was created by
119
+ # NaiveBayesClassifier.train().
120
+ logprob[label] += sum_logs([]) # = -INF.
121
+
122
+ return DictionaryProbDist(logprob, normalize=True, log=True)
123
+
124
+ def show_most_informative_features(self, n=10):
125
+ # Determine the most relevant features, and display them.
126
+ cpdist = self._feature_probdist
127
+ print("Most Informative Features")
128
+
129
+ for (fname, fval) in self.most_informative_features(n):
130
+
131
+ def labelprob(l):
132
+ return cpdist[l, fname].prob(fval)
133
+
134
+ labels = sorted(
135
+ (l for l in self._labels if fval in cpdist[l, fname].samples()),
136
+ key=lambda element: (-labelprob(element), element),
137
+ reverse=True,
138
+ )
139
+ if len(labels) == 1:
140
+ continue
141
+ l0 = labels[0]
142
+ l1 = labels[-1]
143
+ if cpdist[l0, fname].prob(fval) == 0:
144
+ ratio = "INF"
145
+ else:
146
+ ratio = "%8.1f" % (
147
+ cpdist[l1, fname].prob(fval) / cpdist[l0, fname].prob(fval)
148
+ )
149
+ print(
150
+ "%24s = %-14r %6s : %-6s = %s : 1.0"
151
+ % (fname, fval, ("%s" % l1)[:6], ("%s" % l0)[:6], ratio)
152
+ )
153
+
154
+ def most_informative_features(self, n=100):
155
+ """
156
+ Return a list of the 'most informative' features used by this
157
+ classifier. For the purpose of this function, the
158
+ informativeness of a feature ``(fname,fval)`` is equal to the
159
+ highest value of P(fname=fval|label), for any label, divided by
160
+ the lowest value of P(fname=fval|label), for any label:
161
+
162
+ | max[ P(fname=fval|label1) / P(fname=fval|label2) ]
163
+ """
164
+ if hasattr(self, "_most_informative_features"):
165
+ return self._most_informative_features[:n]
166
+ else:
167
+ # The set of (fname, fval) pairs used by this classifier.
168
+ features = set()
169
+ # The max & min probability associated w/ each (fname, fval)
170
+ # pair. Maps (fname,fval) -> float.
171
+ maxprob = defaultdict(lambda: 0.0)
172
+ minprob = defaultdict(lambda: 1.0)
173
+
174
+ for (label, fname), probdist in self._feature_probdist.items():
175
+ for fval in probdist.samples():
176
+ feature = (fname, fval)
177
+ features.add(feature)
178
+ p = probdist.prob(fval)
179
+ maxprob[feature] = max(p, maxprob[feature])
180
+ minprob[feature] = min(p, minprob[feature])
181
+ if minprob[feature] == 0:
182
+ features.discard(feature)
183
+
184
+ # Convert features to a list, & sort it by how informative
185
+ # features are.
186
+ self._most_informative_features = sorted(
187
+ features,
188
+ key=lambda feature_: (
189
+ minprob[feature_] / maxprob[feature_],
190
+ feature_[0],
191
+ feature_[1] in [None, False, True],
192
+ str(feature_[1]).lower(),
193
+ ),
194
+ )
195
+ return self._most_informative_features[:n]
196
+
197
+ @classmethod
198
+ def train(cls, labeled_featuresets, estimator=ELEProbDist):
199
+ """
200
+ :param labeled_featuresets: A list of classified featuresets,
201
+ i.e., a list of tuples ``(featureset, label)``.
202
+ """
203
+ label_freqdist = FreqDist()
204
+ feature_freqdist = defaultdict(FreqDist)
205
+ feature_values = defaultdict(set)
206
+ fnames = set()
207
+
208
+ # Count up how many times each feature value occurred, given
209
+ # the label and featurename.
210
+ for featureset, label in labeled_featuresets:
211
+ label_freqdist[label] += 1
212
+ for fname, fval in featureset.items():
213
+ # Increment freq(fval|label, fname)
214
+ feature_freqdist[label, fname][fval] += 1
215
+ # Record that fname can take the value fval.
216
+ feature_values[fname].add(fval)
217
+ # Keep a list of all feature names.
218
+ fnames.add(fname)
219
+
220
+ # If a feature didn't have a value given for an instance, then
221
+ # we assume that it gets the implicit value 'None.' This loop
222
+ # counts up the number of 'missing' feature values for each
223
+ # (label,fname) pair, and increments the count of the fval
224
+ # 'None' by that amount.
225
+ for label in label_freqdist:
226
+ num_samples = label_freqdist[label]
227
+ for fname in fnames:
228
+ count = feature_freqdist[label, fname].N()
229
+ # Only add a None key when necessary, i.e. if there are
230
+ # any samples with feature 'fname' missing.
231
+ if num_samples - count > 0:
232
+ feature_freqdist[label, fname][None] += num_samples - count
233
+ feature_values[fname].add(None)
234
+
235
+ # Create the P(label) distribution
236
+ label_probdist = estimator(label_freqdist)
237
+
238
+ # Create the P(fval|label, fname) distribution
239
+ feature_probdist = {}
240
+ for ((label, fname), freqdist) in feature_freqdist.items():
241
+ probdist = estimator(freqdist, bins=len(feature_values[fname]))
242
+ feature_probdist[label, fname] = probdist
243
+
244
+ return cls(label_probdist, feature_probdist)
245
+
246
+
247
+ ##//////////////////////////////////////////////////////
248
+ ## Demo
249
+ ##//////////////////////////////////////////////////////
250
+
251
+
252
+ def demo():
253
+ from nltk.classify.util import names_demo
254
+
255
+ classifier = names_demo(NaiveBayesClassifier.train)
256
+ classifier.show_most_informative_features()
257
+
258
+
259
+ if __name__ == "__main__":
260
+ demo()
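A brief, hedged usage sketch of the classifier defined above, with invented gender-style featuresets:

    from nltk.classify import NaiveBayesClassifier

    train = [
        ({"last_letter": "a"}, "female"),
        ({"last_letter": "a"}, "female"),
        ({"last_letter": "k"}, "male"),
    ]
    clf = NaiveBayesClassifier.train(train)

    print(clf.classify({"last_letter": "a"}))   # most likely label
    print(round(clf.prob_classify({"last_letter": "a"}).prob("female"), 3))
    clf.show_most_informative_features(5)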
venv/lib/python3.10/site-packages/nltk/classify/positivenaivebayes.py ADDED
@@ -0,0 +1,180 @@
1
+ # Natural Language Toolkit: Positive Naive Bayes Classifier
2
+ #
3
+ # Copyright (C) 2012 NLTK Project
4
+ # Author: Alessandro Presta <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A variant of the Naive Bayes Classifier that performs binary classification with
10
+ partially-labeled training sets. In other words, assume we want to build a classifier
11
+ that assigns each example to one of two complementary classes (e.g., male names and
12
+ female names).
13
+ If we have a training set with labeled examples for both classes, we can use a
14
+ standard Naive Bayes Classifier. However, consider the case when we only have labeled
15
+ examples for one of the classes, and other, unlabeled, examples.
16
+ Then, assuming a prior distribution on the two labels, we can use the unlabeled set
17
+ to estimate the frequencies of the various features.
18
+
19
+ Let the two possible labels be 1 and 0, and let's say we only have examples labeled 1
20
+ and unlabeled examples. We are also given an estimate of P(1).
21
+
22
+ We compute P(feature|1) exactly as in the standard case.
23
+
24
+ To compute P(feature|0), we first estimate P(feature) from the unlabeled set (we are
25
+ assuming that the unlabeled examples are drawn according to the given prior distribution)
26
+ and then express the conditional probability as:
27
+
28
+ | P(feature) - P(feature|1) * P(1)
29
+ | P(feature|0) = ----------------------------------
30
+ | P(0)
31
+
32
+ Example:
33
+
34
+ >>> from nltk.classify import PositiveNaiveBayesClassifier
35
+
36
+ Some sentences about sports:
37
+
38
+ >>> sports_sentences = [ 'The team dominated the game',
39
+ ... 'They lost the ball',
40
+ ... 'The game was intense',
41
+ ... 'The goalkeeper catched the ball',
42
+ ... 'The other team controlled the ball' ]
43
+
44
+ Mixed topics, including sports:
45
+
46
+ >>> various_sentences = [ 'The President did not comment',
47
+ ... 'I lost the keys',
48
+ ... 'The team won the game',
49
+ ... 'Sara has two kids',
50
+ ... 'The ball went off the court',
51
+ ... 'They had the ball for the whole game',
52
+ ... 'The show is over' ]
53
+
54
+ The features of a sentence are simply the words it contains:
55
+
56
+ >>> def features(sentence):
57
+ ... words = sentence.lower().split()
58
+ ... return dict(('contains(%s)' % w, True) for w in words)
59
+
60
+ We use the sports sentences as positive examples, the mixed ones as unlabeled examples:
61
+
62
+ >>> positive_featuresets = map(features, sports_sentences)
63
+ >>> unlabeled_featuresets = map(features, various_sentences)
64
+ >>> classifier = PositiveNaiveBayesClassifier.train(positive_featuresets,
65
+ ... unlabeled_featuresets)
66
+
67
+ Is the following sentence about sports?
68
+
69
+ >>> classifier.classify(features('The cat is on the table'))
70
+ False
71
+
72
+ What about this one?
73
+
74
+ >>> classifier.classify(features('My team lost the game'))
75
+ True
76
+ """
77
+
78
+ from collections import defaultdict
79
+
80
+ from nltk.classify.naivebayes import NaiveBayesClassifier
81
+ from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist
82
+
83
+ ##//////////////////////////////////////////////////////
84
+ ## Positive Naive Bayes Classifier
85
+ ##//////////////////////////////////////////////////////
86
+
87
+
88
+ class PositiveNaiveBayesClassifier(NaiveBayesClassifier):
89
+ @staticmethod
90
+ def train(
91
+ positive_featuresets,
92
+ unlabeled_featuresets,
93
+ positive_prob_prior=0.5,
94
+ estimator=ELEProbDist,
95
+ ):
96
+ """
97
+ :param positive_featuresets: An iterable of featuresets that are known as positive
98
+ examples (i.e., their label is ``True``).
99
+
100
+ :param unlabeled_featuresets: An iterable of featuresets whose label is unknown.
101
+
102
+ :param positive_prob_prior: A prior estimate of the probability of the label
103
+ ``True`` (default 0.5).
104
+ """
105
+ positive_feature_freqdist = defaultdict(FreqDist)
106
+ unlabeled_feature_freqdist = defaultdict(FreqDist)
107
+ feature_values = defaultdict(set)
108
+ fnames = set()
109
+
110
+ # Count up how many times each feature value occurred in positive examples.
111
+ num_positive_examples = 0
112
+ for featureset in positive_featuresets:
113
+ for fname, fval in featureset.items():
114
+ positive_feature_freqdist[fname][fval] += 1
115
+ feature_values[fname].add(fval)
116
+ fnames.add(fname)
117
+ num_positive_examples += 1
118
+
119
+ # Count up how many times each feature value occurred in unlabeled examples.
120
+ num_unlabeled_examples = 0
121
+ for featureset in unlabeled_featuresets:
122
+ for fname, fval in featureset.items():
123
+ unlabeled_feature_freqdist[fname][fval] += 1
124
+ feature_values[fname].add(fval)
125
+ fnames.add(fname)
126
+ num_unlabeled_examples += 1
127
+
128
+ # If a feature didn't have a value given for an instance, then we assume that
129
+ # it gets the implicit value 'None'.
130
+ for fname in fnames:
131
+ count = positive_feature_freqdist[fname].N()
132
+ positive_feature_freqdist[fname][None] += num_positive_examples - count
133
+ feature_values[fname].add(None)
134
+
135
+ for fname in fnames:
136
+ count = unlabeled_feature_freqdist[fname].N()
137
+ unlabeled_feature_freqdist[fname][None] += num_unlabeled_examples - count
138
+ feature_values[fname].add(None)
139
+
140
+ negative_prob_prior = 1.0 - positive_prob_prior
141
+
142
+ # Create the P(label) distribution.
143
+ label_probdist = DictionaryProbDist(
144
+ {True: positive_prob_prior, False: negative_prob_prior}
145
+ )
146
+
147
+ # Create the P(fval|label, fname) distribution.
148
+ feature_probdist = {}
149
+ for fname, freqdist in positive_feature_freqdist.items():
150
+ probdist = estimator(freqdist, bins=len(feature_values[fname]))
151
+ feature_probdist[True, fname] = probdist
152
+
153
+ for fname, freqdist in unlabeled_feature_freqdist.items():
154
+ global_probdist = estimator(freqdist, bins=len(feature_values[fname]))
155
+ negative_feature_probs = {}
156
+ for fval in feature_values[fname]:
157
+ prob = (
158
+ global_probdist.prob(fval)
159
+ - positive_prob_prior * feature_probdist[True, fname].prob(fval)
160
+ ) / negative_prob_prior
161
+ # TODO: We need to add some kind of smoothing here, instead of
162
+ # setting negative probabilities to zero and normalizing.
163
+ negative_feature_probs[fval] = max(prob, 0.0)
164
+ feature_probdist[False, fname] = DictionaryProbDist(
165
+ negative_feature_probs, normalize=True
166
+ )
167
+
168
+ return PositiveNaiveBayesClassifier(label_probdist, feature_probdist)
169
+
170
+
171
+ ##//////////////////////////////////////////////////////
172
+ ## Demo
173
+ ##//////////////////////////////////////////////////////
174
+
175
+
176
+ def demo():
177
+ from nltk.classify.util import partial_names_demo
178
+
179
+ classifier = partial_names_demo(PositiveNaiveBayesClassifier.train)
180
+ classifier.show_most_informative_features()
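A toy numeric check of the P(feature|0) formula from the module docstring (all probabilities are invented):

    p_feature = 0.30           # P(feature), estimated from the unlabeled set
    p_feature_given_1 = 0.50   # P(feature|1), estimated from the positive set
    p_1 = 0.5                  # prior P(1)
    p_0 = 1.0 - p_1

    p_feature_given_0 = (p_feature - p_feature_given_1 * p_1) / p_0
    print(p_feature_given_0)   # 0.1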
venv/lib/python3.10/site-packages/nltk/classify/rte_classify.py ADDED
@@ -0,0 +1,183 @@
1
+ # Natural Language Toolkit: RTE Classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Simple classifier for RTE corpus.
10
+
11
+ It calculates the overlap in words and named entities between text and
12
+ hypothesis, and also whether there are words / named entities in the
13
+ hypothesis which fail to occur in the text, since this is an indicator that
14
+ the hypothesis is more informative than (i.e., not entailed by) the text.
15
+
16
+ TO DO: better Named Entity classification
17
+ TO DO: add lemmatization
18
+ """
19
+
20
+ from nltk.classify.maxent import MaxentClassifier
21
+ from nltk.classify.util import accuracy
22
+ from nltk.tokenize import RegexpTokenizer
23
+
24
+
25
+ class RTEFeatureExtractor:
26
+ """
27
+ This builds a bag of words for both the text and the hypothesis after
28
+ throwing away some stopwords, then calculates overlap and difference.
29
+ """
30
+
31
+ def __init__(self, rtepair, stop=True, use_lemmatize=False):
32
+ """
33
+ :param rtepair: a ``RTEPair`` from which features should be extracted
34
+ :param stop: if ``True``, stopwords are thrown away.
35
+ :type stop: bool
36
+ """
37
+ self.stop = stop
38
+ self.stopwords = {
39
+ "a",
40
+ "the",
41
+ "it",
42
+ "they",
43
+ "of",
44
+ "in",
45
+ "to",
46
+ "is",
47
+ "have",
48
+ "are",
49
+ "were",
50
+ "and",
51
+ "very",
52
+ ".",
53
+ ",",
54
+ }
55
+
56
+ self.negwords = {"no", "not", "never", "failed", "rejected", "denied"}
57
+ # Try to tokenize so that abbreviations, monetary amounts, email
58
+ # addresses, URLs are single tokens.
59
+ tokenizer = RegexpTokenizer(r"[\w.@:/]+|\w+|\$[\d.]+")
60
+
61
+ # Get the set of word types for text and hypothesis
62
+ self.text_tokens = tokenizer.tokenize(rtepair.text)
63
+ self.hyp_tokens = tokenizer.tokenize(rtepair.hyp)
64
+ self.text_words = set(self.text_tokens)
65
+ self.hyp_words = set(self.hyp_tokens)
66
+
67
+ if use_lemmatize:
68
+ self.text_words = {self._lemmatize(token) for token in self.text_tokens}
69
+ self.hyp_words = {self._lemmatize(token) for token in self.hyp_tokens}
70
+
71
+ if self.stop:
72
+ self.text_words = self.text_words - self.stopwords
73
+ self.hyp_words = self.hyp_words - self.stopwords
74
+
75
+ self._overlap = self.hyp_words & self.text_words
76
+ self._hyp_extra = self.hyp_words - self.text_words
77
+ self._txt_extra = self.text_words - self.hyp_words
78
+
79
+ def overlap(self, toktype, debug=False):
80
+ """
81
+ Compute the overlap between text and hypothesis.
82
+
83
+ :param toktype: distinguish Named Entities from ordinary words
84
+ :type toktype: 'ne' or 'word'
85
+ """
86
+ ne_overlap = {token for token in self._overlap if self._ne(token)}
87
+ if toktype == "ne":
88
+ if debug:
89
+ print("ne overlap", ne_overlap)
90
+ return ne_overlap
91
+ elif toktype == "word":
92
+ if debug:
93
+ print("word overlap", self._overlap - ne_overlap)
94
+ return self._overlap - ne_overlap
95
+ else:
96
+ raise ValueError("Type not recognized:'%s'" % toktype)
97
+
98
+ def hyp_extra(self, toktype, debug=True):
99
+ """
100
+ Compute the extraneous material in the hypothesis.
101
+
102
+ :param toktype: distinguish Named Entities from ordinary words
103
+ :type toktype: 'ne' or 'word'
104
+ """
105
+ ne_extra = {token for token in self._hyp_extra if self._ne(token)}
106
+ if toktype == "ne":
107
+ return ne_extra
108
+ elif toktype == "word":
109
+ return self._hyp_extra - ne_extra
110
+ else:
111
+ raise ValueError("Type not recognized: '%s'" % toktype)
112
+
113
+ @staticmethod
114
+ def _ne(token):
115
+ """
116
+ This just assumes that words in all caps or titles are
117
+ named entities.
118
+
119
+ :type token: str
120
+ """
121
+ if token.istitle() or token.isupper():
122
+ return True
123
+ return False
124
+
125
+ @staticmethod
126
+ def _lemmatize(word):
127
+ """
128
+ Use morphy from WordNet to find the base form of verbs.
129
+ """
130
+ from nltk.corpus import wordnet as wn
131
+
132
+ lemma = wn.morphy(word, pos=wn.VERB)
133
+ if lemma is not None:
134
+ return lemma
135
+ return word
136
+
137
+
138
+ def rte_features(rtepair):
139
+ extractor = RTEFeatureExtractor(rtepair)
140
+ features = {}
141
+ features["alwayson"] = True
142
+ features["word_overlap"] = len(extractor.overlap("word"))
143
+ features["word_hyp_extra"] = len(extractor.hyp_extra("word"))
144
+ features["ne_overlap"] = len(extractor.overlap("ne"))
145
+ features["ne_hyp_extra"] = len(extractor.hyp_extra("ne"))
146
+ features["neg_txt"] = len(extractor.negwords & extractor.text_words)
147
+ features["neg_hyp"] = len(extractor.negwords & extractor.hyp_words)
148
+ return features
149
+
150
+
151
+ def rte_featurize(rte_pairs):
152
+ return [(rte_features(pair), pair.value) for pair in rte_pairs]
153
+
154
+
155
+ def rte_classifier(algorithm, sample_N=None):
156
+ from nltk.corpus import rte as rte_corpus
157
+
158
+ train_set = rte_corpus.pairs(["rte1_dev.xml", "rte2_dev.xml", "rte3_dev.xml"])
159
+ test_set = rte_corpus.pairs(["rte1_test.xml", "rte2_test.xml", "rte3_test.xml"])
160
+
161
+ if sample_N is not None:
162
+ train_set = train_set[:sample_N]
163
+ test_set = test_set[:sample_N]
164
+
165
+ featurized_train_set = rte_featurize(train_set)
166
+ featurized_test_set = rte_featurize(test_set)
167
+
168
+ # Train the classifier
169
+ print("Training classifier...")
170
+ if algorithm in ["megam"]: # MEGAM based algorithms.
171
+ clf = MaxentClassifier.train(featurized_train_set, algorithm)
172
+ elif algorithm in ["GIS", "IIS"]: # Use default GIS/IIS MaxEnt algorithm
173
+ clf = MaxentClassifier.train(featurized_train_set, algorithm)
174
+ else:
175
+ err_msg = str(
176
+ "RTEClassifier only supports these algorithms:\n "
177
+ "'megam', 'GIS', 'IIS'.\n"
178
+ )
179
+ raise Exception(err_msg)
180
+ print("Testing classifier...")
181
+ acc = accuracy(clf, featurized_test_set)
182
+ print("Accuracy: %6.4f" % acc)
183
+ return clf
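A hedged sketch of extracting these features for a single RTE pair; it assumes the NLTK RTE corpus has been downloaded, and the pair index is illustrative:

    from nltk.corpus import rte as rte_corpus
    from nltk.classify.rte_classify import rte_features

    pair = rte_corpus.pairs(["rte3_dev.xml"])[33]   # any pair will do
    print(rte_features(pair))
    # e.g. {'alwayson': True, 'word_overlap': ..., 'ne_overlap': ..., ...}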
venv/lib/python3.10/site-packages/nltk/classify/scikitlearn.py ADDED
@@ -0,0 +1,143 @@
1
+ # Natural Language Toolkit: Interface to scikit-learn classifiers
2
+ #
3
+ # Author: Lars Buitinck <[email protected]>
4
+ # URL: <https://www.nltk.org/>
5
+ # For license information, see LICENSE.TXT
6
+ """
7
+ scikit-learn (https://scikit-learn.org) is a machine learning library for
8
+ Python. It supports many classification algorithms, including SVMs,
9
+ Naive Bayes, logistic regression (MaxEnt) and decision trees.
10
+
11
+ This package implements a wrapper around scikit-learn classifiers. To use this
12
+ wrapper, construct a scikit-learn estimator object, then use that to construct
13
+ a SklearnClassifier. E.g., to wrap a linear SVM with default settings:
14
+
15
+ >>> from sklearn.svm import LinearSVC
16
+ >>> from nltk.classify.scikitlearn import SklearnClassifier
17
+ >>> classif = SklearnClassifier(LinearSVC())
18
+
19
+ A scikit-learn classifier may include preprocessing steps when it's wrapped
20
+ in a Pipeline object. The following constructs and wraps a Naive Bayes text
21
+ classifier with tf-idf weighting and chi-square feature selection to get the
22
+ best 1000 features:
23
+
24
+ >>> from sklearn.feature_extraction.text import TfidfTransformer
25
+ >>> from sklearn.feature_selection import SelectKBest, chi2
26
+ >>> from sklearn.naive_bayes import MultinomialNB
27
+ >>> from sklearn.pipeline import Pipeline
28
+ >>> pipeline = Pipeline([('tfidf', TfidfTransformer()),
29
+ ... ('chi2', SelectKBest(chi2, k=1000)),
30
+ ... ('nb', MultinomialNB())])
31
+ >>> classif = SklearnClassifier(pipeline)
32
+ """
33
+
34
+ from nltk.classify.api import ClassifierI
35
+ from nltk.probability import DictionaryProbDist
36
+
37
+ try:
38
+ from sklearn.feature_extraction import DictVectorizer
39
+ from sklearn.preprocessing import LabelEncoder
40
+ except ImportError:
41
+ pass
42
+
43
+ __all__ = ["SklearnClassifier"]
44
+
45
+
46
+ class SklearnClassifier(ClassifierI):
47
+ """Wrapper for scikit-learn classifiers."""
48
+
49
+ def __init__(self, estimator, dtype=float, sparse=True):
50
+ """
51
+ :param estimator: scikit-learn classifier object.
52
+
53
+ :param dtype: data type used when building feature array.
54
+ scikit-learn estimators work exclusively on numeric data. The
55
+ default value should be fine for almost all situations.
56
+
57
+ :param sparse: Whether to use sparse matrices internally.
58
+ The estimator must support these; not all scikit-learn classifiers
59
+ do (see their respective documentation and look for "sparse
60
+ matrix"). The default value is True, since most NLP problems
61
+ involve sparse feature sets. Setting this to False may take a
62
+ great amount of memory.
63
+ :type sparse: boolean.
64
+ """
65
+ self._clf = estimator
66
+ self._encoder = LabelEncoder()
67
+ self._vectorizer = DictVectorizer(dtype=dtype, sparse=sparse)
68
+
69
+ def __repr__(self):
70
+ return "<SklearnClassifier(%r)>" % self._clf
71
+
72
+ def classify_many(self, featuresets):
73
+ """Classify a batch of samples.
74
+
75
+ :param featuresets: An iterable over featuresets, each a dict mapping
76
+ strings to either numbers, booleans or strings.
77
+ :return: The predicted class label for each input sample.
78
+ :rtype: list
79
+ """
80
+ X = self._vectorizer.transform(featuresets)
81
+ classes = self._encoder.classes_
82
+ return [classes[i] for i in self._clf.predict(X)]
83
+
84
+ def prob_classify_many(self, featuresets):
85
+ """Compute per-class probabilities for a batch of samples.
86
+
87
+ :param featuresets: An iterable over featuresets, each a dict mapping
88
+ strings to either numbers, booleans or strings.
89
+ :rtype: list of ``ProbDistI``
90
+ """
91
+ X = self._vectorizer.transform(featuresets)
92
+ y_proba_list = self._clf.predict_proba(X)
93
+ return [self._make_probdist(y_proba) for y_proba in y_proba_list]
94
+
95
+ def labels(self):
96
+ """The class labels used by this classifier.
97
+
98
+ :rtype: list
99
+ """
100
+ return list(self._encoder.classes_)
101
+
102
+ def train(self, labeled_featuresets):
103
+ """
104
+ Train (fit) the scikit-learn estimator.
105
+
106
+ :param labeled_featuresets: A list of ``(featureset, label)``
107
+ where each ``featureset`` is a dict mapping strings to either
108
+ numbers, booleans or strings.
109
+ """
110
+
111
+ X, y = list(zip(*labeled_featuresets))
112
+ X = self._vectorizer.fit_transform(X)
113
+ y = self._encoder.fit_transform(y)
114
+ self._clf.fit(X, y)
115
+
116
+ return self
117
+
118
+ def _make_probdist(self, y_proba):
119
+ classes = self._encoder.classes_
120
+ return DictionaryProbDist({classes[i]: p for i, p in enumerate(y_proba)})
121
+
122
+
123
+ if __name__ == "__main__":
124
+ from sklearn.linear_model import LogisticRegression
125
+ from sklearn.naive_bayes import BernoulliNB
126
+
127
+ from nltk.classify.util import names_demo, names_demo_features
128
+
129
+ # Bernoulli Naive Bayes is designed for binary classification. We set the
130
+ # binarize option to False since we know we're passing boolean features.
131
+ print("scikit-learn Naive Bayes:")
132
+ names_demo(
133
+ SklearnClassifier(BernoulliNB(binarize=False)).train,
134
+ features=names_demo_features,
135
+ )
136
+
137
+ # The C parameter on logistic regression (MaxEnt) controls regularization.
138
+ # The higher it's set, the less regularized the classifier is.
139
+ print("\n\nscikit-learn logistic regression:")
140
+ names_demo(
141
+ SklearnClassifier(LogisticRegression(C=1000)).train,
142
+ features=names_demo_features,
143
+ )
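
The wrapper above is used by handing it any scikit-learn estimator and training on NLTK-style featuresets (dicts mapping feature names to values). A minimal usage sketch, assuming scikit-learn is installed; the names, features and labels below are invented for illustration and are not part of the file:

    # Hypothetical usage sketch for the SklearnClassifier wrapper above.
    from sklearn.linear_model import LogisticRegression
    from nltk.classify.scikitlearn import SklearnClassifier

    # NLTK-style training data: (featureset dict, label) pairs.
    train = [
        ({"last_letter": "a", "length": 5}, "female"),
        ({"last_letter": "k", "length": 4}, "male"),
        ({"last_letter": "a", "length": 6}, "female"),
        ({"last_letter": "o", "length": 4}, "male"),
    ]
    clf = SklearnClassifier(LogisticRegression()).train(train)
    print(clf.labels())                                    # ['female', 'male']
    print(clf.classify_many([{"last_letter": "a", "length": 3}]))
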
venv/lib/python3.10/site-packages/nltk/classify/senna.py ADDED
@@ -0,0 +1,176 @@
1
+ # Natural Language Toolkit: Senna Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Rami Al-Rfou' <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A general interface to the SENNA pipeline that supports any of the
10
+ operations specified in SUPPORTED_OPERATIONS.
11
+
12
+ Applying multiple operations at once has a speed advantage. For example,
13
+ Senna automatically determines POS tags when extracting named
14
+ entities, so applying both operations costs only the time of
15
+ extracting the named entities.
16
+
17
+ The SENNA pipeline has a fixed maximum size for the sentences it can read,
18
+ 1024 tokens per sentence by default. If you have longer sentences, consider
19
+ changing the MAX_SENTENCE_SIZE value in SENNA_main.c and rebuilding your
20
+ system-specific binary; otherwise the fixed size could introduce
21
+ misalignment errors.
22
+
23
+ The input is:
24
+
25
+ - the path to the directory that contains the SENNA executables. If the path is incorrect,
26
+ Senna will fall back to the executable location given by the SENNA environment variable
27
+ - the list of operations to be performed
28
+ - (optionally) the encoding of the input data (default: utf-8)
29
+
30
+ Note: Unit tests for this module can be found in test/unit/test_senna.py
31
+
32
+ >>> from nltk.classify import Senna
33
+ >>> pipeline = Senna('/usr/share/senna-v3.0', ['pos', 'chk', 'ner']) # doctest: +SKIP
34
+ >>> sent = 'Dusseldorf is an international business center'.split()
35
+ >>> [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)] # doctest: +SKIP
36
+ [('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP', 'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'),
37
+ ('international', 'I-NP', 'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP', 'O', 'NN')]
38
+ """
39
+
40
+ from os import environ, path, sep
41
+ from platform import architecture, system
42
+ from subprocess import PIPE, Popen
43
+
44
+ from nltk.tag.api import TaggerI
45
+
46
+
47
+ class Senna(TaggerI):
48
+
49
+ SUPPORTED_OPERATIONS = ["pos", "chk", "ner"]
50
+
51
+ def __init__(self, senna_path, operations, encoding="utf-8"):
52
+ self._encoding = encoding
53
+ self._path = path.normpath(senna_path) + sep
54
+
55
+ # Verifies the existence of the executable on the self._path first
56
+ # senna_binary_file_1 = self.executable(self._path)
57
+ exe_file_1 = self.executable(self._path)
58
+ if not path.isfile(exe_file_1):
59
+ # Check for the system environment
60
+ if "SENNA" in environ:
61
+ # self._path = path.join(environ['SENNA'],'')
62
+ self._path = path.normpath(environ["SENNA"]) + sep
63
+ exe_file_2 = self.executable(self._path)
64
+ if not path.isfile(exe_file_2):
65
+ raise LookupError(
66
+ "Senna executable expected at %s or %s but not found"
67
+ % (exe_file_1, exe_file_2)
68
+ )
69
+
70
+ self.operations = operations
71
+
72
+ def executable(self, base_path):
73
+ """
74
+ Determine the system-specific binary that should be used
75
+ in the pipeline. If the system is not recognized, the default senna
76
+ binary is used.
77
+ """
78
+ os_name = system()
79
+ if os_name == "Linux":
80
+ bits = architecture()[0]
81
+ if bits == "64bit":
82
+ return path.join(base_path, "senna-linux64")
83
+ return path.join(base_path, "senna-linux32")
84
+ if os_name == "Windows":
85
+ return path.join(base_path, "senna-win32.exe")
86
+ if os_name == "Darwin":
87
+ return path.join(base_path, "senna-osx")
88
+ return path.join(base_path, "senna")
89
+
90
+ def _map(self):
91
+ """
92
+ Calculate the order of the columns into which the SENNA pipeline
93
+ will output its tags; the order follows SUPPORTED_OPERATIONS, restricted to the requested operations.
94
+ """
95
+ _map = {}
96
+ i = 1
97
+ for operation in Senna.SUPPORTED_OPERATIONS:
98
+ if operation in self.operations:
99
+ _map[operation] = i
100
+ i += 1
101
+ return _map
102
+
103
+ def tag(self, tokens):
104
+ """
105
+ Applies the specified operation(s) on a list of tokens.
106
+ """
107
+ return self.tag_sents([tokens])[0]
108
+
109
+ def tag_sents(self, sentences):
110
+ """
111
+ Applies the tag method over a list of sentences. This method will return a
112
+ list of dictionaries. Every dictionary will contain a word with its
113
+ calculated annotations/tags.
114
+ """
115
+ encoding = self._encoding
116
+
117
+ if not path.isfile(self.executable(self._path)):
118
+ raise LookupError(
119
+ "Senna executable expected at %s but not found"
120
+ % self.executable(self._path)
121
+ )
122
+
123
+ # Build the senna command to run the tagger
124
+ _senna_cmd = [
125
+ self.executable(self._path),
126
+ "-path",
127
+ self._path,
128
+ "-usrtokens",
129
+ "-iobtags",
130
+ ]
131
+ _senna_cmd.extend(["-" + op for op in self.operations])
132
+
133
+ # Serialize the actual sentences to a temporary string
134
+ _input = "\n".join(" ".join(x) for x in sentences) + "\n"
135
+ if isinstance(_input, str) and encoding:
136
+ _input = _input.encode(encoding)
137
+
138
+ # Run the tagger and get the output
139
+ p = Popen(_senna_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
140
+ (stdout, stderr) = p.communicate(input=_input)
141
+ senna_output = stdout
142
+
143
+ # Check the return code.
144
+ if p.returncode != 0:
145
+ raise RuntimeError("Senna command failed! Details: %s" % stderr)
146
+
147
+ if encoding:
148
+ senna_output = stdout.decode(encoding)
149
+
150
+ # Output the tagged sentences
151
+ map_ = self._map()
152
+ tagged_sentences = [[]]
153
+ sentence_index = 0
154
+ token_index = 0
155
+ for tagged_word in senna_output.strip().split("\n"):
156
+ if not tagged_word:
157
+ tagged_sentences.append([])
158
+ sentence_index += 1
159
+ token_index = 0
160
+ continue
161
+ tags = tagged_word.split("\t")
162
+ result = {}
163
+ for tag in map_:
164
+ result[tag] = tags[map_[tag]].strip()
165
+ try:
166
+ result["word"] = sentences[sentence_index][token_index]
167
+ except IndexError as e:
168
+ raise IndexError(
169
+ "Misalignment error occurred at sentence number %d. Possible reason"
170
+ " is that the sentence size exceeded the maximum size. Check the "
171
+ "documentation of Senna class for more information."
172
+ % sentence_index
173
+ ) from e
174
+ tagged_sentences[-1].append(result)
175
+ token_index += 1
176
+ return tagged_sentences
venv/lib/python3.10/site-packages/nltk/classify/svm.py ADDED
@@ -0,0 +1,17 @@
1
+ # Natural Language Toolkit: SVM-based classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Leon Derczynski <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ """
9
+ nltk.classify.svm has been deprecated. For classification based
10
+ on support vector machines (SVMs), use nltk.classify.scikitlearn
11
+ (or `scikit-learn <https://scikit-learn.org>`_ directly).
12
+ """
13
+
14
+
15
+ class SvmClassifier:
16
+ def __init__(self, *args, **kwargs):
17
+ raise NotImplementedError(__doc__)
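
As the deprecation notice above points out, SVM classification now goes through the scikit-learn wrapper instead. A minimal sketch of that replacement path; the toy featuresets and labels are invented for illustration:

    # Hypothetical replacement for the removed SvmClassifier.
    from sklearn.svm import SVC
    from nltk.classify.scikitlearn import SklearnClassifier

    train = [({"f1": 1.0, "f2": 0.0}, "A"),
             ({"f1": 0.0, "f2": 1.0}, "B"),
             ({"f1": 0.9, "f2": 0.2}, "A"),
             ({"f1": 0.1, "f2": 0.8}, "B")]
    svm = SklearnClassifier(SVC(kernel="linear")).train(train)
    print(svm.classify({"f1": 0.8, "f2": 0.1}))  # most likely "A"
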
venv/lib/python3.10/site-packages/nltk/classify/tadm.py ADDED
@@ -0,0 +1,122 @@
1
+ # Natural Language Toolkit: Interface to TADM Classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Joseph Frazee <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import subprocess
9
+ import sys
10
+
11
+ from nltk.internals import find_binary
12
+
13
+ try:
14
+ import numpy
15
+ except ImportError:
16
+ pass
17
+
18
+ _tadm_bin = None
19
+
20
+
21
+ def config_tadm(bin=None):
22
+ global _tadm_bin
23
+ _tadm_bin = find_binary(
24
+ "tadm", bin, env_vars=["TADM"], binary_names=["tadm"], url="http://tadm.sf.net"
25
+ )
26
+
27
+
28
+ def write_tadm_file(train_toks, encoding, stream):
29
+ """
30
+ Generate an input file for ``tadm`` based on the given corpus of
31
+ classified tokens.
32
+
33
+ :type train_toks: list(tuple(dict, str))
34
+ :param train_toks: Training data, represented as a list of
35
+ pairs, the first member of which is a feature dictionary,
36
+ and the second of which is a classification label.
37
+ :type encoding: TadmEventMaxentFeatureEncoding
38
+ :param encoding: A feature encoding, used to convert featuresets
39
+ into feature vectors.
40
+ :type stream: stream
41
+ :param stream: The stream to which the ``tadm`` input file should be
42
+ written.
43
+ """
44
+ # See the following for a file format description:
45
+ #
46
+ # https://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054
47
+ # https://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054
48
+ labels = encoding.labels()
49
+ for featureset, label in train_toks:
50
+ length_line = "%d\n" % len(labels)
51
+ stream.write(length_line)
52
+ for known_label in labels:
53
+ v = encoding.encode(featureset, known_label)
54
+ line = "%d %d %s\n" % (
55
+ int(label == known_label),
56
+ len(v),
57
+ " ".join("%d %d" % u for u in v),
58
+ )
59
+ stream.write(line)
60
+
61
+
62
+ def parse_tadm_weights(paramfile):
63
+ """
64
+ Given the stdout output generated by ``tadm`` when training a
65
+ model, return a ``numpy`` array containing the corresponding weight
66
+ vector.
67
+ """
68
+ weights = []
69
+ for line in paramfile:
70
+ weights.append(float(line.strip()))
71
+ return numpy.array(weights, "d")
72
+
73
+
74
+ def call_tadm(args):
75
+ """
76
+ Call the ``tadm`` binary with the given arguments.
77
+ """
78
+ if isinstance(args, str):
79
+ raise TypeError("args should be a list of strings")
80
+ if _tadm_bin is None:
81
+ config_tadm()
82
+
83
+ # Call tadm via a subprocess
84
+ cmd = [_tadm_bin] + args
85
+ p = subprocess.Popen(cmd, stdout=sys.stdout)
86
+ (stdout, stderr) = p.communicate()
87
+
88
+ # Check the return code.
89
+ if p.returncode != 0:
90
+ print()
91
+ print(stderr)
92
+ raise OSError("tadm command failed!")
93
+
94
+
95
+ def names_demo():
96
+ from nltk.classify.maxent import TadmMaxentClassifier
97
+ from nltk.classify.util import names_demo
98
+
99
+ classifier = names_demo(TadmMaxentClassifier.train)
100
+
101
+
102
+ def encoding_demo():
103
+ import sys
104
+
105
+ from nltk.classify.maxent import TadmEventMaxentFeatureEncoding
106
+
107
+ tokens = [
108
+ ({"f0": 1, "f1": 1, "f3": 1}, "A"),
109
+ ({"f0": 1, "f2": 1, "f4": 1}, "B"),
110
+ ({"f0": 2, "f2": 1, "f3": 1, "f4": 1}, "A"),
111
+ ]
112
+ encoding = TadmEventMaxentFeatureEncoding.train(tokens)
113
+ write_tadm_file(tokens, encoding, sys.stdout)
114
+ print()
115
+ for i in range(encoding.length()):
116
+ print("%s --> %d" % (encoding.describe(i), i))
117
+ print()
118
+
119
+
120
+ if __name__ == "__main__":
121
+ encoding_demo()
122
+ names_demo()
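
To make the event-file format written by write_tadm_file above concrete, here is a hand-rolled sketch of what it emits for a single training token; the label set, feature ids and counts are invented for illustration:

    # Sketch of the tadm event-file format (hypothetical values).
    # Per training token: one line with the number of candidate labels, then
    # one line per label: <1 if gold label else 0> <num pairs> <fid count ...>
    import io

    stream = io.StringIO()
    labels = ["A", "B"]
    gold = "A"
    # Encoded (feature_id, count) pairs for the featureset under each label.
    encoded = {"A": [(0, 1), (3, 1)], "B": [(1, 1), (4, 2)]}

    stream.write("%d\n" % len(labels))
    for known_label in labels:
        v = encoded[known_label]
        stream.write("%d %d %s\n" % (int(gold == known_label), len(v),
                                     " ".join("%d %d" % u for u in v)))
    print(stream.getvalue())
    # 2
    # 1 2 0 1 3 1
    # 0 2 1 1 4 2
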
venv/lib/python3.10/site-packages/nltk/classify/textcat.py ADDED
@@ -0,0 +1,197 @@
1
+ # Natural Language Toolkit: Language ID module using TextCat algorithm
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Avital Pekker <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A module for language identification using the TextCat algorithm.
11
+ An implementation of the text categorization algorithm
12
+ presented in Cavnar, W. B. and J. M. Trenkle,
13
+ "N-Gram-Based Text Categorization".
14
+
15
+ The algorithm takes advantage of Zipf's law and uses
16
+ n-gram frequencies to build profiles of languages and of the text to
17
+ be identified, then compares them using a distance measure.
18
+
19
+ Language n-grams are provided by the "An Crubadan"
20
+ project. A corpus reader was created separately to read
21
+ those files.
22
+
23
+ For details regarding the algorithm, see:
24
+ https://www.let.rug.nl/~vannoord/TextCat/textcat.pdf
25
+
26
+ For details about An Crubadan, see:
27
+ https://borel.slu.edu/crubadan/index.html
28
+ """
29
+
30
+ from sys import maxsize
31
+
32
+ from nltk.util import trigrams
33
+
34
+ # Note: this is NOT the "re" module you're likely used to. The regex module
35
+ # is an alternative to the standard re module that supports
36
+ # Unicode codepoint properties with the \p{} syntax.
37
+ # You may have to "pip install regex"
38
+ try:
39
+ import regex as re
40
+ except ImportError:
41
+ re = None
42
+ ######################################################################
43
+ ## Language identification using TextCat
44
+ ######################################################################
45
+
46
+
47
+ class TextCat:
48
+
49
+ _corpus = None
50
+ fingerprints = {}
51
+ _START_CHAR = "<"
52
+ _END_CHAR = ">"
53
+
54
+ last_distances = {}
55
+
56
+ def __init__(self):
57
+ if not re:
58
+ raise OSError(
59
+ "classify.textcat requires the regex module that "
60
+ "supports unicode. Try '$ pip install regex' and "
61
+ "see https://pypi.python.org/pypi/regex for "
62
+ "further details."
63
+ )
64
+
65
+ from nltk.corpus import crubadan
66
+
67
+ self._corpus = crubadan
68
+ # Load all language ngrams into cache
69
+ for lang in self._corpus.langs():
70
+ self._corpus.lang_freq(lang)
71
+
72
+ def remove_punctuation(self, text):
73
+ """Get rid of punctuation except apostrophes"""
74
+ return re.sub(r"[^\P{P}\']+", "", text)
75
+
76
+ def profile(self, text):
77
+ """Create FreqDist of trigrams within text"""
78
+ from nltk import FreqDist, word_tokenize
79
+
80
+ clean_text = self.remove_punctuation(text)
81
+ tokens = word_tokenize(clean_text)
82
+
83
+ fingerprint = FreqDist()
84
+ for t in tokens:
85
+ token_trigram_tuples = trigrams(self._START_CHAR + t + self._END_CHAR)
86
+ token_trigrams = ["".join(tri) for tri in token_trigram_tuples]
87
+
88
+ for cur_trigram in token_trigrams:
89
+ if cur_trigram in fingerprint:
90
+ fingerprint[cur_trigram] += 1
91
+ else:
92
+ fingerprint[cur_trigram] = 1
93
+
94
+ return fingerprint
95
+
96
+ def calc_dist(self, lang, trigram, text_profile):
97
+ """Calculate the "out-of-place" measure between the
98
+ text and language profile for a single trigram"""
99
+
100
+ lang_fd = self._corpus.lang_freq(lang)
101
+ dist = 0
102
+
103
+ if trigram in lang_fd:
104
+ idx_lang_profile = list(lang_fd.keys()).index(trigram)
105
+ idx_text = list(text_profile.keys()).index(trigram)
106
+
107
+ # print(idx_lang_profile, ", ", idx_text)
108
+ dist = abs(idx_lang_profile - idx_text)
109
+ else:
110
+ # Arbitrary but should be larger than
111
+ # any possible trigram file length
112
+ # in terms of total lines
113
+ dist = maxsize
114
+
115
+ return dist
116
+
117
+ def lang_dists(self, text):
118
+ """Calculate the "out-of-place" measure between
119
+ the text and all languages"""
120
+
121
+ distances = {}
122
+ profile = self.profile(text)
123
+ # For all the languages
124
+ for lang in self._corpus._all_lang_freq.keys():
125
+ # Calculate distance metric for every trigram in
126
+ # input text to be identified
127
+ lang_dist = 0
128
+ for trigram in profile:
129
+ lang_dist += self.calc_dist(lang, trigram, profile)
130
+
131
+ distances[lang] = lang_dist
132
+
133
+ return distances
134
+
135
+ def guess_language(self, text):
136
+ """Find the language with the min distance
137
+ to the text and return its ISO 639-3 code"""
138
+ self.last_distances = self.lang_dists(text)
139
+
140
+ return min(self.last_distances, key=self.last_distances.get)
141
+ #################################################
142
+
143
+
144
+ def demo():
145
+ from nltk.corpus import udhr
146
+
147
+ langs = [
148
+ "Kurdish-UTF8",
149
+ "Abkhaz-UTF8",
150
+ "Farsi_Persian-UTF8",
151
+ "Hindi-UTF8",
152
+ "Hawaiian-UTF8",
153
+ "Russian-UTF8",
154
+ "Vietnamese-UTF8",
155
+ "Serbian_Srpski-UTF8",
156
+ "Esperanto-UTF8",
157
+ ]
158
+
159
+ friendly = {
160
+ "kmr": "Northern Kurdish",
161
+ "abk": "Abkhazian",
162
+ "pes": "Iranian Persian",
163
+ "hin": "Hindi",
164
+ "haw": "Hawaiian",
165
+ "rus": "Russian",
166
+ "vie": "Vietnamese",
167
+ "srp": "Serbian",
168
+ "epo": "Esperanto",
169
+ }
170
+
171
+ tc = TextCat()
172
+
173
+ for cur_lang in langs:
174
+ # Get raw data from UDHR corpus
175
+ raw_sentences = udhr.sents(cur_lang)
176
+ rows = len(raw_sentences) - 1
177
+ cols = list(map(len, raw_sentences))
178
+
179
+ sample = ""
180
+
181
+ # Generate a sample text of the language
182
+ for i in range(0, rows):
183
+ cur_sent = ""
184
+ for j in range(0, cols[i]):
185
+ cur_sent += " " + raw_sentences[i][j]
186
+
187
+ sample += cur_sent
188
+
189
+ # Try to detect what it is
190
+ print("Language snippet: " + sample[0:140] + "...")
191
+ guess = tc.guess_language(sample)
192
+ print(f"Language detection: {guess} ({friendly[guess]})")
193
+ print("#" * 140)
194
+
195
+
196
+ if __name__ == "__main__":
197
+ demo()
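
The "out-of-place" measure used by calc_dist above is essentially a rank difference: for each trigram in the text profile, how far its frequency rank in the language profile is from its rank in the text profile, with a large penalty for trigrams the language profile has never seen. A simplified, self-contained sketch with invented profiles:

    # Simplified sketch of the out-of-place distance (toy, invented profiles).
    from sys import maxsize

    def out_of_place(text_ranked, lang_ranked):
        # Both lists contain trigrams ordered by decreasing frequency.
        lang_rank = {tri: i for i, tri in enumerate(lang_ranked)}
        dist = 0
        for i, tri in enumerate(text_ranked):
            dist += abs(lang_rank[tri] - i) if tri in lang_rank else maxsize
        return dist

    text_profile = ["<th", "the", "he>", "<an", "and"]
    english_like = ["the", "<th", "he>", "and", "<an"]
    print(out_of_place(text_profile, english_like))  # prints 4
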
venv/lib/python3.10/site-packages/nltk/classify/util.py ADDED
@@ -0,0 +1,346 @@
1
+ # Natural Language Toolkit: Classifier Utility Functions
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Utility functions and classes for classifiers.
11
+ """
12
+
13
+ import math
14
+
15
+ # from nltk.util import Deprecated
16
+ import nltk.classify.util # for accuracy & log_likelihood
17
+ from nltk.util import LazyMap
18
+
19
+ ######################################################################
20
+ # { Helper Functions
21
+ ######################################################################
22
+
23
+ # alternative name possibility: 'map_featurefunc()'?
24
+ # alternative name possibility: 'detect_features()'?
25
+ # alternative name possibility: 'map_featuredetect()'?
26
+ # or.. just have users use LazyMap directly?
27
+ def apply_features(feature_func, toks, labeled=None):
28
+ """
29
+ Use the ``LazyMap`` class to construct a lazy list-like
30
+ object that is analogous to ``map(feature_func, toks)``. In
31
+ particular, if ``labeled=False``, then the returned list-like
32
+ object's values are equal to::
33
+
34
+ [feature_func(tok) for tok in toks]
35
+
36
+ If ``labeled=True``, then the returned list-like object's values
37
+ are equal to::
38
+
39
+ [(feature_func(tok), label) for (tok, label) in toks]
40
+
41
+ The primary purpose of this function is to avoid the memory
42
+ overhead involved in storing all the featuresets for every token
43
+ in a corpus. Instead, these featuresets are constructed lazily,
44
+ as-needed. The reduction in memory overhead can be especially
45
+ significant when the underlying list of tokens is itself lazy (as
46
+ is the case with many corpus readers).
47
+
48
+ :param feature_func: The function that will be applied to each
49
+ token. It should return a featureset -- i.e., a dict
50
+ mapping feature names to feature values.
51
+ :param toks: The list of tokens to which ``feature_func`` should be
52
+ applied. If ``labeled=False``, then the list elements will be
53
+ passed directly to ``feature_func()``. If ``labeled=True``,
54
+ then the list elements should be tuples ``(tok, label)``, and
55
+ ``tok`` will be passed to ``feature_func()``.
56
+ :param labeled: If true, then ``toks`` contains labeled tokens --
57
+ i.e., tuples of the form ``(tok, label)``. (Default:
58
+ auto-detect based on types.)
59
+ """
60
+ if labeled is None:
61
+ labeled = toks and isinstance(toks[0], (tuple, list))
62
+ if labeled:
63
+
64
+ def lazy_func(labeled_token):
65
+ return (feature_func(labeled_token[0]), labeled_token[1])
66
+
67
+ return LazyMap(lazy_func, toks)
68
+ else:
69
+ return LazyMap(feature_func, toks)
70
+
71
+
72
+ def attested_labels(tokens):
73
+ """
74
+ :return: A list of all labels that are attested in the given list
75
+ of tokens.
76
+ :rtype: list of (immutable)
77
+ :param tokens: The list of classified tokens from which to extract
78
+ labels. A classified token has the form ``(token, label)``.
79
+ :type tokens: list
80
+ """
81
+ return tuple({label for (tok, label) in tokens})
82
+
83
+
84
+ def log_likelihood(classifier, gold):
85
+ results = classifier.prob_classify_many([fs for (fs, l) in gold])
86
+ ll = [pdist.prob(l) for ((fs, l), pdist) in zip(gold, results)]
87
+ return math.log(sum(ll) / len(ll))
88
+
89
+
90
+ def accuracy(classifier, gold):
91
+ results = classifier.classify_many([fs for (fs, l) in gold])
92
+ correct = [l == r for ((fs, l), r) in zip(gold, results)]
93
+ if correct:
94
+ return sum(correct) / len(correct)
95
+ else:
96
+ return 0
97
+
98
+
99
+ class CutoffChecker:
100
+ """
101
+ A helper class that implements cutoff checks based on number of
102
+ iterations and log likelihood.
103
+
104
+ Accuracy cutoffs are also implemented, but they're almost never
105
+ a good idea to use.
106
+ """
107
+
108
+ def __init__(self, cutoffs):
109
+ self.cutoffs = cutoffs.copy()
110
+ if "min_ll" in cutoffs:
111
+ cutoffs["min_ll"] = -abs(cutoffs["min_ll"])
112
+ if "min_lldelta" in cutoffs:
113
+ cutoffs["min_lldelta"] = abs(cutoffs["min_lldelta"])
114
+ self.ll = None
115
+ self.acc = None
116
+ self.iter = 1
117
+
118
+ def check(self, classifier, train_toks):
119
+ cutoffs = self.cutoffs
120
+ self.iter += 1
121
+ if "max_iter" in cutoffs and self.iter >= cutoffs["max_iter"]:
122
+ return True # iteration cutoff.
123
+
124
+ new_ll = nltk.classify.util.log_likelihood(classifier, train_toks)
125
+ if math.isnan(new_ll):
126
+ return True
127
+
128
+ if "min_ll" in cutoffs or "min_lldelta" in cutoffs:
129
+ if "min_ll" in cutoffs and new_ll >= cutoffs["min_ll"]:
130
+ return True # log likelihood cutoff
131
+ if (
132
+ "min_lldelta" in cutoffs
133
+ and self.ll
134
+ and ((new_ll - self.ll) <= abs(cutoffs["min_lldelta"]))
135
+ ):
136
+ return True # log likelihood delta cutoff
137
+ self.ll = new_ll
138
+
139
+ if "max_acc" in cutoffs or "min_accdelta" in cutoffs:
140
+ new_acc = nltk.classify.util.accuracy(classifier, train_toks)
141
+ if "max_acc" in cutoffs and new_acc >= cutoffs["max_acc"]:
142
+ return True # accuracy cutoff
143
+ if (
144
+ "min_accdelta" in cutoffs
145
+ and self.acc
146
+ and ((new_acc - self.acc) <= abs(cutoffs["min_accdelta"]))
147
+ ):
148
+ return True # accuracy delta cutoff
149
+ self.acc = new_acc
150
+
151
+ return False # no cutoff reached.
152
+
153
+
154
+ ######################################################################
155
+ # { Demos
156
+ ######################################################################
157
+
158
+
159
+ def names_demo_features(name):
160
+ features = {}
161
+ features["alwayson"] = True
162
+ features["startswith"] = name[0].lower()
163
+ features["endswith"] = name[-1].lower()
164
+ for letter in "abcdefghijklmnopqrstuvwxyz":
165
+ features["count(%s)" % letter] = name.lower().count(letter)
166
+ features["has(%s)" % letter] = letter in name.lower()
167
+ return features
168
+
169
+
170
+ def binary_names_demo_features(name):
171
+ features = {}
172
+ features["alwayson"] = True
173
+ features["startswith(vowel)"] = name[0].lower() in "aeiouy"
174
+ features["endswith(vowel)"] = name[-1].lower() in "aeiouy"
175
+ for letter in "abcdefghijklmnopqrstuvwxyz":
176
+ features["count(%s)" % letter] = name.lower().count(letter)
177
+ features["has(%s)" % letter] = letter in name.lower()
178
+ features["startswith(%s)" % letter] = letter == name[0].lower()
179
+ features["endswith(%s)" % letter] = letter == name[-1].lower()
180
+ return features
181
+
182
+
183
+ def names_demo(trainer, features=names_demo_features):
184
+ import random
185
+
186
+ from nltk.corpus import names
187
+
188
+ # Construct a list of classified names, using the names corpus.
189
+ namelist = [(name, "male") for name in names.words("male.txt")] + [
190
+ (name, "female") for name in names.words("female.txt")
191
+ ]
192
+
193
+ # Randomly split the names into a test & train set.
194
+ random.seed(123456)
195
+ random.shuffle(namelist)
196
+ train = namelist[:5000]
197
+ test = namelist[5000:5500]
198
+
199
+ # Train up a classifier.
200
+ print("Training classifier...")
201
+ classifier = trainer([(features(n), g) for (n, g) in train])
202
+
203
+ # Run the classifier on the test data.
204
+ print("Testing classifier...")
205
+ acc = accuracy(classifier, [(features(n), g) for (n, g) in test])
206
+ print("Accuracy: %6.4f" % acc)
207
+
208
+ # For classifiers that can find probabilities, show the log
209
+ # likelihood and some sample probability distributions.
210
+ try:
211
+ test_featuresets = [features(n) for (n, g) in test]
212
+ pdists = classifier.prob_classify_many(test_featuresets)
213
+ ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
214
+ print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
215
+ print()
216
+ print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
217
+ for ((name, gender), pdist) in list(zip(test, pdists))[:5]:
218
+ if gender == "male":
219
+ fmt = " %-15s *%6.4f %6.4f"
220
+ else:
221
+ fmt = " %-15s %6.4f *%6.4f"
222
+ print(fmt % (name, pdist.prob("male"), pdist.prob("female")))
223
+ except NotImplementedError:
224
+ pass
225
+
226
+ # Return the classifier
227
+ return classifier
228
+
229
+
230
+ def partial_names_demo(trainer, features=names_demo_features):
231
+ import random
232
+
233
+ from nltk.corpus import names
234
+
235
+ male_names = names.words("male.txt")
236
+ female_names = names.words("female.txt")
237
+
238
+ random.seed(654321)
239
+ random.shuffle(male_names)
240
+ random.shuffle(female_names)
241
+
242
+ # Create a list of male names to be used as positive-labeled examples for training
243
+ positive = map(features, male_names[:2000])
244
+
245
+ # Create a list of male and female names to be used as unlabeled examples
246
+ unlabeled = map(features, male_names[2000:2500] + female_names[:500])
247
+
248
+ # Create a test set with correctly-labeled male and female names
249
+ test = [(name, True) for name in male_names[2500:2750]] + [
250
+ (name, False) for name in female_names[500:750]
251
+ ]
252
+
253
+ random.shuffle(test)
254
+
255
+ # Train up a classifier.
256
+ print("Training classifier...")
257
+ classifier = trainer(positive, unlabeled)
258
+
259
+ # Run the classifier on the test data.
260
+ print("Testing classifier...")
261
+ acc = accuracy(classifier, [(features(n), m) for (n, m) in test])
262
+ print("Accuracy: %6.4f" % acc)
263
+
264
+ # For classifiers that can find probabilities, show the log
265
+ # likelihood and some sample probability distributions.
266
+ try:
267
+ test_featuresets = [features(n) for (n, m) in test]
268
+ pdists = classifier.prob_classify_many(test_featuresets)
269
+ ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
270
+ print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
271
+ print()
272
+ print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
273
+ for ((name, is_male), pdist) in list(zip(test, pdists))[:5]:
274
+ if is_male:
275
+ fmt = " %-15s *%6.4f %6.4f"
276
+ else:
277
+ fmt = " %-15s %6.4f *%6.4f"
278
+ print(fmt % (name, pdist.prob(True), pdist.prob(False)))
279
+ except NotImplementedError:
280
+ pass
281
+
282
+ # Return the classifier
283
+ return classifier
284
+
285
+
286
+ _inst_cache = {}
287
+
288
+
289
+ def wsd_demo(trainer, word, features, n=1000):
290
+ import random
291
+
292
+ from nltk.corpus import senseval
293
+
294
+ # Get the instances.
295
+ print("Reading data...")
296
+ global _inst_cache
297
+ if word not in _inst_cache:
298
+ _inst_cache[word] = [(i, i.senses[0]) for i in senseval.instances(word)]
299
+ instances = _inst_cache[word][:]
300
+ if n > len(instances):
301
+ n = len(instances)
302
+ senses = list({l for (i, l) in instances})
303
+ print(" Senses: " + " ".join(senses))
304
+
305
+ # Randomly split the names into a test & train set.
306
+ print("Splitting into test & train...")
307
+ random.seed(123456)
308
+ random.shuffle(instances)
309
+ train = instances[: int(0.8 * n)]
310
+ test = instances[int(0.8 * n) : n]
311
+
312
+ # Train up a classifier.
313
+ print("Training classifier...")
314
+ classifier = trainer([(features(i), l) for (i, l) in train])
315
+
316
+ # Run the classifier on the test data.
317
+ print("Testing classifier...")
318
+ acc = accuracy(classifier, [(features(i), l) for (i, l) in test])
319
+ print("Accuracy: %6.4f" % acc)
320
+
321
+ # For classifiers that can find probabilities, show the log
322
+ # likelihood and some sample probability distributions.
323
+ try:
324
+ test_featuresets = [features(i) for (i, n) in test]
325
+ pdists = classifier.prob_classify_many(test_featuresets)
326
+ ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
327
+ print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
328
+ except NotImplementedError:
329
+ pass
330
+
331
+ # Return the classifier
332
+ return classifier
333
+
334
+
335
+ def check_megam_config():
336
+ """
337
+ Checks whether the MEGAM binary is configured.
338
+ """
339
+ try:
340
+ from nltk.classify.megam import _megam_bin
+ assert _megam_bin is not None
341
+ except (ImportError, AssertionError) as e:
342
+ err_msg = str(
343
+ "Please configure your megam binary first, e.g.\n"
344
+ ">>> nltk.config_megam('/usr/bin/local/megam')"
345
+ )
346
+ raise NameError(err_msg) from e
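
The point of apply_features above is that featuresets are built lazily on access rather than materialized up front. A small sketch; the feature function and word list here are made up:

    # Lazy feature extraction with apply_features (toy data).
    from nltk.classify.util import apply_features

    def char_features(word):  # hypothetical feature extractor
        return {"first": word[0], "length": len(word)}

    labeled_words = [("apple", "fruit"), ("carrot", "veg"), ("banana", "fruit")]
    lazy = apply_features(char_features, labeled_words)  # labeled auto-detected
    print(len(lazy))   # 3
    print(lazy[0])     # ({'first': 'a', 'length': 5}, 'fruit'), built on demand
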
venv/lib/python3.10/site-packages/nltk/classify/weka.py ADDED
@@ -0,0 +1,377 @@
1
+ # Natural Language Toolkit: Interface to Weka Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Classifiers that make use of the external 'Weka' package.
10
+ """
11
+
12
+ import os
13
+ import re
14
+ import subprocess
15
+ import tempfile
16
+ import time
17
+ import zipfile
18
+ from sys import stdin
19
+
20
+ from nltk.classify.api import ClassifierI
21
+ from nltk.internals import config_java, java
22
+ from nltk.probability import DictionaryProbDist
23
+
24
+ _weka_classpath = None
25
+ _weka_search = [
26
+ ".",
27
+ "/usr/share/weka",
28
+ "/usr/local/share/weka",
29
+ "/usr/lib/weka",
30
+ "/usr/local/lib/weka",
31
+ ]
32
+
33
+
34
+ def config_weka(classpath=None):
35
+ global _weka_classpath
36
+
37
+ # Make sure java's configured first.
38
+ config_java()
39
+
40
+ if classpath is not None:
41
+ _weka_classpath = classpath
42
+
43
+ if _weka_classpath is None:
44
+ searchpath = _weka_search
45
+ if "WEKAHOME" in os.environ:
46
+ searchpath.insert(0, os.environ["WEKAHOME"])
47
+
48
+ for path in searchpath:
49
+ if os.path.exists(os.path.join(path, "weka.jar")):
50
+ _weka_classpath = os.path.join(path, "weka.jar")
51
+ version = _check_weka_version(_weka_classpath)
52
+ if version:
53
+ print(f"[Found Weka: {_weka_classpath} (version {version})]")
54
+ else:
55
+ print("[Found Weka: %s]" % _weka_classpath)
56
+ _check_weka_version(_weka_classpath)
57
+
58
+ if _weka_classpath is None:
59
+ raise LookupError(
60
+ "Unable to find weka.jar! Use config_weka() "
61
+ "or set the WEKAHOME environment variable. "
62
+ "For more information about Weka, please see "
63
+ "https://www.cs.waikato.ac.nz/ml/weka/"
64
+ )
65
+
66
+
67
+ def _check_weka_version(jar):
68
+ try:
69
+ zf = zipfile.ZipFile(jar)
70
+ except (SystemExit, KeyboardInterrupt):
71
+ raise
72
+ except:
73
+ return None
74
+ try:
75
+ try:
76
+ return zf.read("weka/core/version.txt")
77
+ except KeyError:
78
+ return None
79
+ finally:
80
+ zf.close()
81
+
82
+
83
+ class WekaClassifier(ClassifierI):
84
+ def __init__(self, formatter, model_filename):
85
+ self._formatter = formatter
86
+ self._model = model_filename
87
+
88
+ def prob_classify_many(self, featuresets):
89
+ return self._classify_many(featuresets, ["-p", "0", "-distribution"])
90
+
91
+ def classify_many(self, featuresets):
92
+ return self._classify_many(featuresets, ["-p", "0"])
93
+
94
+ def _classify_many(self, featuresets, options):
95
+ # Make sure we can find java & weka.
96
+ config_weka()
97
+
98
+ temp_dir = tempfile.mkdtemp()
99
+ try:
100
+ # Write the test data file.
101
+ test_filename = os.path.join(temp_dir, "test.arff")
102
+ self._formatter.write(test_filename, featuresets)
103
+
104
+ # Call weka to classify the data.
105
+ cmd = [
106
+ "weka.classifiers.bayes.NaiveBayes",
107
+ "-l",
108
+ self._model,
109
+ "-T",
110
+ test_filename,
111
+ ] + options
112
+ (stdout, stderr) = java(
113
+ cmd,
114
+ classpath=_weka_classpath,
115
+ stdout=subprocess.PIPE,
116
+ stderr=subprocess.PIPE,
117
+ )
118
+
119
+ # Check if something went wrong:
120
+ if stderr and not stdout:
121
+ if "Illegal options: -distribution" in stderr:
122
+ raise ValueError(
123
+ "The installed version of weka does "
124
+ "not support probability distribution "
125
+ "output."
126
+ )
127
+ else:
128
+ raise ValueError("Weka failed to generate output:\n%s" % stderr)
129
+
130
+ # Parse weka's output.
131
+ return self.parse_weka_output(stdout.decode(stdin.encoding).split("\n"))
132
+
133
+ finally:
134
+ for f in os.listdir(temp_dir):
135
+ os.remove(os.path.join(temp_dir, f))
136
+ os.rmdir(temp_dir)
137
+
138
+ def parse_weka_distribution(self, s):
139
+ probs = [float(v) for v in re.split("[*,]+", s) if v.strip()]
140
+ probs = dict(zip(self._formatter.labels(), probs))
141
+ return DictionaryProbDist(probs)
142
+
143
+ def parse_weka_output(self, lines):
144
+ # Strip unwanted text from stdout
145
+ for i, line in enumerate(lines):
146
+ if line.strip().startswith("inst#"):
147
+ lines = lines[i:]
148
+ break
149
+
150
+ if lines[0].split() == ["inst#", "actual", "predicted", "error", "prediction"]:
151
+ return [line.split()[2].split(":")[1] for line in lines[1:] if line.strip()]
152
+ elif lines[0].split() == [
153
+ "inst#",
154
+ "actual",
155
+ "predicted",
156
+ "error",
157
+ "distribution",
158
+ ]:
159
+ return [
160
+ self.parse_weka_distribution(line.split()[-1])
161
+ for line in lines[1:]
162
+ if line.strip()
163
+ ]
164
+
165
+ # is this safe:?
166
+ elif re.match(r"^0 \w+ [01]\.[0-9]* \?\s*$", lines[0]):
167
+ return [line.split()[1] for line in lines if line.strip()]
168
+
169
+ else:
170
+ for line in lines[:10]:
171
+ print(line)
172
+ raise ValueError(
173
+ "Unhandled output format -- your version "
174
+ "of weka may not be supported.\n"
175
+ " Header: %s" % lines[0]
176
+ )
177
+
178
+ # [xx] full list of classifiers (some may be abstract?):
179
+ # ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
180
+ # DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
181
+ # JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
182
+ # LogisticBase, M5Base, MultilayerPerceptron,
183
+ # MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
184
+ # NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
185
+ # PreConstructedLinearModel, Prism, RandomForest,
186
+ # RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
187
+ # RuleNode, SimpleLinearRegression, SimpleLogistic,
188
+ # SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
189
+ # VotedPerceptron, Winnow, ZeroR
190
+
191
+ _CLASSIFIER_CLASS = {
192
+ "naivebayes": "weka.classifiers.bayes.NaiveBayes",
193
+ "C4.5": "weka.classifiers.trees.J48",
194
+ "log_regression": "weka.classifiers.functions.Logistic",
195
+ "svm": "weka.classifiers.functions.SMO",
196
+ "kstar": "weka.classifiers.lazy.KStar",
197
+ "ripper": "weka.classifiers.rules.JRip",
198
+ }
199
+
200
+ @classmethod
201
+ def train(
202
+ cls,
203
+ model_filename,
204
+ featuresets,
205
+ classifier="naivebayes",
206
+ options=[],
207
+ quiet=True,
208
+ ):
209
+ # Make sure we can find java & weka.
210
+ config_weka()
211
+
212
+ # Build an ARFF formatter.
213
+ formatter = ARFF_Formatter.from_train(featuresets)
214
+
215
+ temp_dir = tempfile.mkdtemp()
216
+ try:
217
+ # Write the training data file.
218
+ train_filename = os.path.join(temp_dir, "train.arff")
219
+ formatter.write(train_filename, featuresets)
220
+
221
+ if classifier in cls._CLASSIFIER_CLASS:
222
+ javaclass = cls._CLASSIFIER_CLASS[classifier]
223
+ elif classifier in cls._CLASSIFIER_CLASS.values():
224
+ javaclass = classifier
225
+ else:
226
+ raise ValueError("Unknown classifier %s" % classifier)
227
+
228
+ # Train the weka model.
229
+ cmd = [javaclass, "-d", model_filename, "-t", train_filename]
230
+ cmd += list(options)
231
+ if quiet:
232
+ stdout = subprocess.PIPE
233
+ else:
234
+ stdout = None
235
+ java(cmd, classpath=_weka_classpath, stdout=stdout)
236
+
237
+ # Return the new classifier.
238
+ return WekaClassifier(formatter, model_filename)
239
+
240
+ finally:
241
+ for f in os.listdir(temp_dir):
242
+ os.remove(os.path.join(temp_dir, f))
243
+ os.rmdir(temp_dir)
244
+
245
+
246
+ class ARFF_Formatter:
247
+ """
248
+ Converts featuresets and labeled featuresets to ARFF-formatted
249
+ strings, appropriate for input into Weka.
250
+
251
+ Features and classes can be specified manually in the constructor, or may
252
+ be determined from data using ``from_train``.
253
+ """
254
+
255
+ def __init__(self, labels, features):
256
+ """
257
+ :param labels: A list of all class labels that can be generated.
258
+ :param features: A list of feature specifications, where
259
+ each feature specification is a tuple (fname, ftype);
260
+ and ftype is an ARFF type string such as NUMERIC or
261
+ STRING.
262
+ """
263
+ self._labels = labels
264
+ self._features = features
265
+
266
+ def format(self, tokens):
267
+ """Returns a string representation of ARFF output for the given data."""
268
+ return self.header_section() + self.data_section(tokens)
269
+
270
+ def labels(self):
271
+ """Returns the list of classes."""
272
+ return list(self._labels)
273
+
274
+ def write(self, outfile, tokens):
275
+ """Writes ARFF data to a file for the given data."""
276
+ if not hasattr(outfile, "write"):
277
+ outfile = open(outfile, "w")
278
+ outfile.write(self.format(tokens))
279
+ outfile.close()
280
+
281
+ @staticmethod
282
+ def from_train(tokens):
283
+ """
284
+ Constructs an ARFF_Formatter instance with class labels and feature
285
+ types determined from the given data. Handles boolean, numeric and
286
+ string (note: not nominal) types.
287
+ """
288
+ # Find the set of all attested labels.
289
+ labels = {label for (tok, label) in tokens}
290
+
291
+ # Determine the types of all features.
292
+ features = {}
293
+ for tok, label in tokens:
294
+ for (fname, fval) in tok.items():
295
+ if issubclass(type(fval), bool):
296
+ ftype = "{True, False}"
297
+ elif issubclass(type(fval), (int, float, bool)):
298
+ ftype = "NUMERIC"
299
+ elif issubclass(type(fval), str):
300
+ ftype = "STRING"
301
+ elif fval is None:
302
+ continue # can't tell the type.
303
+ else:
304
+ raise ValueError("Unsupported value type %r" % ftype)
305
+
306
+ if features.get(fname, ftype) != ftype:
307
+ raise ValueError("Inconsistent type for %s" % fname)
308
+ features[fname] = ftype
309
+ features = sorted(features.items())
310
+
311
+ return ARFF_Formatter(labels, features)
312
+
313
+ def header_section(self):
314
+ """Returns an ARFF header as a string."""
315
+ # Header comment.
316
+ s = (
317
+ "% Weka ARFF file\n"
318
+ + "% Generated automatically by NLTK\n"
319
+ + "%% %s\n\n" % time.ctime()
320
+ )
321
+
322
+ # Relation name
323
+ s += "@RELATION rel\n\n"
324
+
325
+ # Input attribute specifications
326
+ for fname, ftype in self._features:
327
+ s += "@ATTRIBUTE %-30r %s\n" % (fname, ftype)
328
+
329
+ # Label attribute specification
330
+ s += "@ATTRIBUTE %-30r {%s}\n" % ("-label-", ",".join(self._labels))
331
+
332
+ return s
333
+
334
+ def data_section(self, tokens, labeled=None):
335
+ """
336
+ Returns the ARFF data section for the given data.
337
+
338
+ :param tokens: a list of featuresets (dicts) or labelled featuresets
339
+ which are tuples (featureset, label).
340
+ :param labeled: Indicates whether the given tokens are labeled
341
+ or not. If None, then the tokens will be assumed to be
342
+ labeled if the first token's value is a tuple or list.
343
+ """
344
+ # Check if the tokens are labeled or unlabeled. If unlabeled,
345
+ # then use 'None'
346
+ if labeled is None:
347
+ labeled = tokens and isinstance(tokens[0], (tuple, list))
348
+ if not labeled:
349
+ tokens = [(tok, None) for tok in tokens]
350
+
351
+ # Data section
352
+ s = "\n@DATA\n"
353
+ for (tok, label) in tokens:
354
+ for fname, ftype in self._features:
355
+ s += "%s," % self._fmt_arff_val(tok.get(fname))
356
+ s += "%s\n" % self._fmt_arff_val(label)
357
+
358
+ return s
359
+
360
+ def _fmt_arff_val(self, fval):
361
+ if fval is None:
362
+ return "?"
363
+ elif isinstance(fval, (bool, int)):
364
+ return "%s" % fval
365
+ elif isinstance(fval, float):
366
+ return "%r" % fval
367
+ else:
368
+ return "%r" % fval
369
+
370
+
371
+ if __name__ == "__main__":
372
+ from nltk.classify.util import binary_names_demo_features, names_demo
373
+
374
+ def make_classifier(featuresets):
375
+ return WekaClassifier.train("/tmp/name.model", featuresets, "C4.5")
376
+
377
+ classifier = names_demo(make_classifier, binary_names_demo_features)
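
To see what the ARFF_Formatter above actually produces (no Java or Weka is needed for this part), a tiny sketch with invented featuresets; the header comment will differ because it embeds the current time, and label order may vary because labels are collected into a set:

    # Sketch of ARFF output from ARFF_Formatter (toy data, illustrative only).
    from nltk.classify.weka import ARFF_Formatter

    train = [({"length": 3, "has_vowel": True}, "short"),
             ({"length": 11, "has_vowel": False}, "long")]
    formatter = ARFF_Formatter.from_train(train)
    print(formatter.format(train))
    # Roughly:
    #   @RELATION rel
    #   @ATTRIBUTE 'has_vowel' {True, False}
    #   @ATTRIBUTE 'length'    NUMERIC
    #   @ATTRIBUTE '-label-'   {short,long}
    #   @DATA
    #   True,3,'short'
    #   False,11,'long'
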
venv/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (404 Bytes). View file
 
venv/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc ADDED
Binary file (619 Bytes). View file
 
venv/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc ADDED
Binary file (5.12 kB). View file
 
venv/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
venv/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc ADDED
Binary file (3.48 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__init__.py ADDED
@@ -0,0 +1,34 @@
1
+ # Natural Language Toolkit: Stemmers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # Steven Bird <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ NLTK Stemmers
12
+
13
+ Interfaces used to remove morphological affixes from words, leaving
14
+ only the word stem. Stemming algorithms aim to remove those affixes
15
+ required for, e.g., grammatical role, tense, or derivational morphology,
16
+ leaving only the stem of the word. This is a difficult problem due to
17
+ irregular words (e.g. common verbs in English), complicated
18
+ morphological rules, and part-of-speech and sense ambiguities
19
+ (eg. ``ceil-`` is not the stem of ``ceiling``).
20
+
21
+ StemmerI defines a standard interface for stemmers.
22
+ """
23
+
24
+ from nltk.stem.api import StemmerI
25
+ from nltk.stem.arlstem import ARLSTem
26
+ from nltk.stem.arlstem2 import ARLSTem2
27
+ from nltk.stem.cistem import Cistem
28
+ from nltk.stem.isri import ISRIStemmer
29
+ from nltk.stem.lancaster import LancasterStemmer
30
+ from nltk.stem.porter import PorterStemmer
31
+ from nltk.stem.regexp import RegexpStemmer
32
+ from nltk.stem.rslp import RSLPStemmer
33
+ from nltk.stem.snowball import SnowballStemmer
34
+ from nltk.stem.wordnet import WordNetLemmatizer
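
For quick orientation, a minimal sketch using two of the stemmers re-exported above; the word list is invented:

    # Minimal stemming example with two of the exported stemmers.
    from nltk.stem import PorterStemmer, SnowballStemmer

    porter = PorterStemmer()
    snowball = SnowballStemmer("english")
    for word in ["running", "flies", "happily", "ceiling"]:
        print(word, porter.stem(word), snowball.stem(word))
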
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.29 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc ADDED
Binary file (822 Bytes). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc ADDED
Binary file (8.29 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc ADDED
Binary file (9.97 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc ADDED
Binary file (6.33 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc ADDED
Binary file (9.24 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc ADDED
Binary file (6.41 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc ADDED
Binary file (21.6 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc ADDED
Binary file (3.08 kB). View file
 
venv/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc ADDED
Binary file (97.3 kB). View file