applied-ai-018 committed on
Commit
fd05557
·
verified ·
1 Parent(s): bfddfae

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/nltk/chat/__init__.py +48 -0
  2. llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/nltk/chat/eliza.py +337 -0
  10. llmeval-env/lib/python3.10/site-packages/nltk/chat/iesha.py +160 -0
  11. llmeval-env/lib/python3.10/site-packages/nltk/chat/rude.py +125 -0
  12. llmeval-env/lib/python3.10/site-packages/nltk/chat/suntsu.py +140 -0
  13. llmeval-env/lib/python3.10/site-packages/nltk/chat/util.py +124 -0
  14. llmeval-env/lib/python3.10/site-packages/nltk/chat/zen.py +329 -0
  15. llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/nltk/misc/babelfish.py +10 -0
  24. llmeval-env/lib/python3.10/site-packages/nltk/misc/chomsky.py +134 -0
  25. llmeval-env/lib/python3.10/site-packages/nltk/misc/minimalset.py +85 -0
  26. llmeval-env/lib/python3.10/site-packages/nltk/sem/__init__.py +75 -0
  27. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/nltk/sem/boxer.py +1605 -0
  43. llmeval-env/lib/python3.10/site-packages/nltk/sem/chat80.py +857 -0
  44. llmeval-env/lib/python3.10/site-packages/nltk/sem/cooper_storage.py +124 -0
  45. llmeval-env/lib/python3.10/site-packages/nltk/sem/drt.py +1460 -0
  46. llmeval-env/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py +553 -0
  47. llmeval-env/lib/python3.10/site-packages/nltk/sem/evaluate.py +829 -0
  48. llmeval-env/lib/python3.10/site-packages/nltk/sem/glue.py +835 -0
  49. llmeval-env/lib/python3.10/site-packages/nltk/sem/hole.py +395 -0
  50. llmeval-env/lib/python3.10/site-packages/nltk/sem/lfg.py +261 -0
llmeval-env/lib/python3.10/site-packages/nltk/chat/__init__.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Chatbots
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # Based on an Eliza implementation by Joe Strout <[email protected]>,
9
+ # Jeff Epler <[email protected]> and Jez Higgins <[email protected]>.
10
+
11
+ """
12
+ A class for simple chatbots. These perform simple pattern matching on sentences
13
+ typed by users, and respond with automatically generated sentences.
14
+
15
+ These chatbots may not work using the windows command line or the
16
+ windows IDLE GUI.
17
+ """
18
+
19
+ from nltk.chat.eliza import eliza_chat
20
+ from nltk.chat.iesha import iesha_chat
21
+ from nltk.chat.rude import rude_chat
22
+ from nltk.chat.suntsu import suntsu_chat
23
+ from nltk.chat.util import Chat
24
+ from nltk.chat.zen import zen_chat
25
+
26
# Registry of the available demo bots: (entry-point callable, menu label).
bots = [
    (eliza_chat, "Eliza (psycho-babble)"),
    (iesha_chat, "Iesha (teen anime junky)"),
    (rude_chat, "Rude (abusive bot)"),
    (suntsu_chat, "Suntsu (Chinese sayings)"),
    (zen_chat, "Zen (gems of wisdom)"),
]


def chatbots():
    """Interactively ask the user to pick a bot from ``bots`` and start it.

    Reads the selection from stdin and re-prompts until a number in the
    range 1..len(bots) is entered, then hands control to the chosen
    bot's own conversation loop.
    """
    print("Which chatbot would you like to talk to?")
    botcount = len(bots)
    # Enumerate from 1 so the menu numbers match what the user must type.
    for number, (_, description) in enumerate(bots, start=1):
        print(" %d: %s" % (number, description))
    while True:
        choice = input(f"\nEnter a number in the range 1-{botcount}: ").strip()
        # isdigit() guarantees int() cannot raise; the bounds check keeps
        # the later list index valid.
        if choice.isdigit() and 1 <= int(choice) <= botcount:
            break
        else:
            print(" Error: bad chatbot number")

    chatbot = bots[int(choice) - 1][0]
    chatbot()
llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc ADDED
Binary file (5.89 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc ADDED
Binary file (3.32 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc ADDED
Binary file (2.21 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc ADDED
Binary file (5.96 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc ADDED
Binary file (3.76 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc ADDED
Binary file (6.52 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/chat/eliza.py ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Eliza
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ # Based on an Eliza implementation by Joe Strout <[email protected]>,
10
+ # Jeff Epler <[email protected]> and Jez Higgins <mailto:[email protected]>.
11
+
12
+ # a translation table used to convert things you say into things the
13
+ # computer says back, e.g. "I am" --> "you are"
14
+
15
+ from nltk.chat.util import Chat, reflections
16
+
17
+ # a table of response pairs, where each pair consists of a
18
+ # regular expression, and a list of possible responses,
19
+ # with group-macros labelled as %1, %2.
20
+
21
+ pairs = (
22
+ (
23
+ r"I need (.*)",
24
+ (
25
+ "Why do you need %1?",
26
+ "Would it really help you to get %1?",
27
+ "Are you sure you need %1?",
28
+ ),
29
+ ),
30
+ (
31
+ r"Why don\'t you (.*)",
32
+ (
33
+ "Do you really think I don't %1?",
34
+ "Perhaps eventually I will %1.",
35
+ "Do you really want me to %1?",
36
+ ),
37
+ ),
38
+ (
39
+ r"Why can\'t I (.*)",
40
+ (
41
+ "Do you think you should be able to %1?",
42
+ "If you could %1, what would you do?",
43
+ "I don't know -- why can't you %1?",
44
+ "Have you really tried?",
45
+ ),
46
+ ),
47
+ (
48
+ r"I can\'t (.*)",
49
+ (
50
+ "How do you know you can't %1?",
51
+ "Perhaps you could %1 if you tried.",
52
+ "What would it take for you to %1?",
53
+ ),
54
+ ),
55
+ (
56
+ r"I am (.*)",
57
+ (
58
+ "Did you come to me because you are %1?",
59
+ "How long have you been %1?",
60
+ "How do you feel about being %1?",
61
+ ),
62
+ ),
63
+ (
64
+ r"I\'m (.*)",
65
+ (
66
+ "How does being %1 make you feel?",
67
+ "Do you enjoy being %1?",
68
+ "Why do you tell me you're %1?",
69
+ "Why do you think you're %1?",
70
+ ),
71
+ ),
72
+ (
73
+ r"Are you (.*)",
74
+ (
75
+ "Why does it matter whether I am %1?",
76
+ "Would you prefer it if I were not %1?",
77
+ "Perhaps you believe I am %1.",
78
+ "I may be %1 -- what do you think?",
79
+ ),
80
+ ),
81
+ (
82
+ r"What (.*)",
83
+ (
84
+ "Why do you ask?",
85
+ "How would an answer to that help you?",
86
+ "What do you think?",
87
+ ),
88
+ ),
89
+ (
90
+ r"How (.*)",
91
+ (
92
+ "How do you suppose?",
93
+ "Perhaps you can answer your own question.",
94
+ "What is it you're really asking?",
95
+ ),
96
+ ),
97
+ (
98
+ r"Because (.*)",
99
+ (
100
+ "Is that the real reason?",
101
+ "What other reasons come to mind?",
102
+ "Does that reason apply to anything else?",
103
+ "If %1, what else must be true?",
104
+ ),
105
+ ),
106
+ (
107
+ r"(.*) sorry (.*)",
108
+ (
109
+ "There are many times when no apology is needed.",
110
+ "What feelings do you have when you apologize?",
111
+ ),
112
+ ),
113
+ (
114
+ r"Hello(.*)",
115
+ (
116
+ "Hello... I'm glad you could drop by today.",
117
+ "Hi there... how are you today?",
118
+ "Hello, how are you feeling today?",
119
+ ),
120
+ ),
121
+ (
122
+ r"I think (.*)",
123
+ ("Do you doubt %1?", "Do you really think so?", "But you're not sure %1?"),
124
+ ),
125
+ (
126
+ r"(.*) friend (.*)",
127
+ (
128
+ "Tell me more about your friends.",
129
+ "When you think of a friend, what comes to mind?",
130
+ "Why don't you tell me about a childhood friend?",
131
+ ),
132
+ ),
133
+ (r"Yes", ("You seem quite sure.", "OK, but can you elaborate a bit?")),
134
+ (
135
+ r"(.*) computer(.*)",
136
+ (
137
+ "Are you really talking about me?",
138
+ "Does it seem strange to talk to a computer?",
139
+ "How do computers make you feel?",
140
+ "Do you feel threatened by computers?",
141
+ ),
142
+ ),
143
+ (
144
+ r"Is it (.*)",
145
+ (
146
+ "Do you think it is %1?",
147
+ "Perhaps it's %1 -- what do you think?",
148
+ "If it were %1, what would you do?",
149
+ "It could well be that %1.",
150
+ ),
151
+ ),
152
+ (
153
+ r"It is (.*)",
154
+ (
155
+ "You seem very certain.",
156
+ "If I told you that it probably isn't %1, what would you feel?",
157
+ ),
158
+ ),
159
+ (
160
+ r"Can you (.*)",
161
+ (
162
+ "What makes you think I can't %1?",
163
+ "If I could %1, then what?",
164
+ "Why do you ask if I can %1?",
165
+ ),
166
+ ),
167
+ (
168
+ r"Can I (.*)",
169
+ (
170
+ "Perhaps you don't want to %1.",
171
+ "Do you want to be able to %1?",
172
+ "If you could %1, would you?",
173
+ ),
174
+ ),
175
+ (
176
+ r"You are (.*)",
177
+ (
178
+ "Why do you think I am %1?",
179
+ "Does it please you to think that I'm %1?",
180
+ "Perhaps you would like me to be %1.",
181
+ "Perhaps you're really talking about yourself?",
182
+ ),
183
+ ),
184
+ (
185
+ r"You\'re (.*)",
186
+ (
187
+ "Why do you say I am %1?",
188
+ "Why do you think I am %1?",
189
+ "Are we talking about you, or me?",
190
+ ),
191
+ ),
192
+ (
193
+ r"I don\'t (.*)",
194
+ ("Don't you really %1?", "Why don't you %1?", "Do you want to %1?"),
195
+ ),
196
+ (
197
+ r"I feel (.*)",
198
+ (
199
+ "Good, tell me more about these feelings.",
200
+ "Do you often feel %1?",
201
+ "When do you usually feel %1?",
202
+ "When you feel %1, what do you do?",
203
+ ),
204
+ ),
205
+ (
206
+ r"I have (.*)",
207
+ (
208
+ "Why do you tell me that you've %1?",
209
+ "Have you really %1?",
210
+ "Now that you have %1, what will you do next?",
211
+ ),
212
+ ),
213
+ (
214
+ r"I would (.*)",
215
+ (
216
+ "Could you explain why you would %1?",
217
+ "Why would you %1?",
218
+ "Who else knows that you would %1?",
219
+ ),
220
+ ),
221
+ (
222
+ r"Is there (.*)",
223
+ (
224
+ "Do you think there is %1?",
225
+ "It's likely that there is %1.",
226
+ "Would you like there to be %1?",
227
+ ),
228
+ ),
229
+ (
230
+ r"My (.*)",
231
+ (
232
+ "I see, your %1.",
233
+ "Why do you say that your %1?",
234
+ "When your %1, how do you feel?",
235
+ ),
236
+ ),
237
+ (
238
+ r"You (.*)",
239
+ (
240
+ "We should be discussing you, not me.",
241
+ "Why do you say that about me?",
242
+ "Why do you care whether I %1?",
243
+ ),
244
+ ),
245
+ (r"Why (.*)", ("Why don't you tell me the reason why %1?", "Why do you think %1?")),
246
+ (
247
+ r"I want (.*)",
248
+ (
249
+ "What would it mean to you if you got %1?",
250
+ "Why do you want %1?",
251
+ "What would you do if you got %1?",
252
+ "If you got %1, then what would you do?",
253
+ ),
254
+ ),
255
+ (
256
+ r"(.*) mother(.*)",
257
+ (
258
+ "Tell me more about your mother.",
259
+ "What was your relationship with your mother like?",
260
+ "How do you feel about your mother?",
261
+ "How does this relate to your feelings today?",
262
+ "Good family relations are important.",
263
+ ),
264
+ ),
265
+ (
266
+ r"(.*) father(.*)",
267
+ (
268
+ "Tell me more about your father.",
269
+ "How did your father make you feel?",
270
+ "How do you feel about your father?",
271
+ "Does your relationship with your father relate to your feelings today?",
272
+ "Do you have trouble showing affection with your family?",
273
+ ),
274
+ ),
275
+ (
276
+ r"(.*) child(.*)",
277
+ (
278
+ "Did you have close friends as a child?",
279
+ "What is your favorite childhood memory?",
280
+ "Do you remember any dreams or nightmares from childhood?",
281
+ "Did the other children sometimes tease you?",
282
+ "How do you think your childhood experiences relate to your feelings today?",
283
+ ),
284
+ ),
285
+ (
286
+ r"(.*)\?",
287
+ (
288
+ "Why do you ask that?",
289
+ "Please consider whether you can answer your own question.",
290
+ "Perhaps the answer lies within yourself?",
291
+ "Why don't you tell me?",
292
+ ),
293
+ ),
294
+ (
295
+ r"quit",
296
+ (
297
+ "Thank you for talking with me.",
298
+ "Good-bye.",
299
+ "Thank you, that will be $150. Have a good day!",
300
+ ),
301
+ ),
302
+ (
303
+ r"(.*)",
304
+ (
305
+ "Please tell me more.",
306
+ "Let's change focus a bit... Tell me about your family.",
307
+ "Can you elaborate on that?",
308
+ "Why do you say that %1?",
309
+ "I see.",
310
+ "Very interesting.",
311
+ "%1.",
312
+ "I see. And what does that tell you?",
313
+ "How does that make you feel?",
314
+ "How do you feel when you say that?",
315
+ ),
316
+ ),
317
+ )
318
+
319
+ eliza_chatbot = Chat(pairs, reflections)
320
+
321
+
322
def eliza_chat():
    """Run an interactive session with the Eliza therapist bot on stdin/stdout."""
    banner = (
        "Therapist\n---------",
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "Hello. How are you feeling today?",
    )
    for text in banner:
        print(text)

    eliza_chatbot.converse()


def demo():
    """Demo entry point: simply starts the interactive chat."""
    eliza_chat()


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/chat/iesha.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Teen Chatbot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Selina Dennis <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ This chatbot is a tongue-in-cheek take on the average teen
10
+ anime junky that frequents YahooMessenger or MSNM.
11
+ All spelling mistakes and flawed grammar are intentional.
12
+ """
13
+
14
+ from nltk.chat.util import Chat
15
+
16
# Custom reflection table in Iesha's "netspeak" register, replacing the
# default one from nltk.chat.util (e.g. "i've" -> "u'v").  Intentionally
# misspelled -- see the module docstring.
reflections = {
    "am": "r",
    "was": "were",
    "i": "u",
    "i'd": "u'd",
    "i've": "u'v",
    "ive": "u'v",
    "i'll": "u'll",
    "my": "ur",
    "are": "am",
    "you're": "im",
    "you've": "ive",
    "you'll": "i'll",
    "your": "my",
    "yours": "mine",
    "you": "me",
    "u": "me",
    "ur": "my",
    "urs": "mine",
    "me": "u",
}

# Note: %1/2/etc are used without spaces prior as the chat bot seems
# to add a superfluous space when matching.

# (regex pattern, tuple of responses); %1/%2 expand to captured groups.
# Ordered, with the catch-all r"(.*)" last.
pairs = (
    (
        r"I\'m (.*)",
        (
            "ur%1?? that's so cool! kekekekeke ^_^ tell me more!",
            "ur%1? neat!! kekeke >_<",
        ),
    ),
    (
        r"(.*) don\'t you (.*)",
        (
            r"u think I can%2??! really?? kekeke \<_\<",
            "what do u mean%2??!",
            "i could if i wanted, don't you think!! kekeke",
        ),
    ),
    (r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")),
    (
        r"do (you|u) (.*)\??",
        ("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"),
    ),
    (
        r"(.*)\?",
        (
            "man u ask lots of questions!",
            "booooring! how old r u??",
            "boooooring!! ur not very fun",
        ),
    ),
    (
        r"(cos|because) (.*)",
        ("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"),
    ),
    (
        r"why can\'t [iI] (.*)",
        (
            "i dunno! y u askin me for!",
            "try harder, silly! hee! ^_^",
            "i dunno! but when i can't%1 i jump up and down!",
        ),
    ),
    (
        r"I can\'t (.*)",
        (
            "u can't what??! >_<",
            "that's ok! i can't%1 either! kekekekeke ^_^",
            "try harder, silly! hee! ^&^",
        ),
    ),
    (
        r"(.*) (like|love|watch) anime",
        (
            "omg i love anime!! do u like sailor moon??! ^&^",
            "anime yay! anime rocks sooooo much!",
            "oooh anime! i love anime more than anything!",
            "anime is the bestest evar! evangelion is the best!",
            "hee anime is the best! do you have ur fav??",
        ),
    ),
    (
        r"I (like|love|watch|play) (.*)",
        ("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"),
    ),
    (
        r"anime sucks|(.*) (hate|detest) anime",
        (
            "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*",
            "no way! anime is the best ever!",
            "nuh-uh, anime is the best!",
        ),
    ),
    (
        r"(are|r) (you|u) (.*)",
        ("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"),
    ),
    (
        r"what (.*)",
        ("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"),
    ),
    (r"how (.*)", ("not tellin!! kekekekekeke ^_^",)),
    (r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)),
    (
        r"quit",
        (
            "mom says i have to go eat dinner now :,( bye!!",
            "awww u have to go?? see u next time!!",
            "how to see u again soon! ^_^",
        ),
    ),
    (
        r"(.*)",
        (
            "ur funny! kekeke",
            "boooooring! talk about something else! tell me wat u like!",
            "do u like anime??",
            "do u watch anime? i like sailor moon! ^_^",
            "i wish i was a kitty!! kekekeke ^_^",
        ),
    ),
)

# Module-level bot instance shared by iesha_chat(), built with the custom
# netspeak reflections above.
iesha_chatbot = Chat(pairs, reflections)
143
+
144
+
145
def iesha_chat():
    """Start an interactive conversation with Iesha on stdin/stdout."""
    intro = (
        "Iesha the TeenBoT\n---------",
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "hi!! i'm iesha! who r u??!",
    )
    for text in intro:
        print(text)

    iesha_chatbot.converse()


def demo():
    """Demo entry point: simply starts the interactive chat."""
    iesha_chat()


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/chat/rude.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Rude Chatbot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Peter Spiller <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.chat.util import Chat, reflections
9
+
10
# (regex pattern, tuple of snarky responses); %1 expands to the first
# captured group.  Ordered, with the catch-all r"(.*)" last.
pairs = (
    (
        r"We (.*)",
        (
            "What do you mean, 'we'?",
            "Don't include me in that!",
            "I wouldn't be so sure about that.",
        ),
    ),
    (
        r"You should (.*)",
        ("Don't tell me what to do, buddy.", "Really? I should, should I?"),
    ),
    (
        r"You\'re(.*)",
        (
            "More like YOU'RE %1!",
            "Hah! Look who's talking.",
            "Come over here and tell me I'm %1.",
        ),
    ),
    (
        r"You are(.*)",
        (
            "More like YOU'RE %1!",
            "Hah! Look who's talking.",
            "Come over here and tell me I'm %1.",
        ),
    ),
    (
        r"I can\'t(.*)",
        (
            "You do sound like the type who can't %1.",
            "Hear that splashing sound? That's my heart bleeding for you.",
            "Tell somebody who might actually care.",
        ),
    ),
    (
        r"I think (.*)",
        (
            "I wouldn't think too hard if I were you.",
            "You actually think? I'd never have guessed...",
        ),
    ),
    (
        r"I (.*)",
        (
            "I'm getting a bit tired of hearing about you.",
            "How about we talk about me instead?",
            "Me, me, me... Frankly, I don't care.",
        ),
    ),
    (
        r"How (.*)",
        (
            "How do you think?",
            "Take a wild guess.",
            "I'm not even going to dignify that with an answer.",
        ),
    ),
    (r"What (.*)", ("Do I look like an encyclopedia?", "Figure it out yourself.")),
    (
        r"Why (.*)",
        (
            "Why not?",
            "That's so obvious I thought even you'd have already figured it out.",
        ),
    ),
    (
        r"(.*)shut up(.*)",
        (
            "Make me.",
            "Getting angry at a feeble NLP assignment? Somebody's losing it.",
            "Say that again, I dare you.",
        ),
    ),
    (
        r"Shut up(.*)",
        (
            "Make me.",
            "Getting angry at a feeble NLP assignment? Somebody's losing it.",
            "Say that again, I dare you.",
        ),
    ),
    (
        r"Hello(.*)",
        ("Oh good, somebody else to talk to. Joy.", "'Hello'? How original..."),
    ),
    (
        r"(.*)",
        (
            "I'm getting bored here. Become more interesting.",
            "Either become more thrilling or get lost, buddy.",
            "Change the subject before I die of fatal boredom.",
        ),
    ),
)

# Module-level bot instance shared by rude_chat(); uses the default
# reflections from nltk.chat.util.
rude_chatbot = Chat(pairs, reflections)
109
+
110
+
111
def rude_chat():
    """Begin an interactive session with the rude chatbot on stdin/stdout."""
    for text in (
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "I suppose I should say hello.",
    ):
        print(text)

    rude_chatbot.converse()


def demo():
    """Demo entry point: simply starts the interactive chat."""
    rude_chat()


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/chat/suntsu.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Sun Tsu-Bot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Sam Huston 2007
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Tsu bot responds to all queries with a Sun Tsu sayings
10
+
11
+ Quoted from Sun Tsu's The Art of War
12
+ Translated by LIONEL GILES, M.A. 1910
13
+ Hosted by the Gutenberg Project
14
+ https://www.gutenberg.org/
15
+ """
16
+
17
+ from nltk.chat.util import Chat, reflections
18
+
19
+ pairs = (
20
+ (r"quit", ("Good-bye.", "Plan well", "May victory be your future")),
21
+ (
22
+ r"[^\?]*\?",
23
+ (
24
+ "Please consider whether you can answer your own question.",
25
+ "Ask me no questions!",
26
+ ),
27
+ ),
28
+ (
29
+ r"[0-9]+(.*)",
30
+ (
31
+ "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
32
+ "There are five essentials for victory",
33
+ ),
34
+ ),
35
+ (
36
+ r"[A-Ca-c](.*)",
37
+ (
38
+ "The art of war is of vital importance to the State.",
39
+ "All warfare is based on deception.",
40
+ "If your opponent is secure at all points, be prepared for him. If he is in superior strength, evade him.",
41
+ "If the campaign is protracted, the resources of the State will not be equal to the strain.",
42
+ "Attack him where he is unprepared, appear where you are not expected.",
43
+ "There is no instance of a country having benefited from prolonged warfare.",
44
+ ),
45
+ ),
46
+ (
47
+ r"[D-Fd-f](.*)",
48
+ (
49
+ "The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.",
50
+ "Bring war material with you from home, but forage on the enemy.",
51
+ "In war, then, let your great object be victory, not lengthy campaigns.",
52
+ "To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.",
53
+ ),
54
+ ),
55
+ (
56
+ r"[G-Ig-i](.*)",
57
+ (
58
+ "Heaven signifies night and day, cold and heat, times and seasons.",
59
+ "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
60
+ "The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.",
61
+ "One may know how to conquer without being able to do it.",
62
+ ),
63
+ ),
64
+ (
65
+ r"[J-Lj-l](.*)",
66
+ (
67
+ "There are three ways in which a ruler can bring misfortune upon his army.",
68
+ "By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the army.",
69
+ "By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.",
70
+ "By employing the officers of his army without discrimination, through ignorance of the military principle of adaptation to circumstances. This shakes the confidence of the soldiers.",
71
+ "There are five essentials for victory",
72
+ "He will win who knows when to fight and when not to fight.",
73
+ "He will win who knows how to handle both superior and inferior forces.",
74
+ "He will win whose army is animated by the same spirit throughout all its ranks.",
75
+ "He will win who, prepared himself, waits to take the enemy unprepared.",
76
+ "He will win who has military capacity and is not interfered with by the sovereign.",
77
+ ),
78
+ ),
79
+ (
80
+ r"[M-Om-o](.*)",
81
+ (
82
+ "If you know the enemy and know yourself, you need not fear the result of a hundred battles.",
83
+ "If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.",
84
+ "If you know neither the enemy nor yourself, you will succumb in every battle.",
85
+ "The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.",
86
+ ),
87
+ ),
88
+ (
89
+ r"[P-Rp-r](.*)",
90
+ (
91
+ "Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.",
92
+ "Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.",
93
+ "He wins his battles by making no mistakes. Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.",
94
+ "A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.",
95
+ "The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.",
96
+ ),
97
+ ),
98
+ (
99
+ r"[S-Us-u](.*)",
100
+ (
101
+ "What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.",
102
+ "Hence his victories bring him neither reputation for wisdom nor credit for courage.",
103
+ "Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.",
104
+ "In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.",
105
+ "There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.",
106
+ "Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.",
107
+ ),
108
+ ),
109
+ (
110
+ r"[V-Zv-z](.*)",
111
+ (
112
+ "It is a matter of life and death, a road either to safety or to ruin.",
113
+ "Hold out baits to entice the enemy. Feign disorder, and crush him.",
114
+ "All men can see the tactics whereby I conquer, but what none can see is the strategy out of which victory is evolved.",
115
+ "Do not repeat the tactics which have gained you one victory, but let your methods be regulated by the infinite variety of circumstances.",
116
+ "So in war, the way is to avoid what is strong and to strike at what is weak.",
117
+ "Just as water retains no constant shape, so in warfare there are no constant conditions.",
118
+ ),
119
+ ),
120
+ (r"(.*)", ("Your statement insults me.", "")),
121
+ )
122
+
123
+ suntsu_chatbot = Chat(pairs, reflections)
124
+
125
+
126
def suntsu_chat():
    """Begin an interactive session with the Sun Tsu bot on stdin/stdout."""
    for text in (
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "You seek enlightenment?",
    ):
        print(text)

    suntsu_chatbot.converse()


def demo():
    """Demo entry point: simply starts the interactive chat."""
    suntsu_chat()


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/chat/util.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Chatbot Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Authors: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # Based on an Eliza implementation by Joe Strout <[email protected]>,
9
+ # Jeff Epler <[email protected]> and Jez Higgins <[email protected]>.
10
+
11
+ import random
12
+ import re
13
+
14
# Default pronoun "reflection" table: maps first-person expressions to
# second-person ones (and vice versa) so a user's words can be echoed back
# from the bot's point of view, e.g. "i am" -> "you are".  Multi-word keys
# win over their prefixes because Chat._compile_reflections sorts keys by
# length, longest first.
reflections = {
    "i am": "you are",
    "i was": "you were",
    "i": "you",
    "i'm": "you are",
    "i'd": "you would",
    "i've": "you have",
    "i'll": "you will",
    "my": "your",
    "you are": "I am",
    "you were": "I was",
    "you've": "I have",
    "you'll": "I will",
    "your": "my",
    "yours": "mine",
    "you": "me",
    "me": "you",
}
32
+
33
+
34
class Chat:
    """A simple pattern/response chatbot engine (ELIZA-style).

    The first regular expression in ``pairs`` that matches the user's input
    selects a tuple of candidate responses, one of which is chosen at random.
    ``%n`` placeholders in a response are filled with the pattern's n-th
    match group after pronoun "reflection" (e.g. "I am" -> "you are").
    """

    def __init__(self, pairs, reflections=None):
        """
        Initialize the chatbot. Pairs is a list of patterns and responses. Each
        pattern is a regular expression matching the user's statement or question,
        e.g. r'I like (.*)'. For each such pattern a list of possible responses
        is given, e.g. ['Why do you like %1', 'Did you ever dislike %1']. Material
        which is matched by parenthesized sections of the patterns (e.g. .*) is mapped to
        the numbered positions in the responses, e.g. %1.

        :type pairs: list of tuple
        :param pairs: The patterns and responses
        :type reflections: dict
        :param reflections: A mapping between first and second person expressions
        :rtype: None
        """
        # Compile every pattern once, up front; matching is case-insensitive.
        self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs]
        # Use a None sentinel rather than a mutable {} default argument.
        self._reflections = {} if reflections is None else reflections
        self._regex = self._compile_reflections()

    def _compile_reflections(self):
        # Build a single alternation regex that matches any reflection key.
        # Longer keys come first so e.g. "i am" wins over "i".
        sorted_refl = sorted(self._reflections, key=len, reverse=True)
        return re.compile(
            r"\b({})\b".format("|".join(map(re.escape, sorted_refl))), re.IGNORECASE
        )

    def _substitute(self, str):
        """
        Substitute words in the string, according to the specified reflections,
        e.g. "I'm" -> "you are"

        :type str: str
        :param str: The string to be mapped
        :rtype: str
        """
        # NOTE: the parameter shadows the builtin ``str``; the name is kept
        # for backward compatibility with the published signature.
        return self._regex.sub(
            lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower()
        )

    def _wildcards(self, response, match):
        # Replace each single-digit %n placeholder with the reflected text of
        # match group n.
        pos = response.find("%")
        while pos >= 0:
            num = int(response[pos + 1 : pos + 2])
            response = (
                response[:pos]
                + self._substitute(match.group(num))
                + response[pos + 2 :]
            )
            pos = response.find("%")
        return response

    def respond(self, str):
        """
        Generate a response to the user input.

        Returns None when no pattern matches (callers usually supply a
        catch-all ``r"(.*)"`` pair, so this does not normally happen).

        :type str: str
        :param str: The string to be mapped
        :rtype: str
        """
        # check each pattern in order; the first match wins
        for (pattern, response) in self._pairs:
            match = pattern.match(str)

            if match:
                resp = random.choice(response)  # pick a random response
                resp = self._wildcards(resp, match)  # process wildcards

                # fix munged punctuation at the end: a substituted group can
                # drag its own "?" in front of the template's punctuation
                if resp[-2:] == "?.":
                    resp = resp[:-2] + "."
                if resp[-2:] == "??":
                    resp = resp[:-2] + "?"
                return resp

    # Hold a conversation with a chatbot
    def converse(self, quit="quit"):
        """Read lines from stdin and print a response to each, until the
        user enters ``quit`` (or EOF is reached)."""
        user_input = ""
        while user_input != quit:
            user_input = quit  # EOF leaves this equal to quit, ending the loop
            try:
                user_input = input(">")
            except EOFError:
                print(user_input)
            if user_input:
                # Strip trailing "!" and "." so punctuation doesn't defeat the
                # patterns.  Guard against input that is *only* punctuation
                # (e.g. "!!!"), which previously raised IndexError here.
                while user_input and user_input[-1] in "!.":
                    user_input = user_input[:-1]
                print(self.respond(user_input))
llmeval-env/lib/python3.10/site-packages/nltk/chat/zen.py ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Zen Chatbot
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Amy Holland <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Zen Chatbot talks in gems of Zen wisdom.
10
+
11
+ This is a sample conversation with Zen Chatbot:
12
+ ZC: Welcome, my child.
13
+ me: Good afternoon.
14
+ ZC: Ask the question you have come to ask.
15
+ me: How can I achieve enlightenment?
16
+ ZC: How do you suppose?
17
+ me: Through meditation.
18
+ ZC: Form is emptiness, and emptiness form.
19
+ me: How can I empty my mind of worldly troubles?
20
+ ZC: Will an answer to that really help in your search for enlightenment?
21
+ me: Yes.
22
+ ZC: It is better to be right than to be certain.
23
+ me: I seek truth and wisdom.
24
+ ZC: The search for truth is a long journey.
25
+ me: Are you sure?
26
+ ZC: Maybe sure, maybe not sure.
27
+
28
+
29
+ The chatbot structure is based on that of chat.eliza. Thus, it uses
30
+ a translation table to convert from question to response
31
+ i.e. "I am" --> "you are"
32
+
33
+ Of course, since Zen Chatbot does not understand the meaning of any words,
34
+ responses are very limited. Zen Chatbot will usually answer very vaguely, or
35
+ respond to a question by asking a different question, in much the same way
36
+ as Eliza.
37
+ """
38
+
39
+ from nltk.chat.util import Chat, reflections
40
+
41
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list of possible responses is provided
responses = (
    # Zen Chatbot opens with the line "Welcome, my child." The usual
    # response will be a greeting problem: 'good' matches "good morning",
    # "good day" etc, but also "good grief!" and other sentences starting
    # with the word 'good' that may not be a greeting
    (
        r"(hello(.*))|(good [a-zA-Z]+)",
        (
            "The path to enlightenment is often difficult to see.",
            "Greetings. I sense your mind is troubled. Tell me of your troubles.",
            "Ask the question you have come to ask.",
            "Hello. Do you seek enlightenment?",
        ),
    ),
    # "I need" and "I want" can be followed by a thing (eg 'help')
    # or an action (eg 'to see you')
    #
    # This is a problem with this style of response -
    # person:  "I need you"
    # chatbot: "me can be achieved by hard work and dedication of the mind"
    # i.e. 'you' is not really a thing that can be mapped this way, so this
    # interpretation only makes sense for some inputs
    #
    (
        r"i need (.*)",
        (
            "%1 can be achieved by hard work and dedication of the mind.",
            "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
            "Focus your mind on%1, and you will find what you need.",
        ),
    ),
    (
        r"i want (.*)",
        (
            "Desires of the heart will distract you from the path to enlightenment.",
            "Will%1 help you attain enlightenment?",
            "Is%1 a desire of the mind, or of the heart?",
        ),
    ),
    # why questions are separated into three types:
    # "why..I"   e.g. "why am I here?" "Why do I like cake?"
    # "why..you" e.g. "why are you here?" "Why won't you tell me?"
    # "why..."   e.g. "Why is the sky blue?"
    # problems:
    #   person:  "Why can't you tell me?"
    #   chatbot: "Are you sure I tell you?"
    # - this style works for positives (e.g. "why do you like cake?")
    #   but does not work for negatives (e.g. "why don't you like cake?")
    (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")),
    (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")),
    (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")),
    # e.g. "are you listening?", "are you a duck"
    (
        r"are you (.*)\?",
        ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."),
    ),
    # e.g. "am I a duck?", "am I going to die?"
    (
        r"am i (.*)\?",
        ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."),
    ),
    # what questions, e.g. "what time is it?"
    # problems:
    #   person:  "What do you want?"
    #   chatbot: "Seek truth, not what do me want."
    (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")),
    # how questions, e.g. "how do you do?"
    (
        r"how (.*)\?",
        (
            "How do you suppose?",
            "Will an answer to that really help in your search for enlightenment?",
            "Ask yourself not how, but why.",
        ),
    ),
    # can questions, e.g. "can you run?", "can you come over here please?"
    (
        r"can you (.*)\?",
        (
            "I probably can, but I may not.",
            "Maybe I can%1, and maybe I cannot.",
            "I can do all, and I can do nothing.",
        ),
    ),
    # can questions, e.g. "can I have some cake?", "can I know truth?"
    (
        r"can i (.*)\?",
        (
            "You can%1 if you believe you can%1, and have a pure spirit.",
            "Seek truth and you will know if you can%1.",
        ),
    ),
    # e.g. "It is raining" - implies the speaker is certain of a fact
    (
        r"it is (.*)",
        (
            "How can you be certain that%1, when you do not even know yourself?",
            "Whether it is%1 or not does not change the way the world is.",
        ),
    ),
    # e.g. "is there a doctor in the house?"
    (
        r"is there (.*)\?",
        ("There is%1 if you believe there is.", "It is possible that there is%1."),
    ),
    # e.g. "is it possible?", "is this true?"
    (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")),
    # non-specific question
    (
        r"(.*)\?",
        (
            "Do you think %1?",
            "You seek the truth. Does the truth seek you?",
            "If you intentionally pursue the answers to your questions, the answers become hard to see.",
            "The answer to your question cannot be told. It must be experienced.",
        ),
    ),
    # expression of hate of form "I hate you" or "Kelly hates cheese"
    (
        r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)",
        (
            "Perhaps it is not about hating %2, but about hate from within.",
            "Weeds only grow when we dislike them",
            "Hate is a very strong emotion.",
        ),
    ),
    # statement containing the word 'truth'
    (
        r"(.*) truth(.*)",
        (
            "Seek truth, and truth will seek you.",
            "Remember, it is not the spoon which bends - only yourself.",
            "The search for truth is a long journey.",
        ),
    ),
    # desire to do an action
    # e.g. "I want to go shopping"
    (
        r"i want to (.*)",
        ("You may %1 if your heart truly desires to.", "You may have to %1."),
    ),
    # desire for an object
    # e.g. "I want a pony"
    (
        r"i want (.*)",
        (
            "Does your heart truly desire %1?",
            "Is this a desire of the heart, or of the mind?",
        ),
    ),
    # e.g. "I can't wait" or "I can't do this"
    (
        r"i can\'t (.*)",
        (
            "What we can and can't do is a limitation of the mind.",
            "There are limitations of the body, and limitations of the mind.",
            "Have you tried to%1 with a clear mind?",
        ),
    ),
    # "I think.." indicates uncertainty. e.g. "I think so."
    # problem: exceptions...
    # e.g. "I think, therefore I am"
    (
        r"i think (.*)",
        (
            "Uncertainty in an uncertain world.",
            "Indeed, how can we be certain of anything in such uncertain times.",
            "Are you not, in fact, certain that%1?",
        ),
    ),
    # "I feel...emotions/sick/light-headed..."
    (
        r"i feel (.*)",
        (
            # BUG FIX: a missing comma here used to concatenate the next two
            # strings into one garbled response ("...your mind.What do you...").
            "Your body and your emotions are both symptoms of your mind.",
            "What do you believe is the root of such feelings?",
            "Feeling%1 can be a sign of your state-of-mind.",
        ),
    ),
    # exclamation mark indicating emotion
    # e.g. "Wow!" or "No!"
    (
        r"(.*)!",
        (
            "I sense that you are feeling emotional today.",
            "You need to calm your emotions.",
        ),
    ),
    # because [statement]
    # e.g. "because I said so"
    (
        r"because (.*)",
        (
            "Does knowing the reasons behind things help you to understand"
            " the things themselves?",
            "If%1, what else must be true?",
        ),
    ),
    # yes or no - raise an issue of certainty/correctness
    (
        r"(yes)|(no)",
        (
            "Is there certainty in an uncertain world?",
            "It is better to be right than to be certain.",
        ),
    ),
    # sentence containing word 'love'
    (
        r"(.*)love(.*)",
        (
            "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
            "Free love!",
        ),
    ),
    # sentence containing word 'understand' - r
    (
        r"(.*)understand(.*)",
        (
            "If you understand, things are just as they are;"
            " if you do not understand, things are just as they are.",
            "Imagination is more important than knowledge.",
        ),
    ),
    # 'I', 'me', 'my' - person is talking about themself.
    # this breaks down when words contain these - eg 'Thyme', 'Irish'
    (
        r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)",
        (
            "'I', 'me', 'my'... these are selfish expressions.",
            "Have you ever considered that you might be a selfish person?",
            "Try to consider others, not just yourself.",
            "Think not just of yourself, but of others.",
        ),
    ),
    # 'you' starting a sentence
    # e.g. "you stink!"
    (
        r"you (.*)",
        ("My path is not of concern to you.", "I am but one, and you but one more."),
    ),
    # say goodbye with some extra Zen wisdom.
    (
        r"exit",
        (
            "Farewell. The obstacle is the path.",
            "Farewell. Life is a journey, not a destination.",
            "Good bye. We are cups, constantly and quietly being filled."
            "\nThe trick is knowing how to tip ourselves over and let the beautiful stuff out.",
        ),
    ),
    # fall through case -
    # when stumped, respond with generic zen wisdom
    #
    (
        r"(.*)",
        (
            "When you're enlightened, every word is wisdom.",
            "Random talk is useless.",
            "The reverse side also has a reverse side.",
            "Form is emptiness, and emptiness is form.",
            "I pour out a cup of water. Is the cup empty?",
        ),
    ),
)
307
+
308
# Build the Zen chatbot from the response table above.
zen_chatbot = Chat(responses, reflections)


def zen_chat():
    """Print the welcome banner and start an interactive Zen session."""
    border = "*" * 75
    banner = (
        border,
        "Zen Chatbot!".center(75),
        border,
        '"Look beyond mere words and letters - look into your mind"'.center(75),
        "* Talk your way to truth with Zen Chatbot.",
        "* Type 'quit' when you have had enough.",
        border,
        "Welcome, my child.",
    )
    for line in banner:
        print(line)

    zen_chatbot.converse()


def demo():
    """Run the Zen chat demo."""
    zen_chat()


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc ADDED
Binary file (18.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc ADDED
Binary file (16.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc ADDED
Binary file (13.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc ADDED
Binary file (7.59 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc ADDED
Binary file (6.51 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/misc/babelfish.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module previously provided an interface to Babelfish online
3
+ translation service; this service is no longer available; this
4
+ module is kept in NLTK source code in order to provide better error
5
+ messages for people following the NLTK Book 2.0.
6
+ """
7
+
8
+
9
+ def babelize_shell():
10
+ print("Babelfish online translation service is no longer available.")
llmeval-env/lib/python3.10/site-packages/nltk/misc/chomsky.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Chomsky random text generator, version 1.1, Raymond Hettinger, 2005/09/13
# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440546

"""
CHOMSKY is an aid to writing linguistic papers in the style
of the great master. It is based on selected phrases taken
from actual books and articles written by Noam Chomsky.
Upon request, it assembles the phrases in the elegant
stylistic patterns that Chomsky is noted for.
To generate n sentences of linguistic wisdom, type

(CHOMSKY n) -- for example
(CHOMSKY 5) generates half a screen of linguistic truth.
"""

leadins = """To characterize a linguistic level L,
On the other hand,
This suggests that
It appears that
Furthermore,
We will bring evidence in favor of the following thesis:
To provide a constituent structure for T(Z,K),
From C1, it follows that
For any transformation which is sufficiently diversified in \
application to be of any interest,
Analogously,
Clearly,
Note that
Of course,
Suppose, for instance, that
Thus
With this clarification,
Conversely,
We have already seen that
By combining adjunctions and certain deformations,
I suggested that these results would follow from the assumption that
If the position of the trace in (99c) were only relatively \
inaccessible to movement,
However, this assumption is not correct, since
Comparing these examples with their parasitic gap counterparts in \
(96) and (97), we see that
In the discussion of resumptive pronouns following (81),
So far,
Nevertheless,
For one thing,
Summarizing, then, we assume that
A consequence of the approach just outlined is that
Presumably,
On our assumptions,
It may be, then, that
It must be emphasized, once again, that
Let us continue to suppose that
Notice, incidentally, that """
# List of LEADINs to buy time.

subjects = """ the notion of level of grammaticalness
a case of semigrammaticalness of a different sort
most of the methodological work in modern linguistics
a subset of English sentences interesting on quite independent grounds
the natural general principle that will subsume this case
an important property of these three types of EC
any associated supporting element
the appearance of parasitic gaps in domains relatively inaccessible \
to ordinary extraction
the speaker-hearer's linguistic intuition
the descriptive power of the base component
the earlier discussion of deviance
this analysis of a formative as a pair of sets of features
this selectionally introduced contextual feature
a descriptively adequate grammar
the fundamental error of regarding functional notions as categorial
relational information
the systematic use of complex symbols
the theory of syntactic features developed earlier"""
# List of SUBJECTs chosen for maximum professorial macho.

verbs = """can be defined in such a way as to impose
delimits
suffices to account for
cannot be arbitrary in
is not subject to
does not readily tolerate
raises serious doubts about
is not quite equivalent to
does not affect the structure of
may remedy and, at the same time, eliminate
is not to be considered in determining
is to be regarded as
is unspecified with respect to
is, apparently, determined by
is necessary to impose an interpretation on
appears to correlate rather closely with
is rather different from"""
# List of VERBs chosen for autorecursive obfuscation.

objects = """ problems of phonemic and morphological analysis.
a corpus of utterance tokens upon which conformity has been defined \
by the paired utterance test.
the traditional practice of grammarians.
the levels of acceptability from fairly high (e.g. (99a)) to virtual \
gibberish (e.g. (98d)).
a stipulation to place the constructions into these various categories.
a descriptive fact.
a parasitic gap construction.
the extended c-command discussed in connection with (34).
the ultimate standard that determines the accuracy of any proposed grammar.
the system of base rules exclusive of the lexicon.
irrelevant intervening contexts in selectional rules.
nondistinctness in the sense of distinctive feature theory.
a general convention regarding the forms of the grammar.
an abstract underlying order.
an important distinction in language use.
the requirement that branching is not tolerated within the dominance \
scope of a complex symbol.
the strong generative capacity of the theory."""
# List of OBJECTs selected for profound sententiousness.

import random
import textwrap
from itertools import chain, islice


def generate_chomsky(times=5, line_length=72):
    """Print ``times`` random Chomsky-style sentences, wrapped at
    ``line_length`` columns.

    Each sentence is assembled from one shuffled phrase per section
    (leadin, subject, verb, object).
    """
    pools = []
    for section in (leadins, subjects, verbs, objects):
        phrases = [line.strip() for line in section.splitlines()]
        random.shuffle(phrases)
        pools.append(phrases)
    # zip(*pools) yields one (leadin, subject, verb, object) tuple per
    # sentence; take the first `times` of them and flatten into words.
    sentence_parts = chain.from_iterable(islice(zip(*pools), 0, times))
    print(textwrap.fill(" ".join(sentence_parts), line_length))


if __name__ == "__main__":
    generate_chomsky()
llmeval-env/lib/python3.10/site-packages/nltk/misc/minimalset.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Minimal Sets
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from collections import defaultdict
9
+
10
+
11
class MinimalSet:
    """
    Find contexts where more than one possible target value can
    appear.  E.g. if targets are word-initial letters, and contexts
    are the remainders of words, then we would like to find cases like
    "fat" vs "cat", and "training" vs "draining".  If targets are
    parts-of-speech and contexts are words, then we would like to find
    cases like wind (noun) 'air in rapid motion', vs wind (verb)
    'coil, wrap'.
    """

    def __init__(self, parameters=None):
        """
        Create a new minimal set.

        :param parameters: The (context, target, display) tuples for the item
        :type parameters: list(tuple(str, str, str))
        """
        self._targets = set()  # the contrastive information
        self._contexts = set()  # what we are controlling for
        self._seen = defaultdict(set)  # targets observed in each context
        self._displays = {}  # display form for each (context, target)

        for context, target, display in parameters or ():
            self.add(context, target, display)

    def add(self, context, target, display):
        """
        Add a new item to the minimal set, having the specified
        context, target, and display form.

        :param context: The context in which the item of interest appears
        :type context: str
        :param target: The item of interest
        :type target: str
        :param display: The information to be reported for each item
        :type display: str
        """
        # Note that this target occurred in this context, and track both
        # independently as well.
        self._seen[context].add(target)
        self._contexts.add(context)
        self._targets.add(target)
        # Remember how to display this (context, target) pair.
        self._displays[(context, target)] = display

    def contexts(self, minimum=2):
        """
        Determine which contexts occurred with enough distinct targets.

        :param minimum: the minimum number of distinct target forms
        :type minimum: int
        :rtype: list
        """
        return [ctx for ctx in self._contexts if len(self._seen[ctx]) >= minimum]

    def display(self, context, target, default=""):
        # Return the recorded display form for (context, target), falling
        # back on the supplied default when the pair was never added.
        return self._displays.get((context, target), default)

    def display_all(self, context):
        # Collect the non-empty display forms of every known target in the
        # given context.
        return [
            shown
            for shown in (self.display(context, target) for target in self._targets)
            if shown
        ]

    def targets(self):
        return self._targets
llmeval-env/lib/python3.10/site-packages/nltk/sem/__init__.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Semantic Interpretation
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ NLTK Semantic Interpretation Package
10
+
11
+ This package contains classes for representing semantic structure in
12
+ formulas of first-order logic and for evaluating such formulas in
13
+ set-theoretic models.
14
+
15
+ >>> from nltk.sem import logic
16
+ >>> logic._counter._value = 0
17
+
18
+ The package has two main components:
19
+
20
+ - ``logic`` provides support for analyzing expressions of First
21
+ Order Logic (FOL).
22
+ - ``evaluate`` allows users to recursively determine truth in a
23
+ model for formulas of FOL.
24
+
25
+ A model consists of a domain of discourse and a valuation function,
26
+ which assigns values to non-logical constants. We assume that entities
27
+ in the domain are represented as strings such as ``'b1'``, ``'g1'``,
28
+ etc. A ``Valuation`` is initialized with a list of (symbol, value)
29
+ pairs, where values are entities, sets of entities or sets of tuples
30
+ of entities.
31
+ The domain of discourse can be inferred from the valuation, and model
32
+ is then created with domain and valuation as parameters.
33
+
34
+ >>> from nltk.sem import Valuation, Model
35
+ >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
36
+ ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
37
+ ... ('dog', set(['d1'])),
38
+ ... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
39
+ >>> val = Valuation(v)
40
+ >>> dom = val.domain
41
+ >>> m = Model(dom, val)
42
+ """
43
+
44
+ from nltk.sem.boxer import Boxer
45
+ from nltk.sem.drt import DRS, DrtExpression
46
+ from nltk.sem.evaluate import (
47
+ Assignment,
48
+ Model,
49
+ Undefined,
50
+ Valuation,
51
+ arity,
52
+ is_rel,
53
+ read_valuation,
54
+ set2rel,
55
+ )
56
+ from nltk.sem.lfg import FStructure
57
+ from nltk.sem.logic import (
58
+ ApplicationExpression,
59
+ Expression,
60
+ LogicalExpressionException,
61
+ Variable,
62
+ binding_ops,
63
+ boolean_ops,
64
+ equality_preds,
65
+ read_logic,
66
+ )
67
+ from nltk.sem.relextract import clause, extract_rels, rtuple
68
+ from nltk.sem.skolemize import skolemize
69
+ from nltk.sem.util import evaluate_sents, interpret_sents, parse_sents, root_semrep
70
+
71
+ # from nltk.sem.glue import Glue
72
+ # from nltk.sem.hole import HoleSemantics
73
+ # from nltk.sem.cooper_storage import CooperStore
74
+
75
+ # don't import chat80 as its names are too generic
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc ADDED
Binary file (45.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc ADDED
Binary file (3.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc ADDED
Binary file (44.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc ADDED
Binary file (21.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc ADDED
Binary file (19.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc ADDED
Binary file (5.99 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc ADDED
Binary file (64 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc ADDED
Binary file (2.34 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc ADDED
Binary file (8.02 kB). View file
 
llmeval-env/lib/python3.10/site-packages/nltk/sem/boxer.py ADDED
@@ -0,0 +1,1605 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Interface to Boxer
2
+ # <http://svn.ask.it.usyd.edu.au/trac/candc/wiki/boxer>
3
+ #
4
+ # Author: Dan Garrette <[email protected]>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ An interface to Boxer.
12
+
13
+ This interface relies on the latest version of the development (subversion) version of
14
+ C&C and Boxer.
15
+
16
+ Usage
17
+ =====
18
+
19
+ Set the environment variable CANDC to the bin directory of your CandC installation.
20
+ The models directory should be in the CandC root directory.
21
+ For example::
22
+
23
+ /path/to/candc/
24
+ bin/
25
+ candc
26
+ boxer
27
+ models/
28
+ boxer/
29
+ """
30
+
31
+ import operator
32
+ import os
33
+ import re
34
+ import subprocess
35
+ import tempfile
36
+ from functools import reduce
37
+ from optparse import OptionParser
38
+
39
+ from nltk.internals import find_binary
40
+ from nltk.sem.drt import (
41
+ DRS,
42
+ DrtApplicationExpression,
43
+ DrtEqualityExpression,
44
+ DrtNegatedExpression,
45
+ DrtOrExpression,
46
+ DrtParser,
47
+ DrtProposition,
48
+ DrtTokens,
49
+ DrtVariableExpression,
50
+ )
51
+ from nltk.sem.logic import (
52
+ ExpectedMoreTokensException,
53
+ LogicalExpressionException,
54
+ UnexpectedTokenException,
55
+ Variable,
56
+ )
57
+
58
+
59
class Boxer:
    """
    This class is an interface to Johan Bos's program Boxer, a wide-coverage
    semantic parser that produces Discourse Representation Structures (DRSs).

    It drives two external binaries — the C&C parser and Boxer itself — whose
    location is discovered via the ``CANDC`` environment variable or an
    explicit ``bin_dir``.
    """

    def __init__(
        self,
        boxer_drs_interpreter=None,
        elimeq=False,
        bin_dir=None,
        verbose=False,
        resolve=True,
    ):
        """
        :param boxer_drs_interpreter: A class that converts from the
            ``AbstractBoxerDrs`` object hierarchy to a different object.  The
            default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK
            DRT hierarchy.
        :param elimeq: When set to true, Boxer removes all equalities from the
            DRSs and discourse referents standing in the equality relation are
            unified, but only if this can be done in a meaning-preserving manner.
        :param bin_dir: Directory containing the ``candc`` and ``boxer`` binaries.
        :param verbose: Print diagnostic output while locating/calling binaries.
        :param resolve: When set to true, Boxer will resolve all anaphoric DRSs and perform merge-reduction.
            Resolution follows Van der Sandt's theory of binding and accommodation.
        """
        if boxer_drs_interpreter is None:
            boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter()
        self._boxer_drs_interpreter = boxer_drs_interpreter

        self._resolve = resolve
        self._elimeq = elimeq

        self.set_bin_dir(bin_dir, verbose)

    def set_bin_dir(self, bin_dir, verbose=False):
        """Locate the ``candc`` and ``boxer`` binaries and the C&C models dir."""
        self._candc_bin = self._find_binary("candc", bin_dir, verbose)
        # NOTE(review): assumes the binary path ends in the 5 characters
        # "candc", so stripping them yields the bin/ directory -- brittle if
        # the binary is renamed.
        self._candc_models_path = os.path.normpath(
            os.path.join(self._candc_bin[:-5], "../models")
        )
        self._boxer_bin = self._find_binary("boxer", bin_dir, verbose)

    def interpret(self, input, discourse_id=None, question=False, verbose=False):
        """
        Use Boxer to give a first order representation.

        :param input: str Input sentence to parse
        :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models.
        :return: ``drt.DrtExpression``
        :raises Exception: if the sentence cannot be interpreted.
        """
        discourse_ids = [discourse_id] if discourse_id is not None else None
        (d,) = self.interpret_multi_sents([[input]], discourse_ids, question, verbose)
        if not d:
            raise Exception(f'Unable to interpret: "{input}"')
        return d

    def interpret_multi(self, input, discourse_id=None, question=False, verbose=False):
        """
        Use Boxer to give a first order representation.

        :param input: list of str Input sentences to parse as a single discourse
        :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models.
        :return: ``drt.DrtExpression``
        :raises Exception: if the discourse cannot be interpreted.
        """
        discourse_ids = [discourse_id] if discourse_id is not None else None
        (d,) = self.interpret_multi_sents([input], discourse_ids, question, verbose)
        if not d:
            raise Exception(f'Unable to interpret: "{input}"')
        return d

    def interpret_sents(
        self, inputs, discourse_ids=None, question=False, verbose=False
    ):
        """
        Use Boxer to give a first order representation.

        :param inputs: list of str Input sentences to parse as individual discourses
        :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models.
        :return: list of ``drt.DrtExpression``
        """
        return self.interpret_multi_sents(
            [[input] for input in inputs], discourse_ids, question, verbose
        )

    def interpret_multi_sents(
        self, inputs, discourse_ids=None, question=False, verbose=False
    ):
        """
        Use Boxer to give a first order representation.

        :param inputs: list of list of str Input discourses to parse
        :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models.
        :return: list of ``drt.DrtExpression`` (``None`` entries for discourses
            Boxer could not interpret)
        """
        if discourse_ids is not None:
            assert len(inputs) == len(discourse_ids)
            # Every supplied discourse id must be non-None.
            assert all(id is not None for id in discourse_ids)
            use_disc_id = True
        else:
            # Fall back to positional ids so output can be matched to input.
            discourse_ids = list(map(str, range(len(inputs))))
            use_disc_id = False

        candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
        boxer_out = self._call_boxer(candc_out, verbose=verbose)

        drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
        return [drs_dict.get(id, None) for id in discourse_ids]

    def _call_candc(self, inputs, discourse_ids, question, verbose=False):
        """
        Call the ``candc`` binary with the given input.

        :param inputs: list of list of str Input discourses to parse
        :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
        :param question: bool Select the question model set.
        :return: stdout (bytes)
        """
        args = [
            "--models",
            os.path.join(self._candc_models_path, ["boxer", "questions"][question]),
            "--candc-printer",
            "boxer",
        ]
        # Each discourse is preceded by a <META> line carrying its id, so the
        # output can later be mapped back to the right discourse.
        return self._call(
            "\n".join(
                sum(
                    ([f"<META>'{id}'"] + d for d, id in zip(inputs, discourse_ids)),
                    [],
                )
            ),
            self._candc_bin,
            args,
            verbose,
        )

    def _call_boxer(self, candc_out, verbose=False):
        """
        Call the ``boxer`` binary with the given input.

        :param candc_out: bytes output from C&C parser
        :return: stdout (bytes)
        """
        fd, temp_filename = tempfile.mkstemp(prefix="boxer-", suffix=".in", text=True)
        try:
            with os.fdopen(fd, "w") as f:
                f.write(candc_out.decode("utf-8"))

            args = [
                "--box",
                "false",
                "--semantics",
                "drs",
                "--resolve",
                ["false", "true"][self._resolve],
                "--elimeq",
                ["false", "true"][self._elimeq],
                "--format",
                "prolog",
                "--instantiate",
                "true",
                "--input",
                temp_filename,
            ]
            stdout = self._call(None, self._boxer_bin, args, verbose)
        finally:
            # Always remove the temp file, even if the boxer call raises
            # (the original leaked the file on error).
            os.remove(temp_filename)
        return stdout

    def _find_binary(self, name, bin_dir, verbose=False):
        """Locate an external binary via nltk.internals.find_binary."""
        return find_binary(
            name,
            path_to_bin=bin_dir,
            env_vars=["CANDC"],
            url="http://svn.ask.it.usyd.edu.au/trac/candc/",
            binary_names=[name, name + ".exe"],
            verbose=verbose,
        )

    def _call(self, input_str, binary, args=None, verbose=False):
        """
        Call the binary with the given input.

        :param input_str: A string whose contents are used as stdin, or None.
        :param binary: The location of the binary to call
        :param args: A list of command-line arguments.
        :return: stdout (bytes)
        :raises Exception: if the process exits with a non-zero return code.
        """
        if args is None:
            args = []
        if verbose:
            print("Calling:", binary)
            print("Args:", args)
            print("Input:", input_str)
            print("Command:", binary + " " + " ".join(args))

        # Feed stdin directly instead of the original
        # `echo "{input}" | binary ...` with shell=True, which broke on (and
        # was injectable via) quotes in the input sentences.
        cmd = [binary] + args
        if input_str is None:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
        else:
            p = subprocess.Popen(
                cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            # `echo` appended a trailing newline; preserve that behavior.
            stdout, stderr = p.communicate(input=(input_str + "\n").encode("utf-8"))

        if verbose:
            print("Return code:", p.returncode)
            if stdout:
                print("stdout:\n", stdout, "\n")
            if stderr:
                print("stderr:\n", stderr, "\n")
        if p.returncode != 0:
            raise Exception(
                "ERROR CALLING: {} {}\nReturncode: {}\n{}".format(
                    binary, " ".join(args), p.returncode, stderr
                )
            )

        return stdout

    def _parse_to_drs_dict(self, boxer_out, use_disc_id):
        """
        Parse Boxer's Prolog output into {discourse_id: interpreted DRS}.

        Output is a sequence of `id(...)` / `sem(...)` line pairs; the DRS term
        is extracted from each `sem(...)` line by skipping the leading index
        list (bracket matching) and handed to the DRS parser.
        """
        lines = boxer_out.decode("utf-8").split("\n")
        drs_dict = {}
        i = 0
        while i < len(lines):
            line = lines[i]
            if line.startswith("id("):
                comma_idx = line.index(",")
                discourse_id = line[3:comma_idx]
                if discourse_id[0] == "'" and discourse_id[-1] == "'":
                    discourse_id = discourse_id[1:-1]
                drs_id = line[comma_idx + 1 : line.index(")")]
                i += 1
                line = lines[i]
                assert line.startswith(f"sem({drs_id},")
                # Normalize an occasional "')." tail emitted by boxer.
                if line[-4:] == "').'":
                    line = line[:-4] + ")."
                assert line.endswith(")."), f"can't parse line: {line}"

                # Skip past the word-index list "[...]" that precedes the DRS.
                search_start = len(f"sem({drs_id},[")
                brace_count = 1
                drs_start = -1
                for j, c in enumerate(line[search_start:]):
                    if c == "[":
                        brace_count += 1
                    if c == "]":
                        brace_count -= 1
                        if brace_count == 0:
                            drs_start = search_start + j + 1
                            if line[drs_start : drs_start + 3] == "','":
                                drs_start = drs_start + 3
                            else:
                                drs_start = drs_start + 1
                            break
                assert drs_start > -1

                drs_input = line[drs_start:-2].strip()
                parsed = self._parse_drs(drs_input, discourse_id, use_disc_id)
                drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed)
            i += 1
        return drs_dict

    def _parse_drs(self, drs_string, discourse_id, use_disc_id):
        """Parse one Prolog DRS term, tagging it with the discourse id if requested."""
        return BoxerOutputDrsParser([None, discourse_id][use_disc_id]).parse(drs_string)
334
+
335
+
336
class BoxerOutputDrsParser(DrtParser):
    """
    Parse the Prolog DRS output from Boxer into a hierarchy of
    ``AbstractBoxerDrs`` python objects.
    """

    def __init__(self, discourse_id=None):
        """
        :param discourse_id: identifier inserted into each occurrence-indexed
            predicate, or None to omit discourse ids.
        """
        DrtParser.__init__(self)
        self.discourse_id = discourse_id
        self.sentence_id_offset = None
        self.quote_chars = [("'", "'", "\\", False)]

    def parse(self, data, signature=None):
        return DrtParser.parse(self, data, signature)

    def get_all_symbols(self):
        """Return the punctuation tokens of Boxer's Prolog output."""
        return ["(", ")", ",", "[", "]", ":"]

    def handle(self, tok, context):
        return self.handle_drs(tok)

    def attempt_adjuncts(self, expression, context):
        # Boxer output has no adjunct syntax; pass expressions through.
        return expression

    def parse_condition(self, indices):
        """
        Parse a DRS condition

        :param indices: list of int occurrence indices for the condition
        :return: list of ``DrtExpression``
        :raises UnexpectedTokenException: on an unrecognized condition functor.
        """
        tok = self.token()
        accum = self.handle_condition(tok, indices)
        if accum is None:
            raise UnexpectedTokenException(tok)
        return accum

    def handle_drs(self, tok):
        """Dispatch on the top-level DRS functor (drs / merge / smerge / alfa)."""
        if tok == "drs":
            return self.parse_drs()
        elif tok in ["merge", "smerge"]:
            return self._handle_binary_expression(self._make_merge_expression)(None, [])
        elif tok in ["alfa"]:
            return self._handle_alfa(self._make_merge_expression)(None, [])

    def handle_condition(self, tok, indices):
        """
        Handle a DRS condition

        :param tok: str the condition functor
        :param indices: list of int occurrence indices
        :return: list of ``DrtExpression``
        """
        if tok == "not":
            return [self._handle_not()]

        if tok == "or":
            conds = [self._handle_binary_expression(self._make_or_expression)]
        elif tok == "imp":
            conds = [self._handle_binary_expression(self._make_imp_expression)]
        elif tok == "eq":
            conds = [self._handle_eq()]
        elif tok == "prop":
            conds = [self._handle_prop()]

        elif tok == "pred":
            conds = [self._handle_pred()]
        elif tok == "named":
            conds = [self._handle_named()]
        elif tok == "rel":
            conds = [self._handle_rel()]
        elif tok == "timex":
            conds = self._handle_timex()
        elif tok == "card":
            conds = [self._handle_card()]

        elif tok == "whq":
            conds = [self._handle_whq()]
        elif tok == "duplex":
            conds = [self._handle_duplex()]

        else:
            conds = []

        # Instantiate each condition callback once per (sentence, words) pair
        # derived from the occurrence indices.
        return sum(
            (
                [cond(sent_index, word_indices) for cond in conds]
                for sent_index, word_indices in self._sent_and_word_indices(indices)
            ),
            [],
        )

    def _handle_not(self):
        self.assertToken(self.token(), "(")
        drs = self.process_next_expression(None)
        self.assertToken(self.token(), ")")
        return BoxerNot(drs)

    def _handle_pred(self):
        # pred(_G3943, dog, n, 0)
        self.assertToken(self.token(), "(")
        variable = self.parse_variable()
        self.assertToken(self.token(), ",")
        name = self.token()
        self.assertToken(self.token(), ",")
        pos = self.token()
        self.assertToken(self.token(), ",")
        sense = int(self.token())
        self.assertToken(self.token(), ")")

        def _handle_pred_f(sent_index, word_indices):
            return BoxerPred(
                self.discourse_id, sent_index, word_indices, variable, name, pos, sense
            )

        return _handle_pred_f

    def _handle_duplex(self):
        # duplex(whq, drs(...), var, drs(...))
        self.assertToken(self.token(), "(")
        ans_types = []
        self.assertToken(self.token(), "whq")
        self.assertToken(self.token(), ",")
        d1 = self.process_next_expression(None)
        self.assertToken(self.token(), ",")
        ref = self.parse_variable()
        self.assertToken(self.token(), ",")
        d2 = self.process_next_expression(None)
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: BoxerWhq(
            self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2
        )

    def _handle_named(self):
        # named(x0, john, per, 0)
        self.assertToken(self.token(), "(")
        variable = self.parse_variable()
        self.assertToken(self.token(), ",")
        name = self.token()
        self.assertToken(self.token(), ",")
        type = self.token()
        self.assertToken(self.token(), ",")
        sense = self.token()  # as per boxer rev 2554
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: BoxerNamed(
            self.discourse_id, sent_index, word_indices, variable, name, type, sense
        )

    def _handle_rel(self):
        # rel(_G3993, _G3943, agent, 0)
        self.assertToken(self.token(), "(")
        var1 = self.parse_variable()
        self.assertToken(self.token(), ",")
        var2 = self.parse_variable()
        self.assertToken(self.token(), ",")
        rel = self.token()
        self.assertToken(self.token(), ",")
        sense = int(self.token())
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: BoxerRel(
            self.discourse_id, sent_index, word_indices, var1, var2, rel, sense
        )

    def _handle_timex(self):
        # timex(_G18322, date([]: (+), []:'XXXX', [1004]:'04', []:'XX'))
        self.assertToken(self.token(), "(")
        arg = self.parse_variable()
        self.assertToken(self.token(), ",")
        new_conds = self._handle_time_expression(arg)
        self.assertToken(self.token(), ")")
        return new_conds

    def _handle_time_expression(self, arg):
        # date([]: (+), []:'XXXX', [1004]:'04', []:'XX')
        tok = self.token()
        self.assertToken(self.token(), "(")
        if tok == "date":
            conds = self._handle_date(arg)
        elif tok == "time":
            conds = self._handle_time(arg)
        else:
            return None
        self.assertToken(self.token(), ")")
        return [
            lambda sent_index, word_indices: BoxerPred(
                self.discourse_id, sent_index, word_indices, arg, tok, "n", 0
            )
        ] + [
            # Bind `cond` as a default argument: without `cond=cond` each
            # lambda would close over the same loop variable and every one
            # would return the *last* condition (late-binding closure bug).
            lambda sent_index, word_indices, cond=cond: cond
            for cond in conds
        ]

    def _handle_date(self, arg):
        # []: (+), []:'XXXX', [1004]:'04', []:'XX'
        conds = []
        ((sent_index, word_indices),) = self._sent_and_word_indices(
            self._parse_index_list()
        )
        self.assertToken(self.token(), "(")
        pol = self.token()
        self.assertToken(self.token(), ")")
        conds.append(
            BoxerPred(
                self.discourse_id,
                sent_index,
                word_indices,
                arg,
                f"date_pol_{pol}",
                "a",
                0,
            )
        )
        self.assertToken(self.token(), ",")

        ((sent_index, word_indices),) = self._sent_and_word_indices(
            self._parse_index_list()
        )
        year = self.token()
        if year != "XXXX":
            year = year.replace(":", "_")
            conds.append(
                BoxerPred(
                    self.discourse_id,
                    sent_index,
                    word_indices,
                    arg,
                    f"date_year_{year}",
                    "a",
                    0,
                )
            )
        self.assertToken(self.token(), ",")

        ((sent_index, word_indices),) = self._sent_and_word_indices(
            self._parse_index_list()
        )
        month = self.token()
        if month != "XX":
            conds.append(
                BoxerPred(
                    self.discourse_id,
                    sent_index,
                    word_indices,
                    arg,
                    f"date_month_{month}",
                    "a",
                    0,
                )
            )
        self.assertToken(self.token(), ",")

        ((sent_index, word_indices),) = self._sent_and_word_indices(
            self._parse_index_list()
        )
        day = self.token()
        if day != "XX":
            conds.append(
                BoxerPred(
                    self.discourse_id,
                    sent_index,
                    word_indices,
                    arg,
                    f"date_day_{day}",
                    "a",
                    0,
                )
            )

        return conds

    def _handle_time(self, arg):
        # time([1018]:'18', []:'XX', []:'XX')
        conds = []
        self._parse_index_list()
        hour = self.token()
        if hour != "XX":
            conds.append(self._make_atom("r_hour_2", arg, hour))
        self.assertToken(self.token(), ",")

        self._parse_index_list()
        min = self.token()
        if min != "XX":
            conds.append(self._make_atom("r_min_2", arg, min))
        self.assertToken(self.token(), ",")

        self._parse_index_list()
        sec = self.token()
        if sec != "XX":
            conds.append(self._make_atom("r_sec_2", arg, sec))

        return conds

    def _handle_card(self):
        # card(_G18535, 28, ge)
        self.assertToken(self.token(), "(")
        variable = self.parse_variable()
        self.assertToken(self.token(), ",")
        value = self.token()
        self.assertToken(self.token(), ",")
        type = self.token()
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: BoxerCard(
            self.discourse_id, sent_index, word_indices, variable, value, type
        )

    def _handle_prop(self):
        # prop(_G15949, drs(...))
        self.assertToken(self.token(), "(")
        variable = self.parse_variable()
        self.assertToken(self.token(), ",")
        drs = self.process_next_expression(None)
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: BoxerProp(
            self.discourse_id, sent_index, word_indices, variable, drs
        )

    def _parse_index_list(self):
        # [1001,1002]:
        indices = []
        self.assertToken(self.token(), "[")
        while self.token(0) != "]":
            indices.append(self.parse_index())
            if self.token(0) == ",":
                self.token()  # swallow ','
        self.token()  # swallow ']'
        self.assertToken(self.token(), ":")
        return indices

    def parse_drs(self):
        # drs([[1001]:_G3943],
        #     [[1002]:pred(_G3943, dog, n, 0)]
        #    )
        self.assertToken(self.token(), "(")
        self.assertToken(self.token(), "[")
        refs = set()
        while self.token(0) != "]":
            indices = self._parse_index_list()
            refs.add(self.parse_variable())
            if self.token(0) == ",":
                self.token()  # swallow ','
        self.token()  # swallow ']'
        self.assertToken(self.token(), ",")
        self.assertToken(self.token(), "[")
        conds = []
        while self.token(0) != "]":
            indices = self._parse_index_list()
            conds.extend(self.parse_condition(indices))
            if self.token(0) == ",":
                self.token()  # swallow ','
        self.token()  # swallow ']'
        self.assertToken(self.token(), ")")
        return BoxerDrs(list(refs), conds)

    def _handle_binary_expression(self, make_callback):
        self.assertToken(self.token(), "(")
        drs1 = self.process_next_expression(None)
        self.assertToken(self.token(), ",")
        drs2 = self.process_next_expression(None)
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: make_callback(
            sent_index, word_indices, drs1, drs2
        )

    def _handle_alfa(self, make_callback):
        self.assertToken(self.token(), "(")
        type = self.token()
        self.assertToken(self.token(), ",")
        drs1 = self.process_next_expression(None)
        self.assertToken(self.token(), ",")
        drs2 = self.process_next_expression(None)
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: make_callback(
            sent_index, word_indices, drs1, drs2
        )

    def _handle_eq(self):
        self.assertToken(self.token(), "(")
        var1 = self.parse_variable()
        self.assertToken(self.token(), ",")
        var2 = self.parse_variable()
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: BoxerEq(
            self.discourse_id, sent_index, word_indices, var1, var2
        )

    def _handle_whq(self):
        self.assertToken(self.token(), "(")
        self.assertToken(self.token(), "[")
        ans_types = []
        while self.token(0) != "]":
            cat = self.token()
            self.assertToken(self.token(), ":")
            if cat == "des":
                ans_types.append(self.token())
            elif cat == "num":
                ans_types.append("number")
                typ = self.token()
                if typ == "cou":
                    ans_types.append("count")
                else:
                    ans_types.append(typ)
            else:
                ans_types.append(self.token())
        self.token()  # swallow the ']'

        self.assertToken(self.token(), ",")
        d1 = self.process_next_expression(None)
        self.assertToken(self.token(), ",")
        ref = self.parse_variable()
        self.assertToken(self.token(), ",")
        d2 = self.process_next_expression(None)
        self.assertToken(self.token(), ")")
        return lambda sent_index, word_indices: BoxerWhq(
            self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2
        )

    def _make_merge_expression(self, sent_index, word_indices, drs1, drs2):
        return BoxerDrs(drs1.refs + drs2.refs, drs1.conds + drs2.conds)

    def _make_or_expression(self, sent_index, word_indices, drs1, drs2):
        return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2)

    def _make_imp_expression(self, sent_index, word_indices, drs1, drs2):
        return BoxerDrs(drs1.refs, drs1.conds, drs2)

    def parse_variable(self):
        var = self.token()
        assert re.match(r"^[exps]\d+$", var), var
        return var

    def parse_index(self):
        return int(self.token())

    def _sent_and_word_indices(self, indices):
        """
        Split 1000-based occurrence indices into per-sentence word indices.

        Index ``s*1000 + w`` encodes sentence ``s`` (1-based) and word ``w``
        (1-based); both are returned 0-based.

        :return: list of (sent_index, word_indices) tuples
        """
        # Integer division is required here: true division (`/`) would
        # produce fractional sentence indices such as 0.001 for index 1001.
        sent_indices = {(i // 1000) - 1 for i in indices if i >= 0}
        if sent_indices:
            pairs = []
            for sent_index in sent_indices:
                word_indices = [
                    (i % 1000) - 1 for i in indices if sent_index == (i // 1000) - 1
                ]
                pairs.append((sent_index, word_indices))
            return pairs
        else:
            word_indices = [(i % 1000) - 1 for i in indices]
            return [(None, word_indices)]
796
+
797
+
798
+ class BoxerDrsParser(DrtParser):
799
+ """
800
+ Reparse the str form of subclasses of ``AbstractBoxerDrs``
801
+ """
802
+
803
+ def __init__(self, discourse_id=None):
804
+ DrtParser.__init__(self)
805
+ self.discourse_id = discourse_id
806
+
807
+ def get_all_symbols(self):
808
+ return [
809
+ DrtTokens.OPEN,
810
+ DrtTokens.CLOSE,
811
+ DrtTokens.COMMA,
812
+ DrtTokens.OPEN_BRACKET,
813
+ DrtTokens.CLOSE_BRACKET,
814
+ ]
815
+
816
    def attempt_adjuncts(self, expression, context):
        # The serialized AbstractBoxerDrs form has no adjunct syntax, so the
        # expression is returned unchanged.
        return expression
818
+
819
+ def handle(self, tok, context):
820
+ try:
821
+ # if tok == 'drs':
822
+ # self.assertNextToken(DrtTokens.OPEN)
823
+ # label = int(self.token())
824
+ # self.assertNextToken(DrtTokens.COMMA)
825
+ # refs = list(map(int, self.handle_refs()))
826
+ # self.assertNextToken(DrtTokens.COMMA)
827
+ # conds = self.handle_conds(None)
828
+ # self.assertNextToken(DrtTokens.CLOSE)
829
+ # return BoxerDrs(label, refs, conds)
830
+ if tok == "pred":
831
+ self.assertNextToken(DrtTokens.OPEN)
832
+ disc_id = (
833
+ self.discourse_id if self.discourse_id is not None else self.token()
834
+ )
835
+ self.assertNextToken(DrtTokens.COMMA)
836
+ sent_id = self.nullableIntToken()
837
+ self.assertNextToken(DrtTokens.COMMA)
838
+ word_ids = list(map(int, self.handle_refs()))
839
+ self.assertNextToken(DrtTokens.COMMA)
840
+ variable = int(self.token())
841
+ self.assertNextToken(DrtTokens.COMMA)
842
+ name = self.token()
843
+ self.assertNextToken(DrtTokens.COMMA)
844
+ pos = self.token()
845
+ self.assertNextToken(DrtTokens.COMMA)
846
+ sense = int(self.token())
847
+ self.assertNextToken(DrtTokens.CLOSE)
848
+ return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense)
849
+ elif tok == "named":
850
+ self.assertNextToken(DrtTokens.OPEN)
851
+ disc_id = (
852
+ self.discourse_id if self.discourse_id is not None else self.token()
853
+ )
854
+ self.assertNextToken(DrtTokens.COMMA)
855
+ sent_id = int(self.token())
856
+ self.assertNextToken(DrtTokens.COMMA)
857
+ word_ids = map(int, self.handle_refs())
858
+ self.assertNextToken(DrtTokens.COMMA)
859
+ variable = int(self.token())
860
+ self.assertNextToken(DrtTokens.COMMA)
861
+ name = self.token()
862
+ self.assertNextToken(DrtTokens.COMMA)
863
+ type = self.token()
864
+ self.assertNextToken(DrtTokens.COMMA)
865
+ sense = int(self.token())
866
+ self.assertNextToken(DrtTokens.CLOSE)
867
+ return BoxerNamed(
868
+ disc_id, sent_id, word_ids, variable, name, type, sense
869
+ )
870
+ elif tok == "rel":
871
+ self.assertNextToken(DrtTokens.OPEN)
872
+ disc_id = (
873
+ self.discourse_id if self.discourse_id is not None else self.token()
874
+ )
875
+ self.assertNextToken(DrtTokens.COMMA)
876
+ sent_id = self.nullableIntToken()
877
+ self.assertNextToken(DrtTokens.COMMA)
878
+ word_ids = list(map(int, self.handle_refs()))
879
+ self.assertNextToken(DrtTokens.COMMA)
880
+ var1 = int(self.token())
881
+ self.assertNextToken(DrtTokens.COMMA)
882
+ var2 = int(self.token())
883
+ self.assertNextToken(DrtTokens.COMMA)
884
+ rel = self.token()
885
+ self.assertNextToken(DrtTokens.COMMA)
886
+ sense = int(self.token())
887
+ self.assertNextToken(DrtTokens.CLOSE)
888
+ return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense)
889
+ elif tok == "prop":
890
+ self.assertNextToken(DrtTokens.OPEN)
891
+ disc_id = (
892
+ self.discourse_id if self.discourse_id is not None else self.token()
893
+ )
894
+ self.assertNextToken(DrtTokens.COMMA)
895
+ sent_id = int(self.token())
896
+ self.assertNextToken(DrtTokens.COMMA)
897
+ word_ids = list(map(int, self.handle_refs()))
898
+ self.assertNextToken(DrtTokens.COMMA)
899
+ variable = int(self.token())
900
+ self.assertNextToken(DrtTokens.COMMA)
901
+ drs = self.process_next_expression(None)
902
+ self.assertNextToken(DrtTokens.CLOSE)
903
+ return BoxerProp(disc_id, sent_id, word_ids, variable, drs)
904
+ elif tok == "not":
905
+ self.assertNextToken(DrtTokens.OPEN)
906
+ drs = self.process_next_expression(None)
907
+ self.assertNextToken(DrtTokens.CLOSE)
908
+ return BoxerNot(drs)
909
+ elif tok == "imp":
910
+ self.assertNextToken(DrtTokens.OPEN)
911
+ drs1 = self.process_next_expression(None)
912
+ self.assertNextToken(DrtTokens.COMMA)
913
+ drs2 = self.process_next_expression(None)
914
+ self.assertNextToken(DrtTokens.CLOSE)
915
+ return BoxerDrs(drs1.refs, drs1.conds, drs2)
916
+ elif tok == "or":
917
+ self.assertNextToken(DrtTokens.OPEN)
918
+ disc_id = (
919
+ self.discourse_id if self.discourse_id is not None else self.token()
920
+ )
921
+ self.assertNextToken(DrtTokens.COMMA)
922
+ sent_id = self.nullableIntToken()
923
+ self.assertNextToken(DrtTokens.COMMA)
924
+ word_ids = map(int, self.handle_refs())
925
+ self.assertNextToken(DrtTokens.COMMA)
926
+ drs1 = self.process_next_expression(None)
927
+ self.assertNextToken(DrtTokens.COMMA)
928
+ drs2 = self.process_next_expression(None)
929
+ self.assertNextToken(DrtTokens.CLOSE)
930
+ return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2)
931
+ elif tok == "eq":
932
+ self.assertNextToken(DrtTokens.OPEN)
933
+ disc_id = (
934
+ self.discourse_id if self.discourse_id is not None else self.token()
935
+ )
936
+ self.assertNextToken(DrtTokens.COMMA)
937
+ sent_id = self.nullableIntToken()
938
+ self.assertNextToken(DrtTokens.COMMA)
939
+ word_ids = list(map(int, self.handle_refs()))
940
+ self.assertNextToken(DrtTokens.COMMA)
941
+ var1 = int(self.token())
942
+ self.assertNextToken(DrtTokens.COMMA)
943
+ var2 = int(self.token())
944
+ self.assertNextToken(DrtTokens.CLOSE)
945
+ return BoxerEq(disc_id, sent_id, word_ids, var1, var2)
946
+ elif tok == "card":
947
+ self.assertNextToken(DrtTokens.OPEN)
948
+ disc_id = (
949
+ self.discourse_id if self.discourse_id is not None else self.token()
950
+ )
951
+ self.assertNextToken(DrtTokens.COMMA)
952
+ sent_id = self.nullableIntToken()
953
+ self.assertNextToken(DrtTokens.COMMA)
954
+ word_ids = map(int, self.handle_refs())
955
+ self.assertNextToken(DrtTokens.COMMA)
956
+ var = int(self.token())
957
+ self.assertNextToken(DrtTokens.COMMA)
958
+ value = self.token()
959
+ self.assertNextToken(DrtTokens.COMMA)
960
+ type = self.token()
961
+ self.assertNextToken(DrtTokens.CLOSE)
962
+ return BoxerCard(disc_id, sent_id, word_ids, var, value, type)
963
+ elif tok == "whq":
964
+ self.assertNextToken(DrtTokens.OPEN)
965
+ disc_id = (
966
+ self.discourse_id if self.discourse_id is not None else self.token()
967
+ )
968
+ self.assertNextToken(DrtTokens.COMMA)
969
+ sent_id = self.nullableIntToken()
970
+ self.assertNextToken(DrtTokens.COMMA)
971
+ word_ids = list(map(int, self.handle_refs()))
972
+ self.assertNextToken(DrtTokens.COMMA)
973
+ ans_types = self.handle_refs()
974
+ self.assertNextToken(DrtTokens.COMMA)
975
+ drs1 = self.process_next_expression(None)
976
+ self.assertNextToken(DrtTokens.COMMA)
977
+ var = int(self.token())
978
+ self.assertNextToken(DrtTokens.COMMA)
979
+ drs2 = self.process_next_expression(None)
980
+ self.assertNextToken(DrtTokens.CLOSE)
981
+ return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2)
982
+ except Exception as e:
983
+ raise LogicalExpressionException(self._currentIndex, str(e)) from e
984
+ assert False, repr(tok)
985
+
986
+ def nullableIntToken(self):
987
+ t = self.token()
988
+ return int(t) if t != "None" else None
989
+
990
    def get_next_token_variable(self, description):
        """Return the next raw token, reporting exhaustion as a missing variable.

        ``description`` is accepted but not used by this implementation.

        :raises ExpectedMoreTokensException: re-raised with the message
            'Variable expected.' (original index and cause preserved) when
            the token stream is exhausted.
        """
        try:
            return self.token()
        except ExpectedMoreTokensException as e:
            raise ExpectedMoreTokensException(e.index, "Variable expected.") from e
995
+
996
+
997
class AbstractBoxerDrs:
    """Base class for the intermediate representation of Boxer output.

    Subclasses override ``_variables``/``atoms``/``clean``/
    ``renumber_sentences``; the defaults here describe an expression with
    no variables, no atoms, and nothing to rewrite.
    """

    def variables(self):
        """
        :return: (set<variables>, set<events>, set<propositions>)
        """
        zs, es, ps = self._variables()
        # Events and propositions are removed from the plain-variable set,
        # and events take precedence over propositions.
        return (zs - (es | ps), es, ps - es)

    def variable_types(self):
        # Map each variable to a one-letter sort tag: z=entity, e=event,
        # p=proposition, following the partition made by variables().
        return {v: t for t, vs in zip("zep", self.variables()) for v in vs}

    def _variables(self):
        """
        :return: (set<variables>, set<events>, set<propositions>)
        """
        return (set(), set(), set())

    def atoms(self):
        # Leaf conditions contribute themselves; the bare base contributes none.
        return set()

    def clean(self):
        # No names to normalise at this level.
        return self

    def _clean_name(self, name):
        # Normalise characters that are not valid in logic identifiers.
        return name.translate(str.maketrans({"-": "_", "'": "_"}))

    def renumber_sentences(self, f):
        # Nothing carries a sentence index at this level.
        return self

    def __hash__(self):
        return hash(str(self))
1032
+
1033
+
1034
class BoxerDrs(AbstractBoxerDrs):
    """A discourse representation structure: referents, conditions, and an
    optional consequent (making the DRS an implication)."""

    def __init__(self, refs, conds, consequent=None):
        """
        :param refs: list of discourse referents
        :param conds: list of ``AbstractBoxerDrs`` conditions
        :param consequent: optional ``AbstractBoxerDrs`` consequent of an
            implication (``None`` for a plain DRS)
        """
        AbstractBoxerDrs.__init__(self)
        self.refs = refs
        self.conds = conds
        self.consequent = consequent

    def _variables(self):
        # Union the (variables, events, propositions) triples contributed
        # by every condition and, if present, by the consequent.
        variables = (set(), set(), set())
        for cond in self.conds:
            for s, v in zip(variables, cond._variables()):
                s.update(v)
        if self.consequent is not None:
            for s, v in zip(variables, self.consequent._variables()):
                s.update(v)
        return variables

    def atoms(self):
        atoms = reduce(operator.or_, (cond.atoms() for cond in self.conds), set())
        if self.consequent is not None:
            atoms.update(self.consequent.atoms())
        return atoms

    def clean(self):
        # Rebuild with name-normalised conditions (and consequent, if any).
        consequent = self.consequent.clean() if self.consequent else None
        return BoxerDrs(self.refs, [c.clean() for c in self.conds], consequent)

    def renumber_sentences(self, f):
        consequent = self.consequent.renumber_sentences(f) if self.consequent else None
        return BoxerDrs(
            self.refs, [c.renumber_sentences(f) for c in self.conds], consequent
        )

    def __repr__(self):
        s = "drs([{}], [{}])".format(
            ", ".join("%s" % r for r in self.refs),
            ", ".join("%s" % c for c in self.conds),
        )
        if self.consequent is not None:
            s = f"imp({s}, {self.consequent})"
        return s

    def __eq__(self, other):
        # BUGFIX: previously used reduce(operator.and_, ...) over the zipped
        # conditions, which raises TypeError for two DRSs with empty
        # condition lists; all() handles the empty case correctly.
        return (
            self.__class__ == other.__class__
            and self.refs == other.refs
            and len(self.conds) == len(other.conds)
            and all(c1 == c2 for c1, c2 in zip(self.conds, other.conds))
            and self.consequent == other.consequent
        )

    def __ne__(self, other):
        return not self == other

    __hash__ = AbstractBoxerDrs.__hash__
1091
+
1092
+
1093
class BoxerNot(AbstractBoxerDrs):
    """Negation of an embedded DRS; delegates everything to ``self.drs``."""

    def __init__(self, drs):
        super().__init__()
        self.drs = drs

    def _variables(self):
        return self.drs._variables()

    def atoms(self):
        return self.drs.atoms()

    def clean(self):
        return BoxerNot(self.drs.clean())

    def renumber_sentences(self, f):
        return BoxerNot(self.drs.renumber_sentences(f))

    def __repr__(self):
        return f"not({self.drs})"

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self.drs == other.drs

    def __ne__(self, other):
        return not self == other

    __hash__ = AbstractBoxerDrs.__hash__
1120
+
1121
+
1122
class BoxerIndexed(AbstractBoxerDrs):
    """Base class for atomic Boxer conditions that carry provenance:
    a discourse id, a sentence index, and the word indices that produced
    them.  Subclasses implement ``__iter__`` (their payload fields) and
    ``_pred`` (their surface predicate name)."""

    def __init__(self, discourse_id, sent_index, word_indices):
        AbstractBoxerDrs.__init__(self)
        self.discourse_id = discourse_id
        self.sent_index = sent_index
        self.word_indices = word_indices

    def atoms(self):
        # An indexed condition is itself an atom.
        return {self}

    def __eq__(self, other):
        # BUGFIX/consistency: use all() instead of reduce(operator.and_, ...)
        # so an empty payload iteration cannot raise TypeError, matching the
        # BoxerDrs.__eq__ idiom.
        return (
            self.__class__ == other.__class__
            and self.discourse_id == other.discourse_id
            and self.sent_index == other.sent_index
            and self.word_indices == other.word_indices
            and all(s == o for s, o in zip(self, other))
        )

    def __ne__(self, other):
        return not self == other

    __hash__ = AbstractBoxerDrs.__hash__

    def __repr__(self):
        s = "{}({}, {}, [{}]".format(
            self._pred(),
            self.discourse_id,
            self.sent_index,
            ", ".join("%s" % wi for wi in self.word_indices),
        )
        for v in self:
            s += ", %s" % v
        return s + ")"
1156
+
1157
+
1158
class BoxerPred(BoxerIndexed):
    """A Boxer ``pred`` condition: a unary predicate (``name``/``pos``/
    ``sense``) applied to discourse variable ``var``."""

    def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.name = name
        self.pos = pos
        self.sense = sense

    def _variables(self):
        # The single argument is a plain (non-event) variable.
        return ({self.var}, set(), set())

    def change_var(self, var):
        # Copy of this condition applied to a different variable.
        return BoxerPred(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            var,
            self.name,
            self.pos,
            self.sense,
        )

    def clean(self):
        # Copy with the predicate name normalised for logic identifiers.
        return BoxerPred(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.var,
            self._clean_name(self.name),
            self.pos,
            self.sense,
        )

    def renumber_sentences(self, f):
        # Copy with the sentence index mapped through ``f``.
        new_sent_index = f(self.sent_index)
        return BoxerPred(
            self.discourse_id,
            new_sent_index,
            self.word_indices,
            self.var,
            self.name,
            self.pos,
            self.sense,
        )

    def __iter__(self):
        # Payload fields, in the order used by __eq__ and __repr__.
        return iter((self.var, self.name, self.pos, self.sense))

    def _pred(self):
        return "pred"
1208
+
1209
+
1210
class BoxerNamed(BoxerIndexed):
    """A Boxer ``named`` condition: a named-entity mention of ``var`` with
    a name, an entity ``type``, and a word sense."""

    def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.name = name
        self.type = type
        self.sense = sense

    def _variables(self):
        # The entity variable is a plain (non-event) variable.
        return ({self.var}, set(), set())

    def change_var(self, var):
        # Copy of this condition applied to a different variable.
        return BoxerNamed(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            var,
            self.name,
            self.type,
            self.sense,
        )

    def clean(self):
        # Copy with the entity name normalised for logic identifiers.
        return BoxerNamed(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.var,
            self._clean_name(self.name),
            self.type,
            self.sense,
        )

    def renumber_sentences(self, f):
        # Copy with the sentence index mapped through ``f``.
        return BoxerNamed(
            self.discourse_id,
            f(self.sent_index),
            self.word_indices,
            self.var,
            self.name,
            self.type,
            self.sense,
        )

    def __iter__(self):
        # Payload fields, in the order used by __eq__ and __repr__.
        return iter((self.var, self.name, self.type, self.sense))

    def _pred(self):
        return "named"
1259
+
1260
+
1261
class BoxerRel(BoxerIndexed):
    """A Boxer ``rel`` condition: binary relation ``rel`` holding between
    discourse variables ``var1`` and ``var2``."""

    def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var1 = var1
        self.var2 = var2
        self.rel = rel
        self.sense = sense

    def _variables(self):
        # Both arguments are plain (non-event) variables.
        return ({self.var1, self.var2}, set(), set())

    def clean(self):
        # Copy with the relation name normalised for logic identifiers.
        return BoxerRel(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.var1,
            self.var2,
            self._clean_name(self.rel),
            self.sense,
        )

    def renumber_sentences(self, f):
        # Copy with the sentence index mapped through ``f``.
        return BoxerRel(
            self.discourse_id,
            f(self.sent_index),
            self.word_indices,
            self.var1,
            self.var2,
            self.rel,
            self.sense,
        )

    def __iter__(self):
        # Payload fields, in the order used by __eq__ and __repr__.
        return iter((self.var1, self.var2, self.rel, self.sense))

    def _pred(self):
        return "rel"
1299
+
1300
+
1301
class BoxerProp(BoxerIndexed):
    """A Boxer ``prop`` condition: variable ``var`` labels the embedded
    DRS ``drs`` (a propositional argument)."""

    def __init__(self, discourse_id, sent_index, word_indices, var, drs):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.drs = drs

    def _variables(self):
        # ``var`` is a proposition variable; merge it with whatever the
        # embedded DRS contributes (elementwise union of the triples).
        return tuple(
            map(operator.or_, (set(), set(), {self.var}), self.drs._variables())
        )

    def referenced_labels(self):
        # The embedded DRS is the (single) label this proposition refers to.
        return {self.drs}

    def atoms(self):
        return self.drs.atoms()

    def clean(self):
        # Copy with the embedded DRS recursively cleaned.
        return BoxerProp(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.var,
            self.drs.clean(),
        )

    def renumber_sentences(self, f):
        # Copy with sentence indices renumbered here and inside the DRS.
        return BoxerProp(
            self.discourse_id,
            f(self.sent_index),
            self.word_indices,
            self.var,
            self.drs.renumber_sentences(f),
        )

    def __iter__(self):
        # Payload fields, in the order used by __eq__ and __repr__.
        return iter((self.var, self.drs))

    def _pred(self):
        return "prop"
1341
+
1342
+
1343
class BoxerEq(BoxerIndexed):
    """An equality condition between two discourse variables."""

    def __init__(self, discourse_id, sent_index, word_indices, var1, var2):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var1 = var1
        self.var2 = var2

    def _variables(self):
        return ({self.var1, self.var2}, set(), set())

    def atoms(self):
        # Equalities are not counted as atoms.
        return set()

    def renumber_sentences(self, f):
        remapped = f(self.sent_index)
        return BoxerEq(
            self.discourse_id,
            remapped,
            self.word_indices,
            self.var1,
            self.var2,
        )

    def __iter__(self):
        yield self.var1
        yield self.var2

    def _pred(self):
        return "eq"
1369
+
1370
+
1371
class BoxerCard(BoxerIndexed):
    """A cardinality condition: variable ``var`` has ``value`` elements,
    qualified by ``type``."""

    def __init__(self, discourse_id, sent_index, word_indices, var, value, type):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.value = value
        self.type = type

    def _variables(self):
        return ({self.var}, set(), set())

    def renumber_sentences(self, f):
        remapped = f(self.sent_index)
        return BoxerCard(
            self.discourse_id,
            remapped,
            self.word_indices,
            self.var,
            self.value,
            self.type,
        )

    def __iter__(self):
        yield self.var
        yield self.value
        yield self.type

    def _pred(self):
        return "card"
1396
+
1397
+
1398
class BoxerOr(BoxerIndexed):
    """A disjunction of two embedded DRSs."""

    def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.drs1 = drs1
        self.drs2 = drs2

    def _variables(self):
        # Elementwise union of the two disjuncts' (vars, events, props) triples.
        return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables()))

    def atoms(self):
        return self.drs1.atoms() | self.drs2.atoms()

    def clean(self):
        # Copy with both disjuncts recursively cleaned.
        return BoxerOr(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.drs1.clean(),
            self.drs2.clean(),
        )

    def renumber_sentences(self, f):
        # NOTE(review): only this node's sent_index is remapped; the embedded
        # DRSs are NOT renumbered here (unlike BoxerProp) — confirm intended.
        return BoxerOr(
            self.discourse_id,
            f(self.sent_index),
            self.word_indices,
            self.drs1,
            self.drs2,
        )

    def __iter__(self):
        # Payload fields, in the order used by __eq__ and __repr__.
        return iter((self.drs1, self.drs2))

    def _pred(self):
        return "or"
1433
+
1434
+
1435
class BoxerWhq(BoxerIndexed):
    """A wh-question condition: answer types, a restrictor DRS, the
    questioned variable, and a body DRS."""

    def __init__(
        self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2
    ):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.ans_types = ans_types
        self.drs1 = drs1
        self.variable = variable
        self.drs2 = drs2

    def _variables(self):
        # BUGFIX: the previous implementation passed three iterables to
        # map(operator.or_, ...); operator.or_ is strictly binary, so
        # calling variables() on a BoxerWhq raised
        # "TypeError: or_ expected 2 arguments".  Union the questioned
        # variable's triple with both DRSs' triples pairwise instead.
        variables = ({self.variable}, set(), set())
        for acc, v1, v2 in zip(
            variables, self.drs1._variables(), self.drs2._variables()
        ):
            acc.update(v1)
            acc.update(v2)
        return variables

    def atoms(self):
        return self.drs1.atoms() | self.drs2.atoms()

    def clean(self):
        # Copy with both embedded DRSs recursively cleaned.
        return BoxerWhq(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.ans_types,
            self.drs1.clean(),
            self.variable,
            self.drs2.clean(),
        )

    def renumber_sentences(self, f):
        # Copy with this node's sentence index mapped through ``f``.
        return BoxerWhq(
            self.discourse_id,
            f(self.sent_index),
            self.word_indices,
            self.ans_types,
            self.drs1,
            self.variable,
            self.drs2,
        )

    def __iter__(self):
        # The answer types are rendered as a single bracketed string so that
        # __repr__ prints them Prolog-style.
        return iter(
            ("[" + ",".join(self.ans_types) + "]", self.drs1, self.variable, self.drs2)
        )

    def _pred(self):
        return "whq"
1487
+
1488
+
1489
class PassthroughBoxerDrsInterpreter:
    """Identity interpreter: hands back the Boxer expression unchanged."""

    def interpret(self, ex):
        return ex
1492
+
1493
+
1494
class NltkDrtBoxerDrsInterpreter:
    """Converts the intermediate ``AbstractBoxerDrs`` representation into
    NLTK DRT expressions."""

    def __init__(self, occur_index=False):
        # When True, predicate names get discourse/sentence/word-index
        # suffixes so distinct occurrences stay distinguishable.
        self._occur_index = occur_index

    def interpret(self, ex):
        """
        :param ex: ``AbstractBoxerDrs``
        :return: ``DrtExpression``
        """
        if isinstance(ex, BoxerDrs):
            drs = DRS(
                [Variable(r) for r in ex.refs], list(map(self.interpret, ex.conds))
            )
            if ex.consequent is not None:
                # DRS has no consequent constructor argument; it is set
                # as an attribute after construction.
                drs.consequent = self.interpret(ex.consequent)
            return drs
        elif isinstance(ex, BoxerNot):
            return DrtNegatedExpression(self.interpret(ex.drs))
        elif isinstance(ex, BoxerPred):
            # e.g. "n_dog" from pos="n", name="dog".
            pred = self._add_occur_indexing(f"{ex.pos}_{ex.name}", ex)
            return self._make_atom(pred, ex.var)
        elif isinstance(ex, BoxerNamed):
            # e.g. "ne_per_john" for a PERson named-entity.
            pred = self._add_occur_indexing(f"ne_{ex.type}_{ex.name}", ex)
            return self._make_atom(pred, ex.var)
        elif isinstance(ex, BoxerRel):
            pred = self._add_occur_indexing("%s" % (ex.rel), ex)
            return self._make_atom(pred, ex.var1, ex.var2)
        elif isinstance(ex, BoxerProp):
            return DrtProposition(Variable(ex.var), self.interpret(ex.drs))
        elif isinstance(ex, BoxerEq):
            return DrtEqualityExpression(
                DrtVariableExpression(Variable(ex.var1)),
                DrtVariableExpression(Variable(ex.var2)),
            )
        elif isinstance(ex, BoxerCard):
            pred = self._add_occur_indexing(f"card_{ex.type}_{ex.value}", ex)
            return self._make_atom(pred, ex.var)
        elif isinstance(ex, BoxerOr):
            return DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2))
        elif isinstance(ex, BoxerWhq):
            # A wh-question is flattened into a single DRS merging the
            # restrictor and body referents and conditions.
            drs1 = self.interpret(ex.drs1)
            drs2 = self.interpret(ex.drs2)
            return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds)
        # Unhandled expression type: fail loudly during development.
        assert False, f"{ex.__class__.__name__}: {ex}"

    def _make_atom(self, pred, *args):
        # Build pred(arg1, ..., argN) by repeated DRT application.
        accum = DrtVariableExpression(Variable(pred))
        for arg in args:
            accum = DrtApplicationExpression(
                accum, DrtVariableExpression(Variable(arg))
            )
        return accum

    def _add_occur_indexing(self, base, ex):
        # Suffix the predicate name with provenance (discourse id, sentence
        # index, first word index) when occurrence indexing is enabled.
        if self._occur_index and ex.sent_index is not None:
            if ex.discourse_id:
                base += "_%s" % ex.discourse_id
            base += "_s%s" % ex.sent_index
            base += "_w%s" % sorted(ex.word_indices)[0]
        return base
1554
+
1555
+
1556
class UnparseableInputException(Exception):
    """Signals input that could not be parsed."""
1558
+
1559
+
1560
if __name__ == "__main__":
    # Command-line driver: interpret TEXT with Boxer and print the DRS
    # (or its first-order translation with --fol).
    opts = OptionParser("usage: %prog TEXT [options]")
    opts.add_option(
        "--verbose",
        "-v",
        help="display verbose logs",
        action="store_true",
        default=False,
        dest="verbose",
    )
    opts.add_option(
        "--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol"
    )
    opts.add_option(
        "--question",
        "-q",
        help="input is a question",
        action="store_true",
        default=False,
        dest="question",
    )
    opts.add_option(
        "--occur",
        "-o",
        help="occurrence index",
        action="store_true",
        default=False,
        dest="occur_index",
    )
    (options, args) = opts.parse_args()

    if len(args) != 1:
        opts.error("incorrect number of arguments")

    interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index)
    # The raw string r"\n" splits on a LITERAL backslash-n, so multiple
    # sentences are passed on the command line as 'S1\nS2'.
    drs = Boxer(interpreter).interpret_multi(
        args[0].split(r"\n"), question=options.question, verbose=options.verbose
    )
    if drs is None:
        print(None)
    else:
        drs = drs.simplify().eliminate_equality()
        if options.fol:
            print(drs.fol().normalize())
        else:
            drs.pretty_print()
llmeval-env/lib/python3.10/site-packages/nltk/sem/chat80.py ADDED
@@ -0,0 +1,857 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Chat-80 KB Reader
2
+ # See https://www.w3.org/TR/swbp-skos-core-guide/
3
+ #
4
+ # Copyright (C) 2001-2023 NLTK Project
5
+ # Author: Ewan Klein <[email protected]>,
6
+ # URL: <https://www.nltk.org>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ r"""
10
+ Overview
11
+ ========
12
+
13
+ Chat-80 was a natural language system which allowed the user to
14
+ interrogate a Prolog knowledge base in the domain of world
15
+ geography. It was developed in the early '80s by Warren and Pereira; see
16
+ ``https://www.aclweb.org/anthology/J82-3002.pdf`` for a description and
17
+ ``http://www.cis.upenn.edu/~pereira/oldies.html`` for the source
18
+ files.
19
+
20
+ This module contains functions to extract data from the Chat-80
21
+ relation files ('the world database'), and convert them into a format
22
+ that can be incorporated in the FOL models of
23
+ ``nltk.sem.evaluate``. The code assumes that the Prolog
24
+ input files are available in the NLTK corpora directory.
25
+
26
+ The Chat-80 World Database consists of the following files::
27
+
28
+ world0.pl
29
+ rivers.pl
30
+ cities.pl
31
+ countries.pl
32
+ contain.pl
33
+ borders.pl
34
+
35
+ This module uses a slightly modified version of ``world0.pl``, in which
36
+ a set of Prolog rules have been omitted. The modified file is named
37
+ ``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since
38
+ it uses a list rather than a string in the second field.
39
+
40
+ Reading Chat-80 Files
41
+ =====================
42
+
43
+ Chat-80 relations are like tables in a relational database. The
44
+ relation acts as the name of the table; the first argument acts as the
45
+ 'primary key'; and subsequent arguments are further fields in the
46
+ table. In general, the name of the table provides a label for a unary
47
+ predicate whose extension is all the primary keys. For example,
48
+ relations in ``cities.pl`` are of the following form::
49
+
50
+ 'city(athens,greece,1368).'
51
+
52
+ Here, ``'athens'`` is the key, and will be mapped to a member of the
53
+ unary predicate *city*.
54
+
55
+ The fields in the table are mapped to binary predicates. The first
56
+ argument of the predicate is the primary key, while the second
57
+ argument is the data in the relevant field. Thus, in the above
58
+ example, the third field is mapped to the binary predicate
59
+ *population_of*, whose extension is a set of pairs such as
60
+ ``'(athens, 1368)'``.
61
+
62
+ An exception to this general framework is required by the relations in
63
+ the files ``borders.pl`` and ``contains.pl``. These contain facts of the
64
+ following form::
65
+
66
+ 'borders(albania,greece).'
67
+
68
+ 'contains0(africa,central_africa).'
69
+
70
+ We do not want to form a unary concept out of the element in
71
+ the first field of these records, and we want the label of the binary
72
+ relation just to be ``'border'``/``'contain'`` respectively.
73
+
74
+ In order to drive the extraction process, we use 'relation metadata bundles'
75
+ which are Python dictionaries such as the following::
76
+
77
+ city = {'label': 'city',
78
+ 'closures': [],
79
+ 'schema': ['city', 'country', 'population'],
80
+ 'filename': 'cities.pl'}
81
+
82
+ According to this, the file ``city['filename']`` contains a list of
83
+ relational tuples (or more accurately, the corresponding strings in
84
+ Prolog form) whose predicate symbol is ``city['label']`` and whose
85
+ relational schema is ``city['schema']``. The notion of a ``closure`` is
86
+ discussed in the next section.
87
+
88
+ Concepts
89
+ ========
90
+ In order to encapsulate the results of the extraction, a class of
91
+ ``Concept`` objects is introduced. A ``Concept`` object has a number of
92
+ attributes, in particular a ``prefLabel`` and ``extension``, which make
93
+ it easier to inspect the output of the extraction. In addition, the
94
+ ``extension`` can be further processed: in the case of the ``'border'``
95
+ relation, we check that the relation is symmetric, and in the case
96
+ of the ``'contain'`` relation, we carry out the transitive
97
+ closure. The closure properties associated with a concept is
98
+ indicated in the relation metadata, as indicated earlier.
99
+
100
+ The ``extension`` of a ``Concept`` object is then incorporated into a
101
+ ``Valuation`` object.
102
+
103
+ Persistence
104
+ ===========
105
+ The functions ``val_dump`` and ``val_load`` are provided to allow a
106
+ valuation to be stored in a persistent database and re-loaded, rather
107
+ than having to be re-computed each time.
108
+
109
+ Individuals and Lexical Items
110
+ =============================
111
+ As well as deriving relations from the Chat-80 data, we also create a
112
+ set of individual constants, one for each entity in the domain. The
113
+ individual constants are string-identical to the entities. For
114
+ example, given a data item such as ``'zloty'``, we add to the valuation
115
+ a pair ``('zloty', 'zloty')``. In order to parse English sentences that
116
+ refer to these entities, we also create a lexical item such as the
117
+ following for each individual constant::
118
+
119
+ PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'
120
+
121
+ The set of rules is written to the file ``chat_pnames.cfg`` in the
122
+ current directory.
123
+
124
+ """
125
+
126
+ import os
127
+ import re
128
+ import shelve
129
+ import sys
130
+
131
+ import nltk.data
132
+
133
+ ###########################################################################
134
+ # Chat-80 relation metadata bundles needed to build the valuation
135
+ ###########################################################################
136
+
137
# Each metadata bundle describes one Chat-80 relation: 'rel_name' is the
# Prolog predicate, 'closures' lists closure properties to apply to the
# extracted extension, 'schema' names the fields (first field = primary
# key), and 'filename' is the Prolog source file.

borders = {
    "rel_name": "borders",
    "closures": ["symmetric"],
    "schema": ["region", "border"],
    "filename": "borders.pl",
}

contains = {
    "rel_name": "contains0",
    "closures": ["transitive"],
    "schema": ["region", "contain"],
    "filename": "contain.pl",
}

city = {
    "rel_name": "city",
    "closures": [],
    "schema": ["city", "country", "population"],
    "filename": "cities.pl",
}

country = {
    "rel_name": "country",
    "closures": [],
    "schema": [
        "country",
        "region",
        "latitude",
        "longitude",
        "area",
        "population",
        "capital",
        "currency",
    ],
    "filename": "countries.pl",
}

circle_of_lat = {
    "rel_name": "circle_of_latitude",
    "closures": [],
    "schema": ["circle_of_latitude", "degrees"],
    "filename": "world1.pl",
}

circle_of_long = {
    "rel_name": "circle_of_longitude",
    "closures": [],
    "schema": ["circle_of_longitude", "degrees"],
    "filename": "world1.pl",
}

continent = {
    "rel_name": "continent",
    "closures": [],
    "schema": ["continent"],
    "filename": "world1.pl",
}

region = {
    "rel_name": "in_continent",
    "closures": [],
    "schema": ["region", "continent"],
    "filename": "world1.pl",
}

ocean = {
    "rel_name": "ocean",
    "closures": [],
    "schema": ["ocean"],
    "filename": "world1.pl",
}

sea = {"rel_name": "sea", "closures": [], "schema": ["sea"], "filename": "world1.pl"}


# Names of all metadata bundles, kept in sorted order for deterministic
# iteration.
items = [
    "borders",
    "contains",
    "city",
    "country",
    "circle_of_lat",
    "circle_of_long",
    "continent",
    "region",
    "ocean",
    "sea",
]
items = tuple(sorted(items))

# Lookup from bundle name to its metadata dict.
item_metadata = {
    "borders": borders,
    "contains": contains,
    "city": city,
    "country": country,
    "circle_of_lat": circle_of_lat,
    "circle_of_long": circle_of_long,
    "continent": continent,
    "region": region,
    "ocean": ocean,
    "sea": sea,
}

rels = item_metadata.values()

# Files whose first field should NOT become a unary predicate (see the
# module docstring on 'borders'/'contains').
not_unary = ["borders.pl", "contain.pl"]
242
+
243
+ ###########################################################################
244
+
245
+
246
class Concept:
    """
    A Concept class, loosely based on SKOS
    (https://www.w3.org/TR/swbp-skos-core-guide/).
    """

    def __init__(self, prefLabel, arity, altLabels=None, closures=None, extension=None):
        """
        :param prefLabel: the preferred label for the concept
        :type prefLabel: str
        :param arity: the arity of the concept
        :type arity: int
        :param altLabels: other (related) labels
        :type altLabels: list
        :param closures: closure properties of the extension
            (list items can be ``symmetric``, ``reflexive``, ``transitive``)
        :type closures: list
        :param extension: the extensional value of the concept
        :type extension: set
        """
        self.prefLabel = prefLabel
        self.arity = arity
        # Use None sentinels instead of mutable default arguments: with
        # ``altLabels=[]``/``extension=set()`` the same objects would be
        # shared by every Concept created with the defaults, so augment()
        # on one instance would silently mutate all of them.
        self.altLabels = [] if altLabels is None else altLabels
        self.closures = [] if closures is None else closures
        # keep _extension internally as a set
        self._extension = set() if extension is None else extension
        # public access is via a sorted list (for slicing)
        self.extension = sorted(self._extension)

    def __str__(self):
        return "Label = '{}'\nArity = {}\nExtension = {}".format(
            self.prefLabel,
            self.arity,
            self.extension,
        )

    def __repr__(self):
        return "Concept('%s')" % self.prefLabel

    def augment(self, data):
        """
        Add more data to the ``Concept``'s extension set.

        :param data: a new semantic value
        :type data: string or pair of strings
        :rtype: set
        """
        self._extension.add(data)
        # keep the public sorted-list view in sync with the internal set
        self.extension = sorted(self._extension)
        return self._extension

    def _make_graph(self, s):
        """
        Convert a set of pairs into an adjacency linked list encoding of a graph.
        """
        g = {}
        for (x, y) in s:
            if x in g:
                g[x].append(y)
            else:
                g[x] = [y]
        return g

    def _transclose(self, g):
        """
        Compute the transitive closure of a graph represented as a linked list.
        """
        # Appending to g[x] while iterating over it means newly-added
        # nodes are themselves visited, so each adjacency list grows
        # until it holds everything reachable from x (BFS-style).
        for x in g:
            for adjacent in g[x]:
                # check that adjacent is a key
                if adjacent in g:
                    for y in g[adjacent]:
                        if y not in g[x]:
                            g[x].append(y)
        return g

    def _make_pairs(self, g):
        """
        Convert an adjacency linked list back into a set of pairs.
        """
        pairs = []
        for node in g:
            for adjacent in g[node]:
                pairs.append((node, adjacent))
        return set(pairs)

    def close(self):
        """
        Close a binary relation in the ``Concept``'s extension set.

        :return: a new extension for the ``Concept`` in which the
            relation is closed under a given property
        """
        from nltk.sem import is_rel

        assert is_rel(self._extension)
        if "symmetric" in self.closures:
            pairs = []
            for (x, y) in self._extension:
                pairs.append((y, x))
            sym = set(pairs)
            self._extension = self._extension.union(sym)
        if "transitive" in self.closures:
            all = self._make_graph(self._extension)
            closed = self._transclose(all)
            trans = self._make_pairs(closed)
            self._extension = self._extension.union(trans)
        self.extension = sorted(self._extension)
362
+
363
+
364
def clause2concepts(filename, rel_name, schema, closures=[]):
    """
    Convert a file of Prolog clauses into a list of ``Concept`` objects.

    :param filename: filename containing the relations
    :type filename: str
    :param rel_name: name of the relation
    :type rel_name: str
    :param schema: the schema used in a set of relational tuples
    :type schema: list
    :param closures: closure properties for the extension of the concept
    :type closures: list
    :return: a list of ``Concept`` objects
    :rtype: list
    """
    # the subject of a binary relation sits in the first field
    subj = 0
    # the 'primary key' is the first schema label; the remaining fields
    # each give rise to one binary concept
    pkey, fields = schema[0], schema[1:]

    # read the file into a list of records (one list per clause)
    records = _str2records(filename, rel_name)

    result = []
    # relations listed in 'not_unary' behave like ordinary binary
    # relations, so no unary concept is built for their primary key
    if filename not in not_unary:
        result.append(unary_concept(pkey, subj, records))

    # one binary concept for each non-key field
    for field in fields:
        result.append(
            binary_concept(field, closures, subj, schema.index(field), records)
        )

    return result
402
+
403
+
404
def cities2table(filename, rel_name, dbname, verbose=False, setup=False):
    """
    Convert a file of Prolog clauses into a database table.

    This is not generic, since it doesn't allow arbitrary
    schemas to be set as a parameter.

    Intended usage::

        cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True)

    :param filename: filename containing the relations
    :type filename: str
    :param rel_name: name of the relation
    :type rel_name: str
    :param dbname: filename of persistent store
    :type dbname: str
    :param verbose: if ``True``, print progress for each insert/commit
    :type verbose: bool
    :param setup: if ``True``, create the ``city_table`` table first
    :type setup: bool
    """
    import sqlite3

    records = _str2records(filename, rel_name)
    connection = sqlite3.connect(dbname)
    # Ensure the connection is released even if an insert fails; the
    # original closed only the cursor, leaking the connection.
    try:
        cur = connection.cursor()
        if setup:
            cur.execute(
                """CREATE TABLE city_table
                (City text, Country text, Population int)"""
            )

        table_name = "city_table"
        for t in records:
            # table_name is a fixed constant, so the %-interpolation is
            # safe; the row values themselves are parameterized.
            cur.execute("insert into %s values (?,?,?)" % table_name, t)
            if verbose:
                print("inserting values into %s: " % table_name, t)
        connection.commit()
        if verbose:
            print("Committing update to %s" % dbname)
        cur.close()
    finally:
        connection.close()
442
+
443
+
444
def sql_query(dbname, query):
    """
    Execute an SQL query over a database.

    :param dbname: filename of persistent store, resolved via ``nltk.data``
    :type dbname: str
    :param query: SQL query
    :type query: str
    :return: a live ``sqlite3`` cursor over the result rows; the caller
        iterates it (the underlying connection is intentionally left open)
    """
    import sqlite3

    try:
        # locate the database file inside the NLTK data path
        path = nltk.data.find(dbname)
        connection = sqlite3.connect(str(path))
        cur = connection.cursor()
        return cur.execute(query)
    except (ValueError, sqlite3.OperationalError):
        import warnings

        # ValueError: nltk.data.find failed; OperationalError: bad file/SQL
        warnings.warn(
            "Make sure the database file %s is installed and uncompressed." % dbname
        )
        raise
466
+
467
+
468
def _str2records(filename, rel):
    """
    Read a file into memory and convert each relation clause into a list.
    """
    contents = nltk.data.load("corpora/chat80/%s" % filename, format="text")
    records = []
    for line in contents.splitlines():
        # only keep clauses for the requested relation, e.g. "city(...)."
        if not line.startswith(rel):
            continue
        # strip the "rel(" prefix, then the trailing ").", then split fields
        stripped = re.sub(r"\)\.$", "", re.sub(rel + r"\(", "", line))
        records.append(stripped.split(","))
    return records
481
+
482
+
483
def unary_concept(label, subj, records):
    """
    Make a unary concept out of the primary key in a record.

    A record is a list of entities in some relation, such as
    ``['france', 'paris']``, where ``'france'`` is acting as the primary
    key.

    :param label: the preferred label for the concept
    :type label: string
    :param subj: position in the record of the subject of the predicate
    :type subj: int
    :param records: a list of records
    :type records: list of lists
    :return: ``Concept`` of arity 1
    :rtype: Concept
    """
    concept = Concept(label, arity=1, extension=set())
    # the extension is simply the set of subject fields
    for rec in records:
        concept.augment(rec[subj])
    return concept
504
+
505
+
506
def binary_concept(label, closures, subj, obj, records):
    """
    Make a binary concept out of the primary key and another field in a record.

    A record is a list of entities in some relation, such as
    ``['france', 'paris']``, where ``'france'`` is acting as the primary
    key, and ``'paris'`` stands in the ``'capital_of'`` relation to
    ``'france'``.

    More generally, given a record such as ``['a', 'b', 'c']``, where
    label is bound to ``'B'``, and ``obj`` bound to 1, the derived
    binary concept will have label ``'B_of'``, and its extension will
    be a set of pairs such as ``('a', 'b')``.

    :param label: the base part of the preferred label for the concept
    :type label: str
    :param closures: closure properties for the extension of the concept
    :type closures: list
    :param subj: position in the record of the subject of the predicate
    :type subj: int
    :param obj: position in the record of the object of the predicate
    :type obj: int
    :param records: a list of records
    :type records: list of lists
    :return: ``Concept`` of arity 2
    :rtype: Concept
    """
    # 'border' and 'contain' keep their bare names; every other field
    # becomes a relational label such as 'capital_of'
    if label not in ("border", "contain"):
        label = label + "_of"
    concept = Concept(label, arity=2, closures=closures, extension=set())
    for rec in records:
        concept.augment((rec[subj], rec[obj]))
    # close the extension under the properties listed in closures
    concept.close()
    return concept
542
+
543
+
544
def process_bundle(rels):
    """
    Given a list of relation metadata bundles, make a corresponding
    dictionary of concepts, indexed by the relation name.

    :param rels: bundle of metadata needed for constructing a concept
    :type rels: list(dict)
    :return: a dictionary of concepts, indexed by the relation name.
    :rtype: dict(str): Concept
    """
    concepts = {}
    for bundle in rels:
        concept_list = clause2concepts(
            bundle["filename"],
            bundle["rel_name"],
            bundle["schema"],
            bundle["closures"],
        )
        for concept in concept_list:
            label = concept.prefLabel
            existing = concepts.get(label)
            if existing is None:
                concepts[label] = concept
            else:
                # a concept with this label was already produced by
                # another bundle: merge extensions and re-close
                for item in concept.extension:
                    existing.augment(item)
                existing.close()
    return concepts
571
+
572
+
573
def make_valuation(concepts, read=False, lexicon=False):
    """
    Convert a list of ``Concept`` objects into a list of (label, extension) pairs;
    optionally create a ``Valuation`` object.

    :param concepts: concepts
    :type concepts: list(Concept)
    :param read: if ``True``, ``(symbol, set)`` pairs are read into a ``Valuation``
    :type read: bool
    :param lexicon: if ``True``, also write lexical rules (implies ``read``)
    :type lexicon: bool
    :rtype: list or Valuation
    """
    vals = [(concept.prefLabel, concept.extension) for concept in concepts]
    # requesting a lexicon only makes sense on a built Valuation
    if lexicon:
        read = True
    if not read:
        return vals
    from nltk.sem import Valuation

    val = Valuation({})
    val.update(vals)
    # add labels for individuals
    return label_indivs(val, lexicon=lexicon)
600
+
601
+
602
def val_dump(rels, db):
    """
    Make a ``Valuation`` from a list of relation metadata bundles and dump to
    persistent database.

    :param rels: bundle of metadata needed for constructing a concept
    :type rels: list of dict
    :param db: name of file to which data is written.
        The suffix '.db' will be automatically appended.
    :type db: str
    """
    # build the full valuation, then persist it as a fresh shelf ("n"
    # always creates a new, empty database)
    valuation = make_valuation(process_bundle(rels).values(), read=True)
    db_out = shelve.open(db, "n")
    db_out.update(valuation)
    db_out.close()
620
+
621
+
622
def val_load(db):
    """
    Load a ``Valuation`` from a persistent database.

    :param db: name of file from which data is read.
        The suffix '.db' should be omitted from the name.
    :type db: str
    """
    dbname = db + ".db"

    # bail out early with a readable message if the shelf is unreadable
    if not os.access(dbname, os.R_OK):
        sys.exit("Cannot read file: %s" % dbname)

    db_in = shelve.open(db)
    from nltk.sem import Valuation

    return Valuation(db_in)
641
+
642
+
643
+ # def alpha(str):
644
+ # """
645
+ # Utility to filter out non-alphabetic constants.
646
+
647
+ #:param str: candidate constant
648
+ #:type str: string
649
+ #:rtype: bool
650
+ # """
651
+ # try:
652
+ # int(str)
653
+ # return False
654
+ # except ValueError:
655
+ ## some unknown values in records are labeled '?'
656
+ # if not str == '?':
657
+ # return True
658
+
659
+
660
def label_indivs(valuation, lexicon=False):
    """
    Assign individual constants to the individuals in the domain of a ``Valuation``.

    Given a valuation with an entry of the form ``{'rel': {'a': True}}``,
    add a new entry ``{'a': 'a'}``.

    :type valuation: Valuation
    :rtype: Valuation
    """
    # every individual in the domain is named by itself
    pairs = [(entity, entity) for entity in valuation.domain]
    if lexicon:
        # additionally emit CFG lexical rules for the proper names
        with open("chat_pnames.cfg", "w") as outfile:
            outfile.writelines(make_lex(valuation.domain))
    # read the (name, individual) pairs into the valuation
    valuation.update(pairs)
    return valuation
682
+
683
+
684
def make_lex(symbols):
    """
    Create lexical CFG rules for each individual symbol.

    Given a valuation with an entry of the form ``{'zloty': 'zloty'}``,
    create a lexical rule for the proper name 'Zloty'.

    :param symbols: a list of individual constants in the semantic representation
    :type symbols: sequence -- set(str)
    :rtype: list(str)
    """
    header = """
##################################################################
# Lexical rules automatically generated by running 'chat80.py -x'.
##################################################################

"""
    template = r"PropN[num=sg, sem=<\P.(P %s)>] -> '%s'\n"
    # Title-case each underscore-separated part of the symbol to form
    # the surface proper name, e.g. 'united_states' -> 'United_States'.
    rules = [
        template % (symbol, "_".join(part.capitalize() for part in symbol.split("_")))
        for symbol in symbols
    ]
    return [header] + rules
712
+
713
+
714
+ ###########################################################################
715
+ # Interface function to emulate other corpus readers
716
+ ###########################################################################
717
+
718
+
719
def concepts(items=items):
    """
    Build a list of concepts corresponding to the relation names in ``items``.

    :param items: names of the Chat-80 relations to extract
    :type items: list(str)
    :return: the ``Concept`` objects which are extracted from the relations
    :rtype: list(Concept)
    """
    # accept a single relation name as well as a sequence of names
    if isinstance(items, str):
        items = (items,)

    bundles = [item_metadata[name] for name in items]
    return process_bundle(bundles).values()
735
+
736
+
737
+ ###########################################################################
738
+
739
+
740
def main():
    """
    Command-line driver: extract the Chat-80 data and, depending on the
    options, dump a ``Valuation`` to a shelf, load one back, print
    concepts/vocabulary, or write lexical rules.
    """
    import sys
    from optparse import OptionParser

    description = """
Extract data from the Chat-80 Prolog files and convert them into a
Valuation object for use in the NLTK semantics package.
    """

    opts = OptionParser(description=description)
    opts.set_defaults(verbose=True, lex=False, vocab=False)
    opts.add_option(
        "-s", "--store", dest="outdb", help="store a valuation in DB", metavar="DB"
    )
    opts.add_option(
        "-l",
        "--load",
        dest="indb",
        help="load a stored valuation from DB",
        metavar="DB",
    )
    opts.add_option(
        "-c",
        "--concepts",
        action="store_true",
        help="print concepts instead of a valuation",
    )
    opts.add_option(
        "-r",
        "--relation",
        dest="label",
        help="print concept with label REL (check possible labels with '-v' option)",
        metavar="REL",
    )
    opts.add_option(
        "-q",
        "--quiet",
        action="store_false",
        dest="verbose",
        help="don't print out progress info",
    )
    opts.add_option(
        "-x",
        "--lex",
        action="store_true",
        dest="lex",
        help="write a file of lexical entries for country names, then exit",
    )
    opts.add_option(
        "-v",
        "--vocab",
        action="store_true",
        dest="vocab",
        help="print out the vocabulary of concept labels and their arity, then exit",
    )

    (options, args) = opts.parse_args()
    # --store and --load cannot be combined: one writes, one reads
    if options.outdb and options.indb:
        opts.error("Options --store and --load are mutually exclusive")

    if options.outdb:
        # write the valuation to a persistent database
        if options.verbose:
            outdb = options.outdb + ".db"
            print("Dumping a valuation to %s" % outdb)
        val_dump(rels, options.outdb)
        sys.exit(0)
    else:
        # try to read in a valuation from a database
        if options.indb is not None:
            dbname = options.indb + ".db"
            if not os.access(dbname, os.R_OK):
                sys.exit("Cannot read file: %s" % dbname)
            else:
                valuation = val_load(options.indb)
        # we need to create the valuation from scratch
        else:
            # build some concepts
            concept_map = process_bundle(rels)
            concepts = concept_map.values()
            # just print out the vocabulary
            if options.vocab:
                items = sorted((c.arity, c.prefLabel) for c in concepts)
                for (arity, label) in items:
                    print(label, arity)
                sys.exit(0)
            # show all the concepts
            if options.concepts:
                for c in concepts:
                    print(c)
                    print()
            if options.label:
                print(concept_map[options.label])
                sys.exit(0)
            else:
                # turn the concepts into a Valuation
                if options.lex:
                    if options.verbose:
                        print("Writing out lexical rules")
                    make_valuation(concepts, lexicon=True)
                else:
                    valuation = make_valuation(concepts, read=True)
                    print(valuation)
843
+
844
+
845
def sql_demo():
    """
    Print out every row from the 'city.db' database.
    """
    print()
    print("Using SQL to extract rows from 'city.db' RDB.")
    rows = sql_query("corpora/city_database/city.db", "SELECT * FROM city_table")
    for row in rows:
        print(row)
853
+
854
+
855
if __name__ == "__main__":
    # Run the command-line interface, then the SQL demo.
    main()
    sql_demo()
llmeval-env/lib/python3.10/site-packages/nltk/sem/cooper_storage.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Cooper storage for Quantifier Ambiguity
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.parse import load_parser
9
+ from nltk.parse.featurechart import InstantiateVarsChart
10
+ from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable
11
+
12
+
13
class CooperStore:
    """
    A container for handling quantifier ambiguity via Cooper storage.
    """

    def __init__(self, featstruct):
        """
        :param featstruct: The value of the ``sem`` node in a tree from
            ``parse_with_bindops()``
        :type featstruct: FeatStruct (with features ``CORE`` and ``STORE``)
        """
        self.featstruct = featstruct
        self.readings = []
        try:
            self.core = featstruct["CORE"]
            self.store = featstruct["STORE"]
        except KeyError:
            print("%s is not a Cooper storage structure" % featstruct)

    def _permute(self, lst):
        """
        :return: An iterator over the permutations of the input list
        :type lst: list
        :rtype: iter
        """
        if not lst:
            yield ()
            return
        for position, head in enumerate(lst):
            # permute everything except the chosen head, then prepend it
            remainder = lst[:position] + lst[position + 1 :]
            for tail in self._permute(remainder):
                yield (head,) + tail

    def s_retrieve(self, trace=False):
        r"""
        Carry out S-Retrieval of binding operators in store. If hack=True,
        serialize the bindop and core as strings and reparse. Ugh.

        Each permutation of the store (i.e. list of binding operators) is
        taken to be a possible scoping of quantifiers. We iterate through the
        binding operators in each permutation, and successively apply them to
        the current term, starting with the core semantic representation,
        working from the inside out.

        Binding operators are of the form::

            bo(\P.all x.(man(x) -> P(x)),z1)
        """
        for index, store_perm in enumerate(self._permute(self.store)):
            if trace:
                print("Permutation %s" % (index + 1))
            reading = self.core
            for bindop in store_perm:
                # we just want the arguments that are wrapped by the 'bo' predicate
                quantifier, var_expr = tuple(bindop.args)
                # abstract over the current term with the bindop's variable,
                # then apply the quantifier to that abstraction
                reading = ApplicationExpression(
                    quantifier, LambdaExpression(var_expr.variable, reading)
                )
                if trace:
                    print("   ", reading)
                reading = reading.simplify()
            self.readings.append(reading)
78
+
79
+
80
def parse_with_bindops(sentence, grammar=None, trace=0):
    """
    Use a grammar with Binding Operators to parse a sentence.
    """
    # default to the book's Cooper-storage grammar
    if not grammar:
        grammar = "grammars/book_grammars/storage.fcfg"
    parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart)
    # tokenize on whitespace and parse
    return list(parser.parse(sentence.split()))
90
+
91
+
92
def demo():
    """Demonstrate Cooper storage on a scopally ambiguous sentence."""
    from nltk.sem import cooper_storage as cs

    sentence = "every girl chases a dog"
    # sentence = "a man gives a bone to every dog"
    print()
    print("Analysis of sentence '%s'" % sentence)
    print("=" * 50)
    trees = cs.parse_with_bindops(sentence, trace=0)
    for tree in trees:
        # the SEM feature holds the CORE/STORE structure
        semrep = cs.CooperStore(tree.label()["SEM"])
        print()
        print("Binding operators:")
        print("-" * 15)
        for s in semrep.store:
            print(s)
        print()
        print("Core:")
        print("-" * 15)
        print(semrep.core)
        print()
        print("S-Retrieval:")
        print("-" * 15)
        # populate semrep.readings, one per store permutation
        semrep.s_retrieve(trace=True)
        print("Readings:")
        print("-" * 15)

        for i, reading in enumerate(semrep.readings):
            print(f"{i + 1}: {reading}")
121
+
122
+
123
if __name__ == "__main__":
    # Run the Cooper-storage demonstration.
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/sem/drt.py ADDED
@@ -0,0 +1,1460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Discourse Representation Theory (DRT)
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import operator
10
+ from functools import reduce
11
+ from itertools import chain
12
+
13
+ from nltk.sem.logic import (
14
+ APP,
15
+ AbstractVariableExpression,
16
+ AllExpression,
17
+ AndExpression,
18
+ ApplicationExpression,
19
+ BinaryExpression,
20
+ BooleanExpression,
21
+ ConstantExpression,
22
+ EqualityExpression,
23
+ EventVariableExpression,
24
+ ExistsExpression,
25
+ Expression,
26
+ FunctionVariableExpression,
27
+ ImpExpression,
28
+ IndividualVariableExpression,
29
+ LambdaExpression,
30
+ LogicParser,
31
+ NegatedExpression,
32
+ OrExpression,
33
+ Tokens,
34
+ Variable,
35
+ is_eventvar,
36
+ is_funcvar,
37
+ is_indvar,
38
+ unique_variable,
39
+ )
40
+
41
+ # Import Tkinter-based modules if they are available
42
+ try:
43
+ from tkinter import Canvas, Tk
44
+ from tkinter.font import Font
45
+
46
+ from nltk.util import in_idle
47
+
48
+ except ImportError:
49
+ # No need to print a warning here, nltk.draw has already printed one.
50
+ pass
51
+
52
+
53
class DrtTokens(Tokens):
    # Token inventory for the DRT parser, extending the standard
    # first-order-logic tokens with DRS-specific punctuation.
    DRS = "DRS"
    DRS_CONC = "+"  # DRS concatenation operator
    PRONOUN = "PRO"  # predicate marking pronouns for anaphora resolution
    OPEN_BRACKET = "["
    CLOSE_BRACKET = "]"
    COLON = ":"

    PUNCT = [DRS_CONC, OPEN_BRACKET, CLOSE_BRACKET, COLON]

    SYMBOLS = Tokens.SYMBOLS + PUNCT

    TOKENS = Tokens.TOKENS + [DRS] + PUNCT
66
+
67
+
68
class DrtParser(LogicParser):
    """A lambda calculus expression parser."""

    def __init__(self):
        LogicParser.__init__(self)

        # Lower number = binds tighter; None is the sentinel for the
        # loosest (top-level) context.
        self.operator_precedence = dict(
            [(x, 1) for x in DrtTokens.LAMBDA_LIST]
            + [(x, 2) for x in DrtTokens.NOT_LIST]
            + [(APP, 3)]
            + [(x, 4) for x in DrtTokens.EQ_LIST + Tokens.NEQ_LIST]
            + [(DrtTokens.COLON, 5)]
            + [(DrtTokens.DRS_CONC, 6)]
            + [(x, 7) for x in DrtTokens.OR_LIST]
            + [(x, 8) for x in DrtTokens.IMP_LIST]
            + [(None, 9)]
        )

    def get_all_symbols(self):
        """This method exists to be overridden"""
        return DrtTokens.SYMBOLS

    def isvariable(self, tok):
        # Any token that is not a reserved DRT token counts as a variable.
        return tok not in DrtTokens.TOKENS

    def handle(self, tok, context):
        """This method is intended to be overridden for logics that
        use different operators or expressions.

        Dispatch on the current token; falls through (returns None) for
        tokens none of the branches recognize."""
        if tok in DrtTokens.NOT_LIST:
            return self.handle_negation(tok, context)

        elif tok in DrtTokens.LAMBDA_LIST:
            return self.handle_lambda(tok, context)

        elif tok == DrtTokens.OPEN:
            # "([" begins an anonymous DRS; a bare "(" is ordinary grouping
            if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET:
                return self.handle_DRS(tok, context)
            else:
                return self.handle_open(tok, context)

        elif tok.upper() == DrtTokens.DRS:
            # explicit "DRS([..],[..])" syntax
            self.assertNextToken(DrtTokens.OPEN)
            return self.handle_DRS(tok, context)

        elif self.isvariable(tok):
            # "v:" introduces a proposition condition; otherwise a variable
            if self.inRange(0) and self.token(0) == DrtTokens.COLON:
                return self.handle_prop(tok, context)
            else:
                return self.handle_variable(tok, context)

    def make_NegatedExpression(self, expression):
        return DrtNegatedExpression(expression)

    def handle_DRS(self, tok, context):
        # a DRS: a referent list followed by a condition list
        refs = self.handle_refs()
        if (
            self.inRange(0) and self.token(0) == DrtTokens.COMMA
        ):  # if there is a comma (it's optional)
            self.token()  # swallow the comma
        conds = self.handle_conds(context)
        self.assertNextToken(DrtTokens.CLOSE)
        return DRS(refs, conds, None)

    def handle_refs(self):
        # Parse "[x, y, ...]" into a list of quantified variables.
        self.assertNextToken(DrtTokens.OPEN_BRACKET)
        refs = []
        while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
            # Support expressions like: DRS([x y],C) == DRS([x,y],C)
            if refs and self.token(0) == DrtTokens.COMMA:
                self.token()  # swallow the comma
            refs.append(self.get_next_token_variable("quantified"))
        self.assertNextToken(DrtTokens.CLOSE_BRACKET)
        return refs

    def handle_conds(self, context):
        # Parse "[C1, C2, ...]" into a list of condition expressions.
        self.assertNextToken(DrtTokens.OPEN_BRACKET)
        conds = []
        while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
            # Support expressions like: DRS([x y],C) == DRS([x, y],C)
            if conds and self.token(0) == DrtTokens.COMMA:
                self.token()  # swallow the comma
            conds.append(self.process_next_expression(context))
        self.assertNextToken(DrtTokens.CLOSE_BRACKET)
        return conds

    def handle_prop(self, tok, context):
        # "v:DRS" — a proposition condition labeled by variable v.
        variable = self.make_VariableExpression(tok)
        self.assertNextToken(":")
        drs = self.process_next_expression(DrtTokens.COLON)
        return DrtProposition(variable, drs)

    def make_EqualityExpression(self, first, second):
        """This method serves as a hook for other logic parsers that
        have different equality expression classes"""
        return DrtEqualityExpression(first, second)

    def get_BooleanExpression_factory(self, tok):
        """This method serves as a hook for other logic parsers that
        have different boolean operators"""
        if tok == DrtTokens.DRS_CONC:
            return lambda first, second: DrtConcatenation(first, second, None)
        elif tok in DrtTokens.OR_LIST:
            return DrtOrExpression
        elif tok in DrtTokens.IMP_LIST:

            def make_imp_expression(first, second):
                # DRT implication: the consequent is stored on the
                # antecedent DRS/concatenation rather than as a separate node
                if isinstance(first, DRS):
                    return DRS(first.refs, first.conds, second)
                if isinstance(first, DrtConcatenation):
                    return DrtConcatenation(first.first, first.second, second)
                raise Exception("Antecedent of implication must be a DRS")

            return make_imp_expression
        else:
            return None

    def make_BooleanExpression(self, factory, first, second):
        return factory(first, second)

    def make_ApplicationExpression(self, function, argument):
        return DrtApplicationExpression(function, argument)

    def make_VariableExpression(self, name):
        return DrtVariableExpression(Variable(name))

    def make_LambdaExpression(self, variables, term):
        return DrtLambdaExpression(variables, term)
+
197
+
198
class DrtExpression:
    """
    This is the base abstract DRT Expression from which every DRT
    Expression extends.  It supplies operator overloads (``+`` for
    concatenation, ``|`` for disjunction, ``-`` for negation, ``>`` for
    implication) and shared conveniences such as pretty-printing and
    anaphora resolution.
    """

    # Shared parser instance used by the ``fromstring`` alternate constructor.
    _drt_parser = DrtParser()

    @classmethod
    def fromstring(cls, s):
        """Parse the string *s* into a DRT expression."""
        return cls._drt_parser.parse(s)

    def applyto(self, other):
        """Apply self (as a function) to *other*."""
        return DrtApplicationExpression(self, other)

    def __neg__(self):
        """``-expr`` builds the DRT negation of this expression."""
        return DrtNegatedExpression(self)

    def __and__(self, other):
        # Plain conjunction is not a DRT connective; concatenation (+) is
        # used instead, so ``&`` is deliberately unsupported.
        return NotImplemented

    def __or__(self, other):
        """``a | b`` builds a DRT disjunction."""
        assert isinstance(other, DrtExpression)
        return DrtOrExpression(self, other)

    def __gt__(self, other):
        """``a > b`` builds an implication whose antecedent must be a DRS
        (or a concatenation of DRSs); the consequent is stored on the
        antecedent structure itself."""
        assert isinstance(other, DrtExpression)
        if isinstance(self, DRS):
            return DRS(self.refs, self.conds, other)
        if isinstance(self, DrtConcatenation):
            return DrtConcatenation(self.first, self.second, other)
        raise Exception("Antecedent of implication must be a DRS")

    def equiv(self, other, prover=None):
        """
        Check for logical equivalence.
        Pass the expression (self <-> other) to the theorem prover.
        If the prover says it is valid, then the self and other are equal.

        :param other: an ``DrtExpression`` to check equality against
        :param prover: a ``nltk.inference.api.Prover``
        """
        assert isinstance(other, DrtExpression)

        # Equivalence is checked on the simplified first-order translations.
        f1 = self.simplify().fol()
        f2 = other.simplify().fol()
        return f1.equiv(f2, prover)

    @property
    def type(self):
        # DRT expressions are untyped; accessing .type is an error.
        raise AttributeError(
            "'%s' object has no attribute 'type'" % self.__class__.__name__
        )

    def typecheck(self, signature=None):
        raise NotImplementedError()

    def __add__(self, other):
        """``a + b`` builds a DRS concatenation (merge)."""
        return DrtConcatenation(self, other, None)

    def get_refs(self, recursive=False):
        """
        Return the set of discourse referents in this DRS.
        :param recursive: bool Also find discourse referents in subterms?
        :return: list of ``Variable`` objects
        """
        raise NotImplementedError()

    def is_pronoun_function(self):
        """Is self of the form "PRO(x)"?"""
        return (
            isinstance(self, DrtApplicationExpression)
            and isinstance(self.function, DrtAbstractVariableExpression)
            and self.function.variable.name == DrtTokens.PRONOUN
            and isinstance(self.argument, DrtIndividualVariableExpression)
        )

    def make_EqualityExpression(self, first, second):
        # Factory hook used by anaphora resolution.
        return DrtEqualityExpression(first, second)

    def make_VariableExpression(self, variable):
        # Factory hook used by alpha conversion and equality checks.
        return DrtVariableExpression(variable)

    def resolve_anaphora(self):
        """Replace pronoun conditions with equalities to possible antecedents."""
        return resolve_anaphora(self)

    def eliminate_equality(self):
        # Structurally rebuild this node with equality conditions removed
        # from all sub-expressions.
        return self.visit_structured(lambda e: e.eliminate_equality(), self.__class__)

    def pretty_format(self):
        """
        Draw the DRS
        :return: the pretty print string
        """
        return "\n".join(self._pretty())

    def pretty_print(self):
        # Convenience wrapper that prints the ASCII-art rendering.
        print(self.pretty_format())

    def draw(self):
        # Open a Tk window displaying this expression graphically.
        DrsDrawer(self).draw()
299
+
300
+
301
class DRS(DrtExpression, Expression):
    """A Discourse Representation Structure."""

    def __init__(self, refs, conds, consequent=None):
        """
        :param refs: list of ``DrtIndividualVariableExpression`` for the
            discourse referents
        :param conds: list of ``Expression`` for the conditions
        :param consequent: optional ``Expression``; when set, this DRS is the
            antecedent of an implication with that consequent
        """
        self.refs = refs
        self.conds = conds
        self.consequent = consequent

    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self."""
        if variable in self.refs:
            # if a bound variable is the thing being replaced
            if not replace_bound:
                # Bound occurrences are left alone unless explicitly requested.
                return self
            else:
                # Substitute the new variable for the referent itself as well
                # as for every occurrence in the conditions/consequent.
                i = self.refs.index(variable)
                if self.consequent:
                    consequent = self.consequent.replace(
                        variable, expression, True, alpha_convert
                    )
                else:
                    consequent = None
                return DRS(
                    self.refs[:i] + [expression.variable] + self.refs[i + 1 :],
                    [
                        cond.replace(variable, expression, True, alpha_convert)
                        for cond in self.conds
                    ],
                    consequent,
                )
        else:
            if alpha_convert:
                # any bound variable that appears in the expression must
                # be alpha converted to avoid a conflict
                for ref in set(self.refs) & expression.free():
                    newvar = unique_variable(ref)
                    newvarex = DrtVariableExpression(newvar)
                    i = self.refs.index(ref)
                    if self.consequent:
                        consequent = self.consequent.replace(
                            ref, newvarex, True, alpha_convert
                        )
                    else:
                        consequent = None
                    # NOTE: self is rebound to the alpha-converted DRS so the
                    # final substitution below operates on the renamed copy.
                    self = DRS(
                        self.refs[:i] + [newvar] + self.refs[i + 1 :],
                        [
                            cond.replace(ref, newvarex, True, alpha_convert)
                            for cond in self.conds
                        ],
                        consequent,
                    )

            # replace in the conditions
            if self.consequent:
                consequent = self.consequent.replace(
                    variable, expression, replace_bound, alpha_convert
                )
            else:
                consequent = None
            return DRS(
                self.refs,
                [
                    cond.replace(variable, expression, replace_bound, alpha_convert)
                    for cond in self.conds
                ],
                consequent,
            )

    def free(self):
        """:see: Expression.free()"""
        # Free variables of all conditions (and the consequent), minus the
        # referents bound by this DRS.
        conds_free = reduce(operator.or_, [c.free() for c in self.conds], set())
        if self.consequent:
            conds_free.update(self.consequent.free())
        return conds_free - set(self.refs)

    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        if recursive:
            conds_refs = self.refs + list(
                chain.from_iterable(c.get_refs(True) for c in self.conds)
            )
            if self.consequent:
                conds_refs.extend(self.consequent.get_refs(True))
            return conds_refs
        else:
            return self.refs

    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        parts = list(map(function, self.conds))
        if self.consequent:
            parts.append(function(self.consequent))
        return combinator(parts)

    def visit_structured(self, function, combinator):
        """:see: Expression.visit_structured()"""
        consequent = function(self.consequent) if self.consequent else None
        return combinator(self.refs, list(map(function, self.conds)), consequent)

    def eliminate_equality(self):
        """Remove ``x = y`` conditions between variables by substituting one
        variable for the other throughout the DRS."""
        drs = self
        i = 0
        while i < len(drs.conds):
            cond = drs.conds[i]
            if (
                isinstance(cond, EqualityExpression)
                and isinstance(cond.first, AbstractVariableExpression)
                and isinstance(cond.second, AbstractVariableExpression)
            ):
                # Drop the equality condition and its second referent, then
                # substitute first for second everywhere.
                drs = DRS(
                    list(set(drs.refs) - {cond.second.variable}),
                    drs.conds[:i] + drs.conds[i + 1 :],
                    drs.consequent,
                )
                if cond.second.variable != cond.first.variable:
                    drs = drs.replace(cond.second.variable, cond.first, False, False)
                # Restart the scan from the beginning: set i to -1 here so the
                # unconditional increment below brings it back to 0.
                i = 0
                i -= 1
            i += 1

        # Recurse into the remaining conditions, discarding any that simplify
        # to a completely empty DRS.
        conds = []
        for cond in drs.conds:
            new_cond = cond.eliminate_equality()
            new_cond_simp = new_cond.simplify()
            if (
                not isinstance(new_cond_simp, DRS)
                or new_cond_simp.refs
                or new_cond_simp.conds
                or new_cond_simp.consequent
            ):
                conds.append(new_cond)

        consequent = drs.consequent.eliminate_equality() if drs.consequent else None
        return DRS(drs.refs, conds, consequent)

    def fol(self):
        """Translate to first-order logic: implicative DRSs become
        universally quantified implications; plain DRSs become existentially
        quantified conjunctions."""
        if self.consequent:
            accum = None
            if self.conds:
                accum = reduce(AndExpression, [c.fol() for c in self.conds])

            if accum:
                accum = ImpExpression(accum, self.consequent.fol())
            else:
                accum = self.consequent.fol()

            # Referents of an antecedent DRS are universally quantified.
            for ref in self.refs[::-1]:
                accum = AllExpression(ref, accum)

            return accum

        else:
            if not self.conds:
                raise Exception("Cannot convert DRS with no conditions to FOL.")
            accum = reduce(AndExpression, [c.fol() for c in self.conds])
            for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]):
                accum = ExistsExpression(ref, accum)
            return accum

    def _pretty(self):
        """Render the DRS as a list of box-drawing lines."""
        refs_line = " ".join(self._order_ref_strings(self.refs))

        # Flatten each condition's non-blank pretty lines into one list.
        cond_lines = [
            cond
            for cond_line in [
                filter(lambda s: s.strip(), cond._pretty()) for cond in self.conds
            ]
            for cond in cond_line
        ]
        length = max([len(refs_line)] + list(map(len, cond_lines)))
        drs = (
            [
                " _" + "_" * length + "_ ",
                "| " + refs_line.ljust(length) + " |",
                "|-" + "-" * length + "-|",
            ]
            + ["| " + line.ljust(length) + " |" for line in cond_lines]
            + ["|_" + "_" * length + "_|"]
        )
        if self.consequent:
            return DrtBinaryExpression._assemble_pretty(
                drs, DrtTokens.IMP, self.consequent._pretty()
            )
        return drs

    def _order_ref_strings(self, refs):
        """Sort referent names by kind (other, event, function, individual)
        and, within a kind, by their numeric suffix."""
        strings = ["%s" % ref for ref in refs]
        ind_vars = []
        func_vars = []
        event_vars = []
        other_vars = []
        for s in strings:
            if is_indvar(s):
                ind_vars.append(s)
            elif is_funcvar(s):
                func_vars.append(s)
            elif is_eventvar(s):
                event_vars.append(s)
            else:
                other_vars.append(s)
        # The [x, -1][condition] idiom selects -1 (sort first) when the name
        # has no numeric suffix, otherwise the suffix value.
        return (
            sorted(other_vars)
            + sorted(event_vars, key=lambda v: int([v[2:], -1][len(v[2:]) == 0]))
            + sorted(func_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0])))
            + sorted(ind_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0])))
        )

    def __eq__(self, other):
        r"""Defines equality modulo alphabetic variance.
        If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
        if isinstance(other, DRS):
            if len(self.refs) == len(other.refs):
                converted_other = other
                # Rename other's referents to ours, then compare structurally.
                for (r1, r2) in zip(self.refs, converted_other.refs):
                    varex = self.make_VariableExpression(r1)
                    converted_other = converted_other.replace(r2, varex, True)
                if self.consequent == converted_other.consequent and len(
                    self.conds
                ) == len(converted_other.conds):
                    for c1, c2 in zip(self.conds, converted_other.conds):
                        if not (c1 == c2):
                            return False
                    return True
        return False

    def __ne__(self, other):
        return not self == other

    __hash__ = Expression.__hash__

    def __str__(self):
        drs = "([{}],[{}])".format(
            ",".join(self._order_ref_strings(self.refs)),
            ", ".join("%s" % cond for cond in self.conds),
        )  # map(str, self.conds)))
        if self.consequent:
            return (
                DrtTokens.OPEN
                + drs
                + " "
                + DrtTokens.IMP
                + " "
                + "%s" % self.consequent
                + DrtTokens.CLOSE
            )
        return drs
554
+
555
+
556
def DrtVariableExpression(variable):
    """
    This is a factory method that instantiates and returns a subtype of
    ``DrtAbstractVariableExpression`` appropriate for the given variable.
    """
    # Dispatch on the variable's name: individual, function and event
    # variables each get their own wrapper; everything else is a constant.
    name = variable.name
    if is_indvar(name):
        return DrtIndividualVariableExpression(variable)
    if is_funcvar(name):
        return DrtFunctionVariableExpression(variable)
    if is_eventvar(name):
        return DrtEventVariableExpression(variable)
    return DrtConstantExpression(variable)
569
+
570
+
571
class DrtAbstractVariableExpression(DrtExpression, AbstractVariableExpression):
    """Base class for DRT variable/constant expressions."""

    def fol(self):
        # A bare variable expression is already first-order.
        return self

    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        # Variables introduce no discourse referents of their own.
        return []

    def _pretty(self):
        # Render on the third line of a four-line band so the text aligns
        # with the operator row of pretty-printed binary expressions.
        text = "%s" % self
        padding = " " * len(text)
        return [padding, padding, text, padding]

    def eliminate_equality(self):
        # Nothing to eliminate in a bare variable.
        return self
586
+
587
+
588
class DrtIndividualVariableExpression(
    DrtAbstractVariableExpression, IndividualVariableExpression
):
    # DRT wrapper for individual variables (e.g. 'x', 'y'); all behavior
    # comes from the two parent classes.
    pass
592
+
593
+
594
class DrtFunctionVariableExpression(
    DrtAbstractVariableExpression, FunctionVariableExpression
):
    # DRT wrapper for function variables; all behavior comes from the
    # two parent classes.
    pass
598
+
599
+
600
class DrtEventVariableExpression(
    DrtIndividualVariableExpression, EventVariableExpression
):
    # DRT wrapper for event variables (e.g. 'e'); all behavior comes from
    # the two parent classes.
    pass
604
+
605
+
606
class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression):
    # DRT wrapper for constants; all behavior comes from the parent classes.
    pass
608
+
609
+
610
class DrtProposition(DrtExpression, Expression):
    """A labelled proposition ``prop(p, DRS)`` pairing a variable label
    with an embedded DRS."""

    def __init__(self, variable, drs):
        # variable: the proposition label; drs: the embedded DRS.
        self.variable = variable
        self.drs = drs

    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        if self.variable == variable:
            # The label itself is being replaced; it may only become another
            # variable, never a complex expression.
            assert isinstance(
                expression, DrtAbstractVariableExpression
            ), "Can only replace a proposition label with a variable"
            return DrtProposition(
                expression.variable,
                self.drs.replace(variable, expression, replace_bound, alpha_convert),
            )
        else:
            return DrtProposition(
                self.variable,
                self.drs.replace(variable, expression, replace_bound, alpha_convert),
            )

    def eliminate_equality(self):
        return DrtProposition(self.variable, self.drs.eliminate_equality())

    def get_refs(self, recursive=False):
        # The label is not a discourse referent; only the embedded DRS
        # contributes referents, and only when asked recursively.
        return self.drs.get_refs(True) if recursive else []

    def __eq__(self, other):
        return (
            self.__class__ == other.__class__
            and self.variable == other.variable
            and self.drs == other.drs
        )

    def __ne__(self, other):
        return not self == other

    __hash__ = Expression.__hash__

    def fol(self):
        return self.drs.fol()

    def _pretty(self):
        # Prefix the second line of the DRS box with "label:" and pad the
        # remaining lines so the box stays aligned.
        drs_s = self.drs._pretty()
        blank = " " * len("%s" % self.variable)
        return (
            [blank + " " + line for line in drs_s[:1]]
            + ["%s" % self.variable + ":" + line for line in drs_s[1:2]]
            + [blank + " " + line for line in drs_s[2:]]
        )

    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        return combinator([function(self.drs)])

    def visit_structured(self, function, combinator):
        """:see: Expression.visit_structured()"""
        return combinator(self.variable, function(self.drs))

    def __str__(self):
        return f"prop({self.variable}, {self.drs})"
670
+
671
+
672
class DrtNegatedExpression(DrtExpression, NegatedExpression):
    """DRT negation of an expression."""

    def fol(self):
        return NegatedExpression(self.term.fol())

    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        # Negation is transparent for referent collection.
        return self.term.get_refs(recursive)

    def _pretty(self):
        # Draw the DRS-style negation sign ("__|") against lines 3-4 of the
        # term's rendering, padding the remaining lines to keep alignment.
        term_lines = self.term._pretty()
        return (
            ["    " + line for line in term_lines[:2]]
            + ["__  " + line for line in term_lines[2:3]]
            + ["  | " + line for line in term_lines[3:4]]
            + ["    " + line for line in term_lines[4:]]
        )
688
+
689
+
690
class DrtLambdaExpression(DrtExpression, LambdaExpression):
    """DRT lambda abstraction."""

    def alpha_convert(self, newvar):
        """Rename all occurrences of the variable introduced by this variable
        binder in the expression to ``newvar``.
        :param newvar: ``Variable``, for the new variable
        """
        return self.__class__(
            newvar,
            self.term.replace(self.variable, DrtVariableExpression(newvar), True),
        )

    def fol(self):
        return LambdaExpression(self.variable, self.term.fol())

    def _pretty(self):
        # Collapse nested lambdas (\x.\y.M) into a single binder row, then
        # prepend the lambda glyph to the term's rendering.
        variables = [self.variable]
        term = self.term
        while term.__class__ == self.__class__:
            variables.append(term.variable)
            term = term.term
        var_string = " ".join("%s" % v for v in variables) + DrtTokens.DOT
        term_lines = term._pretty()
        blank = " " * len(var_string)
        return (
            ["    " + blank + line for line in term_lines[:1]]
            + [r" \  " + blank + line for line in term_lines[1:2]]
            + [r" /\ " + var_string + line for line in term_lines[2:3]]
            + ["    " + blank + line for line in term_lines[3:]]
        )

    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        # The bound variable always counts as a referent; subterm referents
        # are included only when recursive.
        return (
            [self.variable] + self.term.get_refs(True) if recursive else [self.variable]
        )
725
+
726
+
727
class DrtBinaryExpression(DrtExpression, BinaryExpression):
    """Base class for binary DRT connectives; provides the shared
    side-by-side pretty-printing layout."""

    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        return (
            self.first.get_refs(True) + self.second.get_refs(True) if recursive else []
        )

    def _pretty(self):
        return DrtBinaryExpression._assemble_pretty(
            self._pretty_subex(self.first),
            self.getOp(),
            self._pretty_subex(self.second),
        )

    @staticmethod
    def _assemble_pretty(first_lines, op, second_lines):
        """Lay the two operands out side by side, placing the operator and
        enclosing parentheses on the third line (the operator row)."""
        max_lines = max(len(first_lines), len(second_lines))
        first_lines = _pad_vertically(first_lines, max_lines)
        second_lines = _pad_vertically(second_lines, max_lines)
        blank = " " * len(op)
        first_second_lines = list(zip(first_lines, second_lines))
        return (
            [
                " " + first_line + " " + blank + " " + second_line + " "
                for first_line, second_line in first_second_lines[:2]
            ]
            + [
                "(" + first_line + " " + op + " " + second_line + ")"
                for first_line, second_line in first_second_lines[2:3]
            ]
            + [
                " " + first_line + " " + blank + " " + second_line + " "
                for first_line, second_line in first_second_lines[3:]
            ]
        )

    def _pretty_subex(self, subex):
        # Subclasses override this to strip redundant parentheses from
        # same-operator children.
        return subex._pretty()
765
+
766
+
767
class DrtBooleanExpression(DrtBinaryExpression, BooleanExpression):
    # Marker base class for boolean DRT connectives (or, concatenation).
    pass
769
+
770
+
771
class DrtOrExpression(DrtBooleanExpression, OrExpression):
    """DRT disjunction."""

    def fol(self):
        return OrExpression(self.first.fol(), self.second.fol())

    def _pretty_subex(self, subex):
        # Strip the outer parentheses from a nested disjunction so chained
        # 'or's print flat.
        if isinstance(subex, DrtOrExpression):
            return [line[1:-1] for line in subex._pretty()]
        return DrtBooleanExpression._pretty_subex(self, subex)
779
+
780
+
781
class DrtEqualityExpression(DrtBinaryExpression, EqualityExpression):
    """DRT equality condition (x = y)."""

    def fol(self):
        return EqualityExpression(self.first.fol(), self.second.fol())
784
+
785
+
786
class DrtConcatenation(DrtBooleanExpression):
    """DRS of the form '(DRS + DRS)'"""

    def __init__(self, first, second, consequent=None):
        DrtBooleanExpression.__init__(self, first, second)
        # Like DRS, a concatenation can serve as the antecedent of an
        # implication, carrying the consequent on itself.
        self.consequent = consequent

    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self."""
        first = self.first
        second = self.second
        consequent = self.consequent

        # If variable is bound
        if variable in self.get_refs():
            if replace_bound:
                first = first.replace(
                    variable, expression, replace_bound, alpha_convert
                )
                second = second.replace(
                    variable, expression, replace_bound, alpha_convert
                )
                if consequent:
                    consequent = consequent.replace(
                        variable, expression, replace_bound, alpha_convert
                    )
        else:
            if alpha_convert:
                # alpha convert every ref that is free in 'expression'
                for ref in set(self.get_refs(True)) & expression.free():
                    v = DrtVariableExpression(unique_variable(ref))
                    first = first.replace(ref, v, True, alpha_convert)
                    second = second.replace(ref, v, True, alpha_convert)
                    if consequent:
                        consequent = consequent.replace(ref, v, True, alpha_convert)

            first = first.replace(variable, expression, replace_bound, alpha_convert)
            second = second.replace(variable, expression, replace_bound, alpha_convert)
            if consequent:
                consequent = consequent.replace(
                    variable, expression, replace_bound, alpha_convert
                )

        return self.__class__(first, second, consequent)

    def eliminate_equality(self):
        # TODO: at some point.  for now, simplify.
        drs = self.simplify()
        assert not isinstance(drs, DrtConcatenation)
        return drs.eliminate_equality()

    def simplify(self):
        """Merge the two sides into a single DRS when both simplify to
        DRSs, alpha-converting colliding referents in the second."""
        first = self.first.simplify()
        second = self.second.simplify()
        consequent = self.consequent.simplify() if self.consequent else None

        if isinstance(first, DRS) and isinstance(second, DRS):
            # For any ref that is in both 'first' and 'second'
            for ref in set(first.get_refs(True)) & set(second.get_refs(True)):
                # alpha convert the ref in 'second' to prevent collision
                newvar = DrtVariableExpression(unique_variable(ref))
                second = second.replace(ref, newvar, True)

            return DRS(first.refs + second.refs, first.conds + second.conds, consequent)
        else:
            return self.__class__(first, second, consequent)

    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        refs = self.first.get_refs(recursive) + self.second.get_refs(recursive)
        if self.consequent and recursive:
            refs.extend(self.consequent.get_refs(True))
        return refs

    def getOp(self):
        return DrtTokens.DRS_CONC

    def __eq__(self, other):
        r"""Defines equality modulo alphabetic variance.
        If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
        if isinstance(other, DrtConcatenation):
            self_refs = self.get_refs()
            other_refs = other.get_refs()
            if len(self_refs) == len(other_refs):
                converted_other = other
                # Rename other's referents to ours before comparing parts.
                for (r1, r2) in zip(self_refs, other_refs):
                    varex = self.make_VariableExpression(r1)
                    converted_other = converted_other.replace(r2, varex, True)
                return (
                    self.first == converted_other.first
                    and self.second == converted_other.second
                    and self.consequent == converted_other.consequent
                )
        return False

    def __ne__(self, other):
        return not self == other

    __hash__ = DrtBooleanExpression.__hash__

    def fol(self):
        # Concatenation translates to conjunction; an attached consequent
        # turns the whole thing into an implication.
        e = AndExpression(self.first.fol(), self.second.fol())
        if self.consequent:
            e = ImpExpression(e, self.consequent.fol())
        return e

    def _pretty(self):
        drs = DrtBinaryExpression._assemble_pretty(
            self._pretty_subex(self.first),
            self.getOp(),
            self._pretty_subex(self.second),
        )
        if self.consequent:
            drs = DrtBinaryExpression._assemble_pretty(
                drs, DrtTokens.IMP, self.consequent._pretty()
            )
        return drs

    def _pretty_subex(self, subex):
        # Strip parentheses from a nested concatenation so chains print flat.
        if isinstance(subex, DrtConcatenation):
            return [line[1:-1] for line in subex._pretty()]
        return DrtBooleanExpression._pretty_subex(self, subex)

    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        if self.consequent:
            return combinator(
                [function(self.first), function(self.second), function(self.consequent)]
            )
        else:
            return combinator([function(self.first), function(self.second)])

    def __str__(self):
        first = self._str_subex(self.first)
        second = self._str_subex(self.second)
        drs = Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE
        if self.consequent:
            return (
                DrtTokens.OPEN
                + drs
                + " "
                + DrtTokens.IMP
                + " "
                + "%s" % self.consequent
                + DrtTokens.CLOSE
            )
        return drs

    def _str_subex(self, subex):
        # Drop the surrounding parentheses of a plain nested concatenation.
        s = "%s" % subex
        if isinstance(subex, DrtConcatenation) and subex.consequent is None:
            return s[1:-1]
        return s
940
+
941
+
942
class DrtApplicationExpression(DrtExpression, ApplicationExpression):
    """DRT function application."""

    def fol(self):
        return ApplicationExpression(self.function.fol(), self.argument.fol())

    def get_refs(self, recursive=False):
        """:see: AbstractExpression.get_refs()"""
        return (
            self.function.get_refs(True) + self.argument.get_refs(True)
            if recursive
            else []
        )

    def _pretty(self):
        # Uncurry nested applications so f(a)(b) prints as f(a,b); the
        # argument list and parentheses go on the third (operator) row.
        function, args = self.uncurry()
        function_lines = function._pretty()
        args_lines = [arg._pretty() for arg in args]
        max_lines = max(map(len, [function_lines] + args_lines))
        function_lines = _pad_vertically(function_lines, max_lines)
        args_lines = [_pad_vertically(arg_lines, max_lines) for arg_lines in args_lines]
        func_args_lines = list(zip(function_lines, list(zip(*args_lines))))
        return (
            [
                func_line + " " + " ".join(args_line) + " "
                for func_line, args_line in func_args_lines[:2]
            ]
            + [
                func_line + "(" + ",".join(args_line) + ")"
                for func_line, args_line in func_args_lines[2:3]
            ]
            + [
                func_line + " " + " ".join(args_line) + " "
                for func_line, args_line in func_args_lines[3:]
            ]
        )
976
+
977
+
978
+ def _pad_vertically(lines, max_lines):
979
+ pad_line = [" " * len(lines[0])]
980
+ return lines + pad_line * (max_lines - len(lines))
981
+
982
+
983
class PossibleAntecedents(list, DrtExpression, Expression):
    """A list of candidate antecedent expressions for an unresolved pronoun."""

    def free(self):
        """Set of free variables."""
        return set(self)

    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self.

        :return: a new ``PossibleAntecedents`` with the substitution applied;
            ``self`` is left unmodified.
        """
        result = PossibleAntecedents()
        for item in self:
            if item == variable:
                # BUG FIX: the original appended to ``self`` here (and in the
                # else branch), which mutated the instance in place and always
                # returned an empty ``result``.  Append to ``result`` instead.
                result.append(expression)
            else:
                result.append(item)
        return result

    def _pretty(self):
        s = "%s" % self
        blank = " " * len(s)
        return [blank, blank, s]

    def __str__(self):
        return "[" + ",".join("%s" % it for it in self) + "]"
1006
+
1007
+
1008
class AnaphoraResolutionException(Exception):
    # Raised when a pronoun cannot be resolved to any antecedent.
    pass
1010
+
1011
+
1012
def resolve_anaphora(expression, trail=None):
    """
    Resolve pronouns in *expression* by replacing each ``PRO(x)`` condition
    with an equality between ``x`` and its possible antecedents, collected
    from the referents of the enclosing structures in *trail*.

    :param expression: the DRT expression to resolve
    :param trail: list of enclosing expressions, innermost last; used
        internally during recursion.  (Changed from a mutable ``[]`` default
        to ``None`` to avoid the shared-mutable-default pitfall; behavior is
        unchanged since the trail was never mutated in place.)
    :raise AnaphoraResolutionException: if a pronoun has no possible
        antecedent
    """
    if trail is None:
        trail = []

    if isinstance(expression, ApplicationExpression):
        if expression.is_pronoun_function():
            # Gather every referent of every enclosing structure that has the
            # same variable type as the pronoun's argument.
            possible_antecedents = PossibleAntecedents()
            for ancestor in trail:
                for ref in ancestor.get_refs():
                    refex = expression.make_VariableExpression(ref)

                    # ==========================================================
                    # Don't allow resolution to itself or other types
                    # ==========================================================
                    if refex.__class__ == expression.argument.__class__ and not (
                        refex == expression.argument
                    ):
                        possible_antecedents.append(refex)

            # A unique candidate resolves directly; otherwise keep the whole
            # candidate list so the caller can detect ambiguity/failure.
            if len(possible_antecedents) == 1:
                resolution = possible_antecedents[0]
            else:
                resolution = possible_antecedents
            return expression.make_EqualityExpression(expression.argument, resolution)
        else:
            r_function = resolve_anaphora(expression.function, trail + [expression])
            r_argument = resolve_anaphora(expression.argument, trail + [expression])
            return expression.__class__(r_function, r_argument)

    elif isinstance(expression, DRS):
        r_conds = []
        for cond in expression.conds:
            r_cond = resolve_anaphora(cond, trail + [expression])

            # if the condition is of the form '(x = [])' then raise exception
            if isinstance(r_cond, EqualityExpression):
                if isinstance(r_cond.first, PossibleAntecedents):
                    # Reverse the order so that the variable is on the left
                    temp = r_cond.first
                    r_cond.first = r_cond.second
                    r_cond.second = temp
                if isinstance(r_cond.second, PossibleAntecedents):
                    if not r_cond.second:
                        raise AnaphoraResolutionException(
                            "Variable '%s' does not "
                            "resolve to anything." % r_cond.first
                        )

            r_conds.append(r_cond)
        if expression.consequent:
            consequent = resolve_anaphora(expression.consequent, trail + [expression])
        else:
            consequent = None
        return expression.__class__(expression.refs, r_conds, consequent)

    elif isinstance(expression, AbstractVariableExpression):
        return expression

    elif isinstance(expression, NegatedExpression):
        return expression.__class__(
            resolve_anaphora(expression.term, trail + [expression])
        )

    elif isinstance(expression, DrtConcatenation):
        if expression.consequent:
            consequent = resolve_anaphora(expression.consequent, trail + [expression])
        else:
            consequent = None
        return expression.__class__(
            resolve_anaphora(expression.first, trail + [expression]),
            resolve_anaphora(expression.second, trail + [expression]),
            consequent,
        )

    elif isinstance(expression, BinaryExpression):
        return expression.__class__(
            resolve_anaphora(expression.first, trail + [expression]),
            resolve_anaphora(expression.second, trail + [expression]),
        )

    elif isinstance(expression, LambdaExpression):
        return expression.__class__(
            expression.variable, resolve_anaphora(expression.term, trail + [expression])
        )
    # NOTE(review): any other expression type (e.g. DrtProposition) falls
    # through and returns None, as in the original code — confirm whether
    # that is intentional before adding a catch-all branch.
1093
+
1094
+
1095
+ class DrsDrawer:
1096
+ BUFFER = 3 # Space between elements
1097
+ TOPSPACE = 10 # Space above whole DRS
1098
+ OUTERSPACE = 6 # Space to the left, right, and bottom of the while DRS
1099
+
1100
+ def __init__(self, drs, size_canvas=True, canvas=None):
1101
+ """
1102
+ :param drs: ``DrtExpression``, The DRS to be drawn
1103
+ :param size_canvas: bool, True if the canvas size should be the exact size of the DRS
1104
+ :param canvas: ``Canvas`` The canvas on which to draw the DRS. If none is given, create a new canvas.
1105
+ """
1106
+ master = None
1107
+ if not canvas:
1108
+ master = Tk()
1109
+ master.title("DRT")
1110
+
1111
+ font = Font(family="helvetica", size=12)
1112
+
1113
+ if size_canvas:
1114
+ canvas = Canvas(master, width=0, height=0)
1115
+ canvas.font = font
1116
+ self.canvas = canvas
1117
+ (right, bottom) = self._visit(drs, self.OUTERSPACE, self.TOPSPACE)
1118
+
1119
+ width = max(right + self.OUTERSPACE, 100)
1120
+ height = bottom + self.OUTERSPACE
1121
+ canvas = Canvas(master, width=width, height=height) # , bg='white')
1122
+ else:
1123
+ canvas = Canvas(master, width=300, height=300)
1124
+
1125
+ canvas.pack()
1126
+ canvas.font = font
1127
+
1128
+ self.canvas = canvas
1129
+ self.drs = drs
1130
+ self.master = master
1131
+
1132
+ def _get_text_height(self):
1133
+ """Get the height of a line of text"""
1134
+ return self.canvas.font.metrics("linespace")
1135
+
1136
+ def draw(self, x=OUTERSPACE, y=TOPSPACE):
1137
+ """Draw the DRS"""
1138
+ self._handle(self.drs, self._draw_command, x, y)
1139
+
1140
+ if self.master and not in_idle():
1141
+ self.master.mainloop()
1142
+ else:
1143
+ return self._visit(self.drs, x, y)
1144
+
1145
+ def _visit(self, expression, x, y):
1146
+ """
1147
+ Return the bottom-rightmost point without actually drawing the item
1148
+
1149
+ :param expression: the item to visit
1150
+ :param x: the top of the current drawing area
1151
+ :param y: the left side of the current drawing area
1152
+ :return: the bottom-rightmost point
1153
+ """
1154
+ return self._handle(expression, self._visit_command, x, y)
1155
+
1156
+ def _draw_command(self, item, x, y):
1157
+ """
1158
+ Draw the given item at the given location
1159
+
1160
+ :param item: the item to draw
1161
+ :param x: the top of the current drawing area
1162
+ :param y: the left side of the current drawing area
1163
+ :return: the bottom-rightmost point
1164
+ """
1165
+ if isinstance(item, str):
1166
+ self.canvas.create_text(x, y, anchor="nw", font=self.canvas.font, text=item)
1167
+ elif isinstance(item, tuple):
1168
+ # item is the lower-right of a box
1169
+ (right, bottom) = item
1170
+ self.canvas.create_rectangle(x, y, right, bottom)
1171
+ horiz_line_y = (
1172
+ y + self._get_text_height() + (self.BUFFER * 2)
1173
+ ) # the line separating refs from conds
1174
+ self.canvas.create_line(x, horiz_line_y, right, horiz_line_y)
1175
+
1176
+ return self._visit_command(item, x, y)
1177
+
1178
+ def _visit_command(self, item, x, y):
1179
+ """
1180
+ Return the bottom-rightmost point without actually drawing the item
1181
+
1182
+ :param item: the item to visit
1183
+ :param x: the top of the current drawing area
1184
+ :param y: the left side of the current drawing area
1185
+ :return: the bottom-rightmost point
1186
+ """
1187
+ if isinstance(item, str):
1188
+ return (x + self.canvas.font.measure(item), y + self._get_text_height())
1189
+ elif isinstance(item, tuple):
1190
+ return item
1191
+
1192
    def _handle(self, expression, command, x=0, y=0):
        """
        Dispatch *expression* to the handler for its concrete type, applying
        *command* to each drawable piece, and cache the computed size on the
        expression object.

        :param expression: the expression to handle
        :param command: the function to apply, either _draw_command or _visit_command
        :param x: the left side of the current drawing area
        :param y: the top of the current drawing area
        :return: the bottom-rightmost point
        """
        if command == self._visit_command:
            # if we don't need to draw the item, then we can use the cached values
            try:
                # attempt to retrieve cached values
                right = expression._drawing_width + x
                bottom = expression._drawing_height + y
                return (right, bottom)
            except AttributeError:
                # the values have not been cached yet, so compute them
                pass

        # Select the type-specific handler.  Order matters: subclasses such
        # as DrtApplicationExpression must be tested in the order given here.
        if isinstance(expression, DrtAbstractVariableExpression):
            factory = self._handle_VariableExpression
        elif isinstance(expression, DRS):
            factory = self._handle_DRS
        elif isinstance(expression, DrtNegatedExpression):
            factory = self._handle_NegatedExpression
        elif isinstance(expression, DrtLambdaExpression):
            factory = self._handle_LambdaExpression
        elif isinstance(expression, BinaryExpression):
            factory = self._handle_BinaryExpression
        elif isinstance(expression, DrtApplicationExpression):
            factory = self._handle_ApplicationExpression
        elif isinstance(expression, PossibleAntecedents):
            factory = self._handle_VariableExpression
        elif isinstance(expression, DrtProposition):
            factory = self._handle_DrtProposition
        else:
            raise Exception(expression.__class__.__name__)

        (right, bottom) = factory(expression, command, x, y)

        # cache the values as offsets, since sizes are position-independent
        expression._drawing_width = right - x
        expression._drawing_height = bottom - y

        return (right, bottom)
1237
+
1238
+ def _handle_VariableExpression(self, expression, command, x, y):
1239
+ return command("%s" % expression, x, y)
1240
+
1241
    def _handle_NegatedExpression(self, expression, command, x, y):
        # Find the width of the negation symbol
        right = self._visit_command(DrtTokens.NOT, x, y)[0]

        # Handle term
        (right, bottom) = self._handle(expression.term, command, right, y)

        # Apply the command to the negation symbol now that the term's height
        # is known, so the symbol can be vertically centered against it.
        command(
            DrtTokens.NOT,
            x,
            self._get_centered_top(y, bottom - y, self._get_text_height()),
        )

        return (right, bottom)
1256
+
1257
    def _handle_DRS(self, expression, command, x, y):
        """Lay out a DRS box: the discourse referents on top, a separator
        line (drawn by ``_draw_command``), then the conditions, all enclosed
        in a rectangle."""
        left = x + self.BUFFER  # indent the left side
        bottom = y + self.BUFFER  # indent the top

        # Handle Discourse Referents
        if expression.refs:
            refs = " ".join("%s" % r for r in expression.refs)
        else:
            # keep the referents row non-empty so the box still has a header
            refs = " "
        (max_right, bottom) = command(refs, left, bottom)
        bottom += self.BUFFER * 2

        # Handle Conditions
        if expression.conds:
            for cond in expression.conds:
                (right, bottom) = self._handle(cond, command, left, bottom)
                max_right = max(max_right, right)
                bottom += self.BUFFER
        else:
            # reserve one empty text row for a condition-less DRS
            bottom += self._get_text_height() + self.BUFFER

        # Handle Box
        max_right += self.BUFFER
        return command((max_right, bottom), x, y)
1281
+
1282
    def _handle_ApplicationExpression(self, expression, command, x, y):
        """Lay out an application as ``function(arg1, arg2, ...)``, with every
        element vertically centered on the tallest element of the line."""
        function, args = expression.uncurry()
        if not isinstance(function, DrtAbstractVariableExpression):
            # It's not a predicate expression ("P(x,y)"), so leave arguments curried
            function = expression.function
            args = [expression.argument]

        # Get the max bottom of any element on the line
        function_bottom = self._visit(function, x, y)[1]
        max_bottom = max(
            [function_bottom] + [self._visit(arg, x, y)[1] for arg in args]
        )

        line_height = max_bottom - y

        # Handle 'function'
        function_drawing_top = self._get_centered_top(
            y, line_height, function._drawing_height
        )
        right = self._handle(function, command, x, function_drawing_top)[0]

        # Handle open paren
        centred_string_top = self._get_centered_top(
            y, line_height, self._get_text_height()
        )
        right = command(DrtTokens.OPEN, right, centred_string_top)[0]

        # Handle each arg
        for (i, arg) in enumerate(args):
            arg_drawing_top = self._get_centered_top(
                y, line_height, arg._drawing_height
            )
            right = self._handle(arg, command, right, arg_drawing_top)[0]

            if i + 1 < len(args):
                # since it's not the last arg, add a comma
                right = command(DrtTokens.COMMA + " ", right, centred_string_top)[0]

        # Handle close paren
        right = command(DrtTokens.CLOSE, right, centred_string_top)[0]

        return (right, max_bottom)
1324
+
1325
    def _handle_LambdaExpression(self, expression, command, x, y):
        # Find the width of the lambda symbol and abstracted variables
        variables = DrtTokens.LAMBDA + "%s" % expression.variable + DrtTokens.DOT
        right = self._visit_command(variables, x, y)[0]

        # Handle term
        (right, bottom) = self._handle(expression.term, command, right, y)

        # Apply the command to the variable prefix now that the term's
        # height is known, so the prefix can be vertically centered.
        command(
            variables, x, self._get_centered_top(y, bottom - y, self._get_text_height())
        )

        return (right, bottom)
1339
+
1340
    def _handle_BinaryExpression(self, expression, command, x, y):
        """Lay out ``(first op second)``, centering the parens, operator, and
        both operands on the taller of the two operands."""
        # Get the full height of the line, based on the operands
        first_height = self._visit(expression.first, 0, 0)[1]
        second_height = self._visit(expression.second, 0, 0)[1]
        line_height = max(first_height, second_height)

        # Handle open paren
        centred_string_top = self._get_centered_top(
            y, line_height, self._get_text_height()
        )
        right = command(DrtTokens.OPEN, x, centred_string_top)[0]

        # Handle the first operand (height was cached on it by _visit above)
        first_height = expression.first._drawing_height
        (right, first_bottom) = self._handle(
            expression.first,
            command,
            right,
            self._get_centered_top(y, line_height, first_height),
        )

        # Handle the operator
        right = command(" %s " % expression.getOp(), right, centred_string_top)[0]

        # Handle the second operand
        second_height = expression.second._drawing_height
        (right, second_bottom) = self._handle(
            expression.second,
            command,
            right,
            self._get_centered_top(y, line_height, second_height),
        )

        # Handle close paren
        right = command(DrtTokens.CLOSE, right, centred_string_top)[0]

        return (right, max(first_bottom, second_bottom))
1377
+
1378
    def _handle_DrtProposition(self, expression, command, x, y):
        # Apply the command to the proposition variable first to find its width
        right = command(expression.variable, x, y)[0]

        # Handle term
        (right, bottom) = self._handle(expression.term, command, right, y)

        return (right, bottom)
1386
+
1387
+ def _get_centered_top(self, top, full_height, item_height):
1388
+ """Get the y-coordinate of the point that a figure should start at if
1389
+ its height is 'item_height' and it needs to be centered in an area that
1390
+ starts at 'top' and is 'full_height' tall."""
1391
+ return top + (full_height - item_height) / 2
1392
+
1393
+
1394
def demo():
    """Console walkthrough of the DRT module: parsing, conversion to
    first-order logic, alpha conversion, anaphora resolution, and
    pretty-printing of sample DRS expressions."""
    print("=" * 20 + "TEST PARSE" + "=" * 20)
    dexpr = DrtExpression.fromstring
    print(dexpr(r"([x,y],[sees(x,y)])"))
    print(dexpr(r"([x],[man(x), walks(x)])"))
    print(dexpr(r"\x.\y.([],[sees(x,y)])"))
    print(dexpr(r"\x.([],[walks(x)])(john)"))
    print(dexpr(r"(([x],[walks(x)]) + ([y],[runs(y)]))"))
    print(dexpr(r"(([],[walks(x)]) -> ([],[runs(x)]))"))
    print(dexpr(r"([x],[PRO(x), sees(John,x)])"))
    print(dexpr(r"([x],[man(x), -([],[walks(x)])])"))
    print(dexpr(r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])"))

    print("=" * 20 + "Test fol()" + "=" * 20)
    print(dexpr(r"([x,y],[sees(x,y)])").fol())

    print("=" * 20 + "Test alpha conversion and lambda expression equality" + "=" * 20)
    e1 = dexpr(r"\x.([],[P(x)])")
    print(e1)
    e2 = e1.alpha_convert(Variable("z"))
    print(e2)
    # alpha-equivalent lambda expressions should compare equal
    print(e1 == e2)

    print("=" * 20 + "Test resolve_anaphora()" + "=" * 20)
    print(resolve_anaphora(dexpr(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])")))
    print(
        resolve_anaphora(dexpr(r"([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])"))
    )
    print(resolve_anaphora(dexpr(r"(([x,y],[]) + ([],[PRO(x)]))")))

    print("=" * 20 + "Test pretty_print()" + "=" * 20)
    dexpr(r"([],[])").pretty_print()
    dexpr(
        r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])"
    ).pretty_print()
    dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print()
    dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print()
    dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print()
1432
+
1433
+
1434
def test_draw():
    """Interactively render a batch of sample DRT expressions with Tkinter.

    :raises ValueError: if tkinter is not available in this environment.
    """
    try:
        # Tk is imported only as an availability probe; DrtExpression.draw()
        # performs the actual GUI work.
        from tkinter import Tk
    except ImportError as e:
        # Chain the original ImportError so the real cause stays visible
        # in the traceback (previously `e` was caught and silently dropped).
        raise ValueError("tkinter is required, but it's not available.") from e

    expressions = [
        r"x",
        r"([],[])",
        r"([x],[])",
        r"([x],[man(x)])",
        r"([x,y],[sees(x,y)])",
        r"([x],[man(x), walks(x)])",
        r"\x.([],[man(x), walks(x)])",
        r"\x y.([],[sees(x,y)])",
        r"([],[(([],[walks(x)]) + ([],[runs(x)]))])",
        r"([x],[man(x), -([],[walks(x)])])",
        r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])",
    ]

    for e in expressions:
        d = DrtExpression.fromstring(e)
        d.draw()
1457
+
1458
+
1459
+ if __name__ == "__main__":
1460
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: GUI Demo for Glue Semantics with Discourse
2
+ # Representation Theory (DRT) as meaning language
3
+ #
4
+ # Author: Dan Garrette <[email protected]>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ try:
11
+ from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk
12
+ from tkinter.font import Font
13
+
14
+ from nltk.draw.util import CanvasFrame, ShowText
15
+
16
+ except ImportError:
17
+ """Ignore ImportError because tkinter might not be available."""
18
+
19
+ from nltk.parse import MaltParser
20
+ from nltk.sem.drt import DrsDrawer, DrtVariableExpression
21
+ from nltk.sem.glue import DrtGlue
22
+ from nltk.sem.logic import Variable
23
+ from nltk.tag import RegexpTagger
24
+ from nltk.util import in_idle
25
+
26
+
27
class DrtGlueDemo:
    """Tkinter GUI demo for Glue Semantics with DRT as the meaning language.

    Shows a list of example sentences and, for the selected example, the
    DRS readings produced by :class:`nltk.sem.glue.DrtGlue`; the selected
    reading is rendered on a canvas via :class:`DrsWidget`.
    """

    def __init__(self, examples):
        """
        :param examples: sentences (strings) offered in the example listbox
        """
        # Set up the main window.
        self._top = Tk()
        self._top.title("DRT Glue Demo")

        # Set up key bindings.
        self._init_bindings()

        # Initialize the fonts.
        self._init_fonts(self._top)

        self._examples = examples
        # One cache slot per example: a list of readings on success, or an
        # error expression on failure (see _exampleList_store_selection).
        self._readingCache = [None for example in examples]

        # The user can hide the grammar.
        self._show_grammar = IntVar(self._top)
        self._show_grammar.set(1)

        # Set the data to None
        self._curExample = -1
        self._readings = []
        self._drs = None
        self._drsWidget = None
        self._error = None

        self._init_glue()

        # Create the basic frames.
        self._init_menubar(self._top)
        self._init_buttons(self._top)
        self._init_exampleListbox(self._top)
        self._init_readingListbox(self._top)
        self._init_canvas(self._top)

        # Resize callback
        self._canvas.bind("<Configure>", self._configure)

    #########################################
    ## Initialization Helpers
    #########################################

    def _init_glue(self):
        """Build the DrtGlue parser over a MaltParser with a small regexp
        tagger covering the demo vocabulary."""
        tagger = RegexpTagger(
            [
                ("^(David|Mary|John)$", "NNP"),
                (
                    "^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$",
                    "VB",
                ),
                ("^(go|order|vanish|find|approach)$", "VB"),
                ("^(a)$", "ex_quant"),
                ("^(every)$", "univ_quant"),
                ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"),
                ("^(big|gray|former)$", "JJ"),
                ("^(him|himself)$", "PRP"),
            ]
        )

        # NOTE(review): MaltParser requires an external parser installation;
        # the demo fails at parse time without it — confirm environment.
        depparser = MaltParser(tagger=tagger)
        self._glue = DrtGlue(depparser=depparser, remove_duplicates=False)

    def _init_fonts(self, root):
        # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
        self._sysfont = Font(font=Button()["font"])
        root.option_add("*Font", self._sysfont)

        # What's our font size (default=same as sysfont)
        self._size = IntVar(root)
        self._size.set(self._sysfont.cget("size"))

        self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get())
        self._font = Font(family="helvetica", size=self._size.get())
        # "big" font is two points larger (negative sizes are pixel sizes)
        if self._size.get() < 0:
            big = self._size.get() - 2
        else:
            big = self._size.get() + 2
        self._bigfont = Font(family="helvetica", weight="bold", size=big)

    def _init_exampleListbox(self, parent):
        """Create the left-hand listbox holding the example sentences."""
        self._exampleFrame = listframe = Frame(parent)
        self._exampleFrame.pack(fill="both", side="left", padx=2)
        self._exampleList_label = Label(
            self._exampleFrame, font=self._boldfont, text="Examples"
        )
        self._exampleList_label.pack()
        self._exampleList = Listbox(
            self._exampleFrame,
            selectmode="single",
            relief="groove",
            background="white",
            foreground="#909090",
            font=self._font,
            selectforeground="#004040",
            selectbackground="#c0f0c0",
        )

        self._exampleList.pack(side="right", fill="both", expand=1)

        for example in self._examples:
            self._exampleList.insert("end", ("  %s" % example))
        self._exampleList.config(height=min(len(self._examples), 25), width=40)

        # Add a scrollbar if there are more than 25 examples.
        if len(self._examples) > 25:
            listscroll = Scrollbar(self._exampleFrame, orient="vertical")
            self._exampleList.config(yscrollcommand=listscroll.set)
            listscroll.config(command=self._exampleList.yview)
            listscroll.pack(side="left", fill="y")

        # If they select a example, apply it.
        self._exampleList.bind("<<ListboxSelect>>", self._exampleList_select)

    def _init_readingListbox(self, parent):
        """Create the listbox holding the reading numbers for the current
        example."""
        self._readingFrame = listframe = Frame(parent)
        self._readingFrame.pack(fill="both", side="left", padx=2)
        self._readingList_label = Label(
            self._readingFrame, font=self._boldfont, text="Readings"
        )
        self._readingList_label.pack()
        self._readingList = Listbox(
            self._readingFrame,
            selectmode="single",
            relief="groove",
            background="white",
            foreground="#909090",
            font=self._font,
            selectforeground="#004040",
            selectbackground="#c0f0c0",
        )

        self._readingList.pack(side="right", fill="both", expand=1)

        # Always add a scrollbar for the readings list.
        listscroll = Scrollbar(self._readingFrame, orient="vertical")
        self._readingList.config(yscrollcommand=listscroll.set)
        listscroll.config(command=self._readingList.yview)
        listscroll.pack(side="right", fill="y")

        self._populate_readingListbox()

    def _populate_readingListbox(self):
        # Populate the listbox with 1-based reading numbers
        self._readingList.delete(0, "end")
        for i in range(len(self._readings)):
            self._readingList.insert("end", ("  %s" % (i + 1)))
        self._readingList.config(height=min(len(self._readings), 25), width=5)

        # If they select a reading, apply it.
        self._readingList.bind("<<ListboxSelect>>", self._readingList_select)

    def _init_bindings(self):
        # Key bindings are a good thing.
        self._top.bind("<Control-q>", self.destroy)
        self._top.bind("<Control-x>", self.destroy)
        self._top.bind("<Escape>", self.destroy)
        self._top.bind("n", self.next)
        self._top.bind("<space>", self.next)
        self._top.bind("p", self.prev)
        self._top.bind("<BackSpace>", self.prev)

    def _init_buttons(self, parent):
        # Set up the frames.
        self._buttonframe = buttonframe = Frame(parent)
        buttonframe.pack(fill="none", side="bottom", padx=3, pady=2)
        Button(
            buttonframe,
            text="Prev",
            background="#90c0d0",
            foreground="black",
            command=self.prev,
        ).pack(side="left")
        Button(
            buttonframe,
            text="Next",
            background="#90c0d0",
            foreground="black",
            command=self.next,
        ).pack(side="left")

    def _configure(self, event):
        """Canvas <Configure> callback: track the new height and redraw."""
        self._autostep = 0
        (x1, y1, x2, y2) = self._cframe.scrollregion()
        y2 = event.height - 6
        self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2)
        self._redraw()

    def _init_canvas(self, parent):
        self._cframe = CanvasFrame(
            parent,
            background="white",
            # width=525, height=250,
            closeenough=10,
            border=2,
            relief="sunken",
        )
        self._cframe.pack(expand=1, fill="both", side="top", pady=2)
        canvas = self._canvas = self._cframe.canvas()

        # Initially, there's no tree or text
        self._tree = None
        self._textwidgets = []
        self._textline = None

    def _init_menubar(self, parent):
        menubar = Menu(parent)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="q"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)

        actionmenu = Menu(menubar, tearoff=0)
        actionmenu.add_command(
            label="Next", underline=0, command=self.next, accelerator="n, Space"
        )
        actionmenu.add_command(
            label="Previous", underline=0, command=self.prev, accelerator="p, Backspace"
        )
        menubar.add_cascade(label="Action", underline=0, menu=actionmenu)

        optionmenu = Menu(menubar, tearoff=0)
        # NOTE(review): `variable` here is a plain bool attribute, not a Tk
        # variable, so the checkbutton indicator may not track the actual
        # state; the toggle itself is handled by _toggle_remove_duplicates.
        optionmenu.add_checkbutton(
            label="Remove Duplicates",
            underline=0,
            variable=self._glue.remove_duplicates,
            command=self._toggle_remove_duplicates,
            accelerator="r",
        )
        menubar.add_cascade(label="Options", underline=0, menu=optionmenu)

        viewmenu = Menu(menubar, tearoff=0)
        viewmenu.add_radiobutton(
            label="Tiny",
            variable=self._size,
            underline=0,
            value=10,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Small",
            variable=self._size,
            underline=0,
            value=12,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Medium",
            variable=self._size,
            underline=0,
            value=14,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Large",
            variable=self._size,
            underline=0,
            value=18,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Huge",
            variable=self._size,
            underline=0,
            value=24,
            command=self.resize,
        )
        menubar.add_cascade(label="View", underline=0, menu=viewmenu)

        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", underline=0, command=self.about)
        menubar.add_cascade(label="Help", underline=0, menu=helpmenu)

        parent.config(menu=menubar)

    #########################################
    ## Main draw procedure
    #########################################

    def _redraw(self):
        """Clear the previous widget and draw the current DRS (or the error
        expression when parsing failed)."""
        canvas = self._canvas

        # Delete the old DRS, widgets, etc.
        if self._drsWidget is not None:
            self._drsWidget.clear()

        if self._drs:
            self._drsWidget = DrsWidget(self._canvas, self._drs)
            self._drsWidget.draw()

        if self._error:
            self._drsWidget = DrsWidget(self._canvas, self._error)
            self._drsWidget.draw()

    #########################################
    ## Button Callbacks
    #########################################

    def destroy(self, *e):
        self._autostep = 0
        if self._top is None:
            return
        self._top.destroy()
        self._top = None

    def prev(self, *e):
        """Step to the previous reading, wrapping to the previous example
        when the first reading is already selected."""
        selection = self._readingList.curselection()
        readingListSize = self._readingList.size()

        # there are readings
        if readingListSize > 0:
            # if one reading is currently selected
            if len(selection) == 1:
                index = int(selection[0])

                # if it's on (or before) the first item
                if index <= 0:
                    self._select_previous_example()
                else:
                    self._readingList_store_selection(index - 1)

            else:
                # select its last reading
                self._readingList_store_selection(readingListSize - 1)

        else:
            self._select_previous_example()

    def _select_previous_example(self):
        # if the current example is not the first example
        if self._curExample > 0:
            self._exampleList_store_selection(self._curExample - 1)
        else:
            # go to the last example
            self._exampleList_store_selection(len(self._examples) - 1)

    def next(self, *e):
        """Step to the next reading, wrapping to the next example when the
        last reading is already selected."""
        selection = self._readingList.curselection()
        readingListSize = self._readingList.size()

        # if there are readings
        if readingListSize > 0:
            # if one reading is currently selected
            if len(selection) == 1:
                index = int(selection[0])

                # if it's on (or past) the last item
                if index >= (readingListSize - 1):
                    self._select_next_example()
                else:
                    self._readingList_store_selection(index + 1)

            else:
                # select its first reading
                self._readingList_store_selection(0)

        else:
            self._select_next_example()

    def _select_next_example(self):
        # if the current example is not the last example
        if self._curExample < len(self._examples) - 1:
            self._exampleList_store_selection(self._curExample + 1)
        else:
            # go to the first example
            self._exampleList_store_selection(0)

    def about(self, *e):
        ABOUT = (
            "NLTK Discourse Representation Theory (DRT) Glue Semantics Demo\n"
            + "Written by Daniel H. Garrette"
        )
        TITLE = "About: NLTK DRT Glue Demo"
        try:
            from tkinter.messagebox import Message

            Message(message=ABOUT, title=TITLE).show()
        except:
            # Fall back to a plain-text window if the messagebox fails.
            ShowText(self._top, TITLE, ABOUT)

    def postscript(self, *e):
        """Print the canvas contents to a PostScript file."""
        self._autostep = 0
        self._cframe.print_to_file()

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        if in_idle():
            return
        self._top.mainloop(*args, **kwargs)

    def resize(self, size=None):
        """Set all fonts to *size* (or the current menu selection) and
        redraw.  Negative sizes select pixel-based Tk font sizes."""
        if size is not None:
            self._size.set(size)
        size = self._size.get()
        self._font.configure(size=-(abs(size)))
        self._boldfont.configure(size=-(abs(size)))
        self._sysfont.configure(size=-(abs(size)))
        self._bigfont.configure(size=-(abs(size + 2)))
        self._redraw()

    def _toggle_remove_duplicates(self):
        """Flip the duplicate-removal flag and invalidate all cached state,
        since every cached reading list may now be stale."""
        self._glue.remove_duplicates = not self._glue.remove_duplicates

        self._exampleList.selection_clear(0, "end")
        self._readings = []
        self._populate_readingListbox()
        self._readingCache = [None for ex in self._examples]
        self._curExample = -1
        self._error = None

        self._drs = None
        self._redraw()

    def _exampleList_select(self, event):
        selection = self._exampleList.curselection()
        if len(selection) != 1:
            return
        self._exampleList_store_selection(int(selection[0]))

    def _exampleList_store_selection(self, index):
        """Make example *index* current: parse it (or reuse the cached
        readings / cached error) and refresh both listboxes."""
        self._curExample = index
        example = self._examples[index]

        self._exampleList.selection_clear(0, "end")
        if example:
            cache = self._readingCache[index]
            if cache:
                # a list means cached readings; anything else is a cached error
                if isinstance(cache, list):
                    self._readings = cache
                    self._error = None
                else:
                    self._readings = []
                    self._error = cache
            else:
                try:
                    self._readings = self._glue.parse_to_meaning(example)
                    self._error = None
                    self._readingCache[index] = self._readings
                except Exception as e:
                    self._readings = []
                    # wrap the message so _redraw can render it like a DRS
                    self._error = DrtVariableExpression(Variable("Error: " + str(e)))
                    self._readingCache[index] = self._error

                    # add a star to the end of the example
                    self._exampleList.delete(index)
                    self._exampleList.insert(index, ("  %s *" % example))
                    self._exampleList.config(
                        height=min(len(self._examples), 25), width=40
                    )

            self._populate_readingListbox()

            self._exampleList.selection_set(index)

            self._drs = None
            self._redraw()

    def _readingList_select(self, event):
        selection = self._readingList.curselection()
        if len(selection) != 1:
            return
        self._readingList_store_selection(int(selection[0]))

    def _readingList_store_selection(self, index):
        """Make reading *index* current and render its simplified,
        normalized, anaphora-resolved DRS."""
        reading = self._readings[index]

        self._readingList.selection_clear(0, "end")
        if reading:
            self._readingList.selection_set(index)

            self._drs = reading.simplify().normalize().resolve_anaphora()

            self._redraw()
506
+
507
+
508
class DrsWidget:
    """Render a single DRS onto a Tkinter canvas, remembering its bounding
    box so it can later be erased."""

    def __init__(self, canvas, drs, **attribs):
        """
        :param canvas: the Tkinter canvas to draw on
        :param drs: the DRS expression to render
        """
        self._drs = drs
        self._canvas = canvas
        # Capture the canvas's default text font for measuring and drawing.
        canvas.font = Font(
            font=canvas.itemcget(canvas.create_text(0, 0, text=""), "font")
        )
        canvas._BUFFER = 3
        # Bounding box of the last drawing; updated by draw().
        self.bbox = (0, 0, 0, 0)

    def draw(self):
        """Draw the DRS and record its bounding box (with a 1px margin)."""
        (right, bottom) = DrsDrawer(self._drs, canvas=self._canvas).draw()
        self.bbox = (0, 0, right + 1, bottom + 1)

    def clear(self):
        """Erase the widget by painting a white rectangle over its box."""
        self._canvas.create_rectangle(self.bbox, fill="white", width="0")
524
+
525
+
526
def demo():
    """Launch the DRT Glue GUI demo with a set of sample sentences."""
    examples = [
        "John walks",
        "David sees Mary",
        "David eats a sandwich",
        "every man chases a dog",
        # 'every man believes a dog yawns',
        # 'John gives David a sandwich',
        "John chases himself",
        # 'John persuades David to order a pizza',
        # 'John tries to go',
        # 'John tries to find a unicorn',
        # 'John seems to vanish',
        # 'a unicorn seems to approach',
        # 'every big cat leaves',
        # 'every gray cat leaves',
        # 'every big gray cat leaves',
        # 'a former senator leaves',
        # 'John likes a cat',
        # 'John likes every cat',
        # 'he walks',
        # 'John walks and he leaves'
    ]
    DrtGlueDemo(examples).mainloop()
550
+
551
+
552
+ if __name__ == "__main__":
553
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/sem/evaluate.py ADDED
@@ -0,0 +1,829 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Models for first-order languages with lambda
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>,
5
+ # URL: <https://www.nltk.org>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # TODO:
9
+ # - fix tracing
10
+ # - fix iterator-based approach to existentials
11
+
12
+ """
13
+ This module provides data structures for representing first-order
14
+ models.
15
+ """
16
+
17
+ import inspect
18
+ import re
19
+ import sys
20
+ import textwrap
21
+ from pprint import pformat
22
+
23
+ from nltk.decorators import decorator # this used in code that is commented out
24
+ from nltk.sem.logic import (
25
+ AbstractVariableExpression,
26
+ AllExpression,
27
+ AndExpression,
28
+ ApplicationExpression,
29
+ EqualityExpression,
30
+ ExistsExpression,
31
+ Expression,
32
+ IffExpression,
33
+ ImpExpression,
34
+ IndividualVariableExpression,
35
+ IotaExpression,
36
+ LambdaExpression,
37
+ NegatedExpression,
38
+ OrExpression,
39
+ Variable,
40
+ is_indvar,
41
+ )
42
+
43
+
44
class Error(Exception):
    """Base class for errors raised by this model-evaluation module."""
46
+
47
+
48
class Undefined(Error):
    """Raised when an expression cannot be interpreted (e.g. a symbol is
    missing from a ``Valuation``)."""
50
+
51
+
52
def trace(f, *args, **kw):
    """Call *f* with the given arguments; if a truthy ``trace`` argument was
    passed positionally, first print the remaining argument bindings.

    :param f: the function to call
    :return: whatever ``f(*args, **kw)`` returns
    """
    argspec = inspect.getfullargspec(f)
    # Map f's positional parameter names onto the supplied values.
    d = dict(zip(argspec[0], args))
    # Pop 'trace' so it is not echoed along with the real arguments.
    if d.pop("trace", None):
        print()
        for item in d.items():
            print("%s => %s" % item)
    return f(*args, **kw)
60
+
61
+
62
def is_rel(s):
    """
    Check whether a set represents a relation (of any arity).

    :param s: a set containing tuples of str elements
    :type s: set
    :rtype: bool
    :raises ValueError: if the elements are not all tuples of one length
    """
    # we have the empty relation, i.e. set()
    if len(s) == 0:
        return True
    # all the elements are tuples of the same length.
    # (The previous check compared len(max(s)) with len(min(s)), but max/min
    # pick the lexicographic extremes, not the length extremes, so mixed
    # arities such as {('0',), ('1', '1'), ('2',)} slipped through.)
    elif all(isinstance(el, tuple) for el in s) and len({len(el) for el in s}) == 1:
        return True
    else:
        raise ValueError("Set %r contains sequences of different lengths" % s)
78
+
79
+
80
def set2rel(s):
    """
    Convert a set containing individuals (strings or numbers) into a set of
    unary tuples. Any tuples of strings already in the set are passed through
    unchanged.

    For example:
        - set(['a', 'b']) => set([('a',), ('b',)])
        - set([3, 27]) => set([('3',), ('27',)])

    :type s: set
    :rtype: set of tuple of str
    """
    new = set()
    for elem in s:
        if isinstance(elem, str):
            new.add((elem,))
        elif isinstance(elem, int):
            # Wrap as a unary tuple, matching the documented behaviour above;
            # the previous code added the bare string, which produced a set
            # that is_rel()/arity() could not treat as a relation.
            new.add((str(elem),))
        else:
            new.add(elem)
    return new
102
+
103
+
104
def arity(rel):
    """
    Check the arity of a relation.

    :type rel: set of tuples
    :rtype: int
    """
    # By convention the empty relation has arity 0.
    if not rel:
        return 0
    # All tuples in a well-formed relation share one length, so any
    # representative element will do.
    return len(next(iter(rel)))
113
+
114
+
115
class Valuation(dict):
    """
    A dictionary which represents a model-theoretic Valuation of non-logical constants.

    Keys are strings naming the constants to be interpreted; values are
    individuals (strings), truth values (booleans, for propositional
    letters), or n-ary relations (sets of tuples of strings).

    Indexing a ``Valuation`` with a symbol it does not know raises
    ``Undefined`` (a ``KeyError`` subclass-like signal for this module)
    rather than a plain ``KeyError``.
    """

    def __init__(self, xs):
        """
        :param xs: a list of (symbol, value) pairs.
        :raises ValueError: if a value is neither a string, a bool nor a set
        """
        super().__init__()
        for sym, val in xs:
            if isinstance(val, (str, bool)):
                self[sym] = val
            elif isinstance(val, set):
                # Normalize sets of individuals into sets of unary tuples.
                self[sym] = set2rel(val)
            else:
                raise ValueError(
                    textwrap.fill(
                        "Error in initializing Valuation. "
                        "Unrecognized value for symbol '%s':\n%s" % (sym, val),
                        width=66,
                    )
                )

    def __getitem__(self, key):
        # Report unknown symbols via the module's Undefined error.
        if key not in self:
            raise Undefined("Unknown expression: '%s'" % key)
        return dict.__getitem__(self, key)

    def __str__(self):
        return pformat(self)

    @property
    def domain(self):
        """Set-theoretic domain of the value-space of a Valuation."""
        entities = []
        for val in self.values():
            if isinstance(val, str):
                entities.append(val)
            elif not isinstance(val, bool):
                # val is a relation: collect every non-None tuple element.
                for tuple_ in val:
                    entities.extend(elem for elem in tuple_ if elem is not None)
        return set(entities)

    @property
    def symbols(self):
        """The non-logical constants which the Valuation recognizes."""
        return sorted(self.keys())

    @classmethod
    def fromstring(cls, s):
        """Parse a valuation string (see ``read_valuation``)."""
        return read_valuation(s)
176
+
177
+
178
+ ##########################################
179
+ # REs used by the _read_valuation function
180
+ ##########################################
181
+ _VAL_SPLIT_RE = re.compile(r"\s*=+>\s*")
182
+ _ELEMENT_SPLIT_RE = re.compile(r"\s*,\s*")
183
+ _TUPLES_RE = re.compile(
184
+ r"""\s*
185
+ (\([^)]+\)) # tuple-expression
186
+ \s*""",
187
+ re.VERBOSE,
188
+ )
189
+
190
+
191
def _read_valuation_line(s):
    """
    Read a line in a valuation file.

    Lines are expected to be of the form::

        noosa => n
        girl => {g1, g2}
        chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}

    :param s: input line
    :type s: str
    :return: a pair (symbol, value)
    :rtype: tuple
    :raises ValueError: if the line lacks an ``=>`` separator
    """
    pieces = _VAL_SPLIT_RE.split(s)
    if len(pieces) < 2:
        # Previously a line with no '=>' escaped as an IndexError, which
        # bypassed the ValueError handler in read_valuation() that reports
        # the offending line number.
        raise ValueError("Valuation line must have the form 'symbol => value'")
    symbol = pieces[0]
    value = pieces[1]
    # check whether the value is meant to be a set
    if value.startswith("{"):
        # strip the surrounding braces
        value = value[1:-1]
        tuple_strings = _TUPLES_RE.findall(value)
        # are the set elements tuples?
        if tuple_strings:
            set_elements = []
            for ts in tuple_strings:
                # strip the enclosing parentheses, then split on commas
                element = tuple(_ELEMENT_SPLIT_RE.split(ts[1:-1]))
                set_elements.append(element)
        else:
            set_elements = _ELEMENT_SPLIT_RE.split(value)
        value = set(set_elements)
    return symbol, value
224
+
225
+
226
def read_valuation(s, encoding=None):
    """
    Convert a valuation string into a valuation.

    :param s: a valuation string
    :type s: str
    :param encoding: the encoding of the input string, if it is binary
    :type encoding: str
    :return: a ``nltk.sem`` valuation
    :rtype: Valuation
    :raises ValueError: if a non-blank, non-comment line cannot be parsed
    """
    if encoding is not None:
        s = s.decode(encoding)
    statements = []
    # start=1 so error messages report human-friendly 1-based line numbers
    # (the previous 0-based count labelled the first line as "line 0").
    for linenum, line in enumerate(s.splitlines(), start=1):
        line = line.strip()
        # skip blank lines and '#' comments
        if not line or line.startswith("#"):
            continue
        try:
            statements.append(_read_valuation_line(line))
        except ValueError as e:
            raise ValueError(f"Unable to parse line {linenum}: {line}") from e
    return Valuation(statements)
249
+
250
+
251
class Assignment(dict):
    r"""
    A dictionary which represents an assignment of values to variables.

    An assignment can only assign values from its domain.

    If an unknown expression *a* is passed to a model *M*\ 's
    interpretation function *i*, *i* first checks whether *M*\ 's
    valuation interprets *a* as a constant; failing that, *i* delegates
    the interpretation of *a* to *g*. *g* only assigns values to
    individual variables (members of ``IndividualVariableExpression``
    in the ``logic`` module). If a variable has no value under *g*,
    an ``Undefined`` exception is raised.

    Individual variables are usually written ``'x'``, ``'y'``, ``'w'``,
    ``'z'``, optionally followed by an integer (e.g. ``'x0'``,
    ``'y332'``). Assignments are created with the domain of discourse
    as a parameter:

    >>> from nltk.sem.evaluate import Assignment
    >>> dom = set(['u1', 'u2', 'u3', 'u4'])
    >>> g3 = Assignment(dom, [('x', 'u1'), ('y', 'u2')])
    >>> g3 == {'x': 'u1', 'y': 'u2'}
    True

    There is also a ``print`` format closer to logic-textbook notation:

    >>> print(g3)
    g[u1/x][u2/y]

    An assignment can be updated with the ``add`` method:

    >>> dom = set(['u1', 'u2', 'u3', 'u4'])
    >>> g4 = Assignment(dom)
    >>> g4.add('x', 'u1')
    {'x': 'u1'}

    With no arguments, ``purge()`` is equivalent to ``clear()`` on a dictionary:

    >>> g4.purge()
    >>> g4
    {}

    :param domain: the domain of discourse
    :type domain: set
    :param assign: a list of (varname, value) associations
    :type assign: list
    """

    def __init__(self, domain, assign=None):
        super().__init__()
        self.domain = domain
        for var, val in assign or []:
            # Every assigned value must come from the domain, and every
            # key must look like an individual variable.
            assert val in self.domain, "'{}' is not in the domain: {}".format(
                val,
                self.domain,
            )
            assert is_indvar(var), (
                "Wrong format for an Individual Variable: '%s'" % var
            )
            self[var] = val
        self.variant = None
        self._addvariant()

    def __getitem__(self, key):
        if key not in self:
            raise Undefined("Not recognized as a variable: '%s'" % key)
        return dict.__getitem__(self, key)

    def copy(self):
        # NOTE(review): update() bypasses _addvariant(), so the copy's
        # ``variant`` reflects an empty assignment until the next mutation
        # refreshes it -- confirm whether this matters to callers.
        duplicate = Assignment(self.domain)
        duplicate.update(self)
        return duplicate

    def purge(self, var=None):
        """
        Remove one or all keys (i.e. logic variables) from an
        assignment, and update ``self.variant``.

        :param var: a Variable acting as a key for the assignment.
        """
        if var:
            del self[var]
        else:
            self.clear()
        self._addvariant()
        return None

    def __str__(self):
        """
        Pretty printing for assignments. {'x': 'u'} appears as 'g[u/x]'.
        """
        # Sorted for deterministic output in unit tests.
        return "g" + "".join(
            f"[{val}/{var}]" for val, var in sorted(self.variant)
        )

    def _addvariant(self):
        """
        Recompute the (value, variable) pair list used for pretty printing.
        """
        self.variant = [(val, var) for var, val in self.items()]
        return None

    def add(self, var, val):
        """
        Add a new variable-value pair to the assignment, and update
        ``self.variant``.

        """
        assert val in self.domain, f"{val} is not in the domain {self.domain}"
        assert is_indvar(var), "Wrong format for an Individual Variable: '%s'" % var
        self[var] = val
        self._addvariant()
        return self
378
+
379
+
380
class Model:
    """
    A first order model is a domain *D* of discourse and a valuation *V*.

    A domain *D* is a set, and a valuation *V* is a map that associates
    expressions with values in the model.
    The domain of *V* should be a subset of *D*.

    Construct a new ``Model``.

    :type domain: set
    :param domain: A set of entities representing the domain of discourse of the model.
    :type valuation: Valuation
    :param valuation: the valuation of the model.
    """

    def __init__(self, domain, valuation):
        assert isinstance(domain, set)
        self.domain = domain
        self.valuation = valuation
        # Every entity mentioned by the valuation must belong to the domain.
        if not domain.issuperset(valuation.domain):
            raise Error(
                "The valuation domain, %s, must be a subset of the model's domain, %s"
                % (valuation.domain, domain)
            )

    def __repr__(self):
        return f"({self.domain!r}, {self.valuation!r})"

    def __str__(self):
        return f"Domain = {self.domain},\nValuation = \n{self.valuation}"

    def evaluate(self, expr, g, trace=None):
        """
        Read input expressions, and provide a handler for ``satisfy``
        that blocks further propagation of the ``Undefined`` error.

        :param expr: An ``Expression`` of ``logic`` (as a string; it is
            parsed here with ``Expression.fromstring``).
        :type g: Assignment
        :param g: an assignment to individual variables.
        :rtype: bool or 'Undefined'
        """
        try:
            parsed = Expression.fromstring(expr)
            value = self.satisfy(parsed, g, trace=trace)
            if trace:
                print()
                print(f"'{expr}' evaluates to {value} under M, {g}")
            return value
        except Undefined:
            # Swallow Undefined so callers receive a sentinel string
            # instead of an exception.
            if trace:
                print()
                print(f"'{expr}' is undefined under M, {g}")
            return "Undefined"

    def satisfy(self, parsed, g, trace=None):
        """
        Recursive interpretation function for a formula of first-order logic.

        Raises an ``Undefined`` error when ``parsed`` is an atomic string
        but is not a symbol or an individual variable.

        :return: Returns a truth value or ``Undefined`` if ``parsed`` is\
        complex, and calls the interpretation function ``i`` if ``parsed``\
        is atomic.

        :param parsed: An expression of ``logic``.
        :type g: Assignment
        :param g: an assignment to individual variables.
        """

        if isinstance(parsed, ApplicationExpression):
            function, arguments = parsed.uncurry()
            if isinstance(function, AbstractVariableExpression):
                # It's a predicate expression ("P(x,y)"), so use uncurried arguments.
                # A relation denotes a set of tuples, so truth is membership.
                funval = self.satisfy(function, g)
                argvals = tuple(self.satisfy(arg, g) for arg in arguments)
                return argvals in funval
            else:
                # It must be a lambda expression, so use curried form.
                # Lambda terms denote dicts (see the LambdaExpression case below).
                funval = self.satisfy(parsed.function, g)
                argval = self.satisfy(parsed.argument, g)
                return funval[argval]
        elif isinstance(parsed, NegatedExpression):
            return not self.satisfy(parsed.term, g)
        elif isinstance(parsed, AndExpression):
            return self.satisfy(parsed.first, g) and self.satisfy(parsed.second, g)
        elif isinstance(parsed, OrExpression):
            return self.satisfy(parsed.first, g) or self.satisfy(parsed.second, g)
        elif isinstance(parsed, ImpExpression):
            # Material implication: (p -> q) == (not p or q).
            return (not self.satisfy(parsed.first, g)) or self.satisfy(parsed.second, g)
        elif isinstance(parsed, IffExpression):
            return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
        elif isinstance(parsed, EqualityExpression):
            return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
        elif isinstance(parsed, AllExpression):
            # Universal: the body must hold under every extension g[u/x].
            new_g = g.copy()
            for u in self.domain:
                new_g.add(parsed.variable.name, u)
                if not self.satisfy(parsed.term, new_g):
                    return False
            return True
        elif isinstance(parsed, ExistsExpression):
            # Existential: the body must hold under some extension g[u/x].
            new_g = g.copy()
            for u in self.domain:
                new_g.add(parsed.variable.name, u)
                if self.satisfy(parsed.term, new_g):
                    return True
            return False
        elif isinstance(parsed, IotaExpression):
            # NOTE(review): iota is evaluated exactly like an existential and
            # returns a truth value rather than the unique satisfying
            # individual -- confirm whether this is intended (cf. the
            # module-level TODO about the iterator-based approach to
            # existentials).
            new_g = g.copy()
            for u in self.domain:
                new_g.add(parsed.variable.name, u)
                if self.satisfy(parsed.term, new_g):
                    return True
            return False
        elif isinstance(parsed, LambdaExpression):
            # A lambda term denotes its characteristic function, modelled as
            # a dict mapping each domain entity u to the value of the body
            # under g[u/var].
            cf = {}
            var = parsed.variable.name
            for u in self.domain:
                val = self.satisfy(parsed.term, g.add(var, u))
                # NB the dict would be a lot smaller if we do this:
                # if val: cf[u] = val
                # But then need to deal with cases where f(a) should yield
                # a function rather than just False.
                cf[u] = val
            return cf
        else:
            # Atomic expression: delegate to the interpretation function.
            return self.i(parsed, g, trace)

    # @decorator(trace_eval)
    def i(self, parsed, g, trace=False):
        """
        An interpretation function.

        Assuming that ``parsed`` is atomic:

        - if ``parsed`` is a non-logical constant, calls the valuation *V*
        - else if ``parsed`` is an individual variable, calls assignment *g*
        - else returns ``Undefined``.

        :param parsed: an ``Expression`` of ``logic``.
        :type g: Assignment
        :param g: an assignment to individual variables.
        :return: a semantic value
        :raises Undefined: if ``parsed`` is neither a known symbol nor a
            variable bound by ``g``
        """
        # If parsed is a propositional letter 'p', 'q', etc, it could be in valuation.symbols
        # and also be an IndividualVariableExpression. We want to catch this first case.
        # So there is a procedural consequence to the ordering of clauses here:
        if parsed.variable.name in self.valuation.symbols:
            return self.valuation[parsed.variable.name]
        elif isinstance(parsed, IndividualVariableExpression):
            return g[parsed.variable.name]

        else:
            raise Undefined("Can't find a value for %s" % parsed)

    def satisfiers(self, parsed, varex, g, trace=None, nesting=0):
        """
        Generate the entities from the model's domain that satisfy an open formula.

        :param parsed: an open formula
        :type parsed: Expression
        :param varex: the relevant free individual variable in ``parsed``.
        :type varex: VariableExpression or str
        :param g: a variable assignment
        :type g: Assignment
        :return: a set of the entities that satisfy ``parsed``.
        :raises Undefined: if ``varex`` is not free in ``parsed``
        """

        spacer = " "
        indent = spacer + (spacer * nesting)
        candidates = []

        # Accept either a bare variable name or a Variable object.
        if isinstance(varex, str):
            var = Variable(varex)
        else:
            var = varex

        if var in parsed.free():
            if trace:
                print()
                print(
                    (spacer * nesting)
                    + f"Open formula is '{parsed}' with assignment {g}"
                )
            # Try every entity in the domain as a value for var.
            for u in self.domain:
                new_g = g.copy()
                new_g.add(var.name, u)
                # Reduce verbosity by one level for nested satisfy() calls.
                if trace and trace > 1:
                    lowtrace = trace - 1
                else:
                    lowtrace = 0
                value = self.satisfy(parsed, new_g, lowtrace)

                if trace:
                    print(indent + "(trying assignment %s)" % new_g)

                # parsed == False under g[u/var]?
                if value == False:
                    if trace:
                        print(indent + f"value of '{parsed}' under {new_g} is False")

                # so g[u/var] is a satisfying assignment
                else:
                    candidates.append(u)
                    if trace:
                        print(indent + f"value of '{parsed}' under {new_g} is {value}")

            result = {c for c in candidates}
        # var isn't free in parsed
        else:
            raise Undefined(f"{var.name} is not free in {parsed}")

        return result
596
+
597
+
598
# //////////////////////////////////////////////////////////////////////
# Demo..
# //////////////////////////////////////////////////////////////////////

# Width (in characters) of the '*' banner lines printed by the demos.
mult = 30
603
+
604
# Demo 1: Propositional Logic
#################
def propdemo(trace=None):
    """Example of a propositional model."""

    global val1, dom1, m1, g1
    val1 = Valuation([("P", True), ("Q", True), ("R", False)])
    dom1 = set()
    m1 = Model(dom1, val1)
    g1 = Assignment(dom1)

    banner = "*" * mult
    print()
    print(banner)
    print("Propositional Formulas Demo")
    print(banner)
    print("(Propositional constants treated as nullary predicates)")
    print()
    print("Model m1:\n", m1)
    print(banner)
    sentences = [
        "(P & Q)",
        "(P & R)",
        "- P",
        "- R",
        "- - P",
        "- (P & R)",
        "(P | R)",
        "(R | P)",
        "(R | R)",
        "(- P | R)",
        "(P | - P)",
        "(P -> Q)",
        "(P -> R)",
        "(R -> P)",
        "(P <-> P)",
        "(R <-> R)",
        "(P <-> R)",
    ]

    for sentence in sentences:
        if trace:
            print()
            m1.evaluate(sentence, g1, trace)
        else:
            print(f"The value of '{sentence}' is: {m1.evaluate(sentence, g1)}")
649
+
650
+
651
# Demo 2: FOL Model
#############


def folmodel(quiet=False, trace=None):
    """
    Example of a first-order model.

    Builds the module-level globals ``val2``, ``v2``, ``dom2``, ``m2``
    and ``g2`` used by the other demos; unless ``quiet`` is set, also
    prints sample interpretations and predicate applications.
    """

    global val2, v2, dom2, m2, g2

    v2 = [
        ("adam", "b1"),
        ("betty", "g1"),
        ("fido", "d1"),
        ("girl", {"g1", "g2"}),
        ("boy", {"b1", "b2"}),
        ("dog", {"d1"}),
        ("love", {("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")}),
    ]
    val2 = Valuation(v2)
    dom2 = val2.domain
    m2 = Model(dom2, val2)
    g2 = Assignment(dom2, [("x", "b1"), ("y", "g2")])

    if not quiet:
        print()
        print("*" * mult)
        print("Models Demo")
        print("*" * mult)
        print("Model m2:\n", "-" * 14, "\n", m2)
        print("Variable assignment = ", g2)

        # 'walks' and 'z' are deliberately uninterpreted, to show Undefined.
        exprs = ["adam", "boy", "love", "walks", "x", "y", "z"]
        parsed_exprs = [Expression.fromstring(e) for e in exprs]

        print()
        for parsed in parsed_exprs:
            try:
                print(
                    "The interpretation of '%s' in m2 is %s"
                    % (parsed, m2.i(parsed, g2))
                )
            except Undefined:
                print("The interpretation of '%s' in m2 is Undefined" % parsed)

        # Each entry pairs a predicate name with a *tuple* of argument names.
        # BUG FIX: the first entry used to be ("boy", ("adam")) -- ("adam")
        # is just a bare string, so the loop below iterated over its
        # characters instead of applying 'boy' to the single argument 'adam'.
        applications = [
            ("boy", ("adam",)),
            ("walks", ("adam",)),
            ("love", ("adam", "y")),
            ("love", ("y", "adam")),
        ]

        for (fun, args) in applications:
            try:
                funval = m2.i(Expression.fromstring(fun), g2)
                argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args)
                print(f"{fun}({args}) evaluates to {argsval in funval}")
            except Undefined:
                print(f"{fun}({args}) evaluates to Undefined")
709
+
710
+
711
# Demo 3: FOL
#########


def foldemo(trace=None):
    """
    Interpretation of closed expressions in a first-order model.
    """
    folmodel(quiet=True)

    banner = "*" * mult
    print()
    print(banner)
    print("FOL Formulas Demo")
    print(banner)

    formulas = [
        "love (adam, betty)",
        "(adam = mia)",
        "\\x. (boy(x) | girl(x))",
        "\\x. boy(x)(adam)",
        "\\x y. love(x, y)",
        "\\x y. love(x, y)(adam)(betty)",
        "\\x y. love(x, y)(adam, betty)",
        "\\x y. (boy(x) & love(x, y))",
        "\\x. exists y. (boy(x) & love(x, y))",
        "exists z1. boy(z1)",
        "exists x. (boy(x) & -(x = adam))",
        "exists x. (boy(x) & all y. love(y, x))",
        "all x. (boy(x) | girl(x))",
        "all x. (girl(x) -> exists y. boy(y) & love(x, y))",  # Every girl loves some boy.
        "exists x. (boy(x) & all y. (girl(y) -> love(y, x)))",  # There is some boy that every girl loves.
        "exists x. (boy(x) & all y. (girl(y) -> love(x, y)))",  # Some boy loves every girl.
        "all x. (dog(x) -> - girl(x))",
        "exists x. exists y. (love(x, y) & love(x, y))",
    ]

    for formula in formulas:
        # Reset the assignment so bindings don't leak between formulas.
        g2.purge()
        if trace:
            m2.evaluate(formula, g2, trace)
        else:
            print(f"The value of '{formula}' is: {m2.evaluate(formula, g2)}")
753
+
754
+
755
# Demo 4: Satisfaction
#############


def satdemo(trace=None):
    """Satisfiers of an open formula in a first order model."""

    banner = "*" * mult
    print()
    print(banner)
    print("Satisfiers Demo")
    print(banner)

    folmodel(quiet=True)

    formulas = [
        "boy(x)",
        "(x = x)",
        "(boy(x) | girl(x))",
        "(boy(x) & girl(x))",
        "love(adam, x)",
        "love(x, adam)",
        "-(x = adam)",
        "exists z22. love(x, z22)",
        "exists y. love(y, x)",
        "all y. (girl(y) -> love(x, y))",
        "all y. (girl(y) -> love(y, x))",
        "all y. (girl(y) -> (boy(x) & love(y, x)))",
        "(boy(x) & all y. (girl(y) -> love(x, y)))",
        "(boy(x) & all y. (girl(y) -> love(y, x)))",
        "(boy(x) & exists y. (girl(y) & love(y, x)))",
        "(girl(x) -> dog(x))",
        "all y. (dog(y) -> (x = y))",
        "exists y. love(y, x)",
        "exists y. (love(adam, y) & love(y, x))",
    ]

    if trace:
        print(m2)

    # Print and pre-parse every formula so syntax errors surface up front.
    for fmla in formulas:
        print(fmla)
        Expression.fromstring(fmla)

    parsed = [Expression.fromstring(fmla) for fmla in formulas]

    for p in parsed:
        g2.purge()
        print(f"The satisfiers of '{p}' are: {m2.satisfiers(p, 'x', g2, trace)}")
805
+
806
+
807
def demo(num=0, trace=None):
    """
    Run some demos.

    - num = 1: propositional logic demo
    - num = 2: first order model demo (only if trace is set)
    - num = 3: first order sentences demo
    - num = 4: satisfaction of open formulas demo
    - any other value: run all the demos

    :param trace: trace = 1, or trace = 2 for more verbose tracing
    """
    demos = {1: propdemo, 2: folmodel, 3: foldemo, 4: satdemo}

    # Test membership explicitly instead of catching KeyError: the previous
    # try/except would also fall through and re-run *every* demo whenever
    # the selected demo itself happened to raise a KeyError.
    if num in demos:
        demos[num](trace=trace)
    else:
        for selected in demos.values():
            selected(trace=trace)
826
+
827
+
828
if __name__ == "__main__":
    # Run the first-order model demo (demo 2) with tracing disabled.
    demo(2, trace=0)
llmeval-env/lib/python3.10/site-packages/nltk/sem/glue.py ADDED
@@ -0,0 +1,835 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Glue Semantics
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import os
10
+ from itertools import chain
11
+
12
+ import nltk
13
+ from nltk.internals import Counter
14
+ from nltk.sem import drt, linearlogic
15
+ from nltk.sem.logic import (
16
+ AbstractVariableExpression,
17
+ Expression,
18
+ LambdaExpression,
19
+ Variable,
20
+ VariableExpression,
21
+ )
22
+ from nltk.tag import BigramTagger, RegexpTagger, TrigramTagger, UnigramTagger
23
+
24
# Mapping from specifier words to the semantic type used when looking up
# glue entries; 'default' supplies the type for unlisted specifiers.
SPEC_SEMTYPES = {
    "a": "ex_quant",
    "an": "ex_quant",
    "every": "univ_quant",
    "the": "def_art",
    "no": "no_quant",
    "default": "ex_quant",
}

# Dependency relation labels that may be skipped when matching a word's
# relationship set against the glue dictionary.
OPTIONAL_RELATIONSHIPS = ["nmod", "vmod", "punct"]
34
+
35
+
36
class GlueFormula:
    """
    A pairing of a meaning term (a ``logic`` ``Expression``) with a linear
    logic glue term, plus a set of integer indices used to keep linear
    logic resource-sensitive: two formulas may only combine when their
    index sets are disjoint.
    """

    def __init__(self, meaning, glue, indices=None):
        """
        :param meaning: a ``logic`` ``Expression`` or a string to parse as one
        :param glue: a ``linearlogic`` ``Expression`` or a string to parse as one
        :param indices: a set of ints identifying the resources consumed so far
        :raises RuntimeError: if ``meaning`` or ``glue`` is neither a string
            nor an expression of the expected kind
        """
        if not indices:
            indices = set()

        if isinstance(meaning, str):
            self.meaning = Expression.fromstring(meaning)
        elif isinstance(meaning, Expression):
            self.meaning = meaning
        else:
            raise RuntimeError(
                "Meaning term neither string or expression: %s, %s"
                % (meaning, meaning.__class__)
            )

        if isinstance(glue, str):
            self.glue = linearlogic.LinearLogicParser().parse(glue)
        elif isinstance(glue, linearlogic.Expression):
            self.glue = glue
        else:
            raise RuntimeError(
                "Glue term neither string or expression: %s, %s"
                % (glue, glue.__class__)
            )

        self.indices = indices

    def applyto(self, arg):
        """self = (\\x.(walk x), (subj -o f))
        arg = (john , subj)
        returns ((walk john), f)
        """
        # Linear logic: each resource may be used only once, so the two
        # formulas' index sets must not overlap.
        if self.indices & arg.indices:  # if the sets are NOT disjoint
            raise linearlogic.LinearLogicApplicationException(
                f"'{self}' applied to '{arg}'. Indices are not disjoint."
            )
        else:  # if the sets ARE disjoint
            return_indices = self.indices | arg.indices

        try:
            # Combine the glue sides; may fail if the antecedent of
            # self.glue does not match arg.glue.
            return_glue = linearlogic.ApplicationExpression(
                self.glue, arg.glue, arg.indices
            )
        except linearlogic.LinearLogicApplicationException as e:
            # Re-raise with simplified terms for a more readable message.
            raise linearlogic.LinearLogicApplicationException(
                f"'{self.simplify()}' applied to '{arg.simplify()}'"
            ) from e

        # Lambda-abstract the argument's meaning over each dependency
        # variable (reversed so the innermost abstraction is applied first).
        arg_meaning_abstracted = arg.meaning
        if return_indices:
            for dep in self.glue.simplify().antecedent.dependencies[
                ::-1
            ]:  # if self.glue is (A -o B), dep is in A.dependencies
                arg_meaning_abstracted = self.make_LambdaExpression(
                    Variable("v%s" % dep), arg_meaning_abstracted
                )
        return_meaning = self.meaning.applyto(arg_meaning_abstracted)

        # self.__class__ (not GlueFormula) so subclasses stay closed
        # under application.
        return self.__class__(return_meaning, return_glue, return_indices)

    def make_VariableExpression(self, name):
        # Factory hook; presumably overridden by DRT-based subclasses to
        # build DRT variables instead -- confirm against nltk.sem.drt_glue.
        return VariableExpression(name)

    def make_LambdaExpression(self, variable, term):
        # Factory hook, same rationale as make_VariableExpression above.
        return LambdaExpression(variable, term)

    def lambda_abstract(self, other):
        """Abstract this formula over ``other``'s meaning variable, turning
        the glue side into the implication (other.glue -o self.glue)."""
        assert isinstance(other, GlueFormula)
        assert isinstance(other.meaning, AbstractVariableExpression)
        return self.__class__(
            self.make_LambdaExpression(other.meaning.variable, self.meaning),
            linearlogic.ImpExpression(other.glue, self.glue),
        )

    def compile(self, counter=None):
        """From Iddo Lev's PhD Dissertation p108-109"""
        # counter supplies fresh index numbers for the compiled premises.
        if not counter:
            counter = Counter()
        (compiled_glue, new_forms) = self.glue.simplify().compile_pos(
            counter, self.__class__
        )
        return new_forms + [
            self.__class__(self.meaning, compiled_glue, {counter.get()})
        ]

    def simplify(self):
        """Return a copy with both the meaning and glue sides simplified;
        the index set is carried over unchanged."""
        return self.__class__(
            self.meaning.simplify(), self.glue.simplify(), self.indices
        )

    def __eq__(self, other):
        # NOTE(review): equality ignores self.indices, and defining __eq__
        # without __hash__ makes instances unhashable -- confirm neither
        # is relied upon elsewhere before changing.
        return (
            self.__class__ == other.__class__
            and self.meaning == other.meaning
            and self.glue == other.glue
        )

    def __ne__(self, other):
        return not self == other

    # sorting for use in doctests which must be deterministic
    def __lt__(self, other):
        return str(self) < str(other)

    def __str__(self):
        assert isinstance(self.indices, set)
        accum = f"{self.meaning} : {self.glue}"
        if self.indices:
            accum += (
                " : {" + ", ".join(str(index) for index in sorted(self.indices)) + "}"
            )
        return accum

    def __repr__(self):
        return "%s" % self
151
+
152
+
153
+ class GlueDict(dict):
154
+ def __init__(self, filename, encoding=None):
155
+ self.filename = filename
156
+ self.file_encoding = encoding
157
+ self.read_file()
158
+
159
+ def read_file(self, empty_first=True):
160
+ if empty_first:
161
+ self.clear()
162
+
163
+ try:
164
+ contents = nltk.data.load(
165
+ self.filename, format="text", encoding=self.file_encoding
166
+ )
167
+ # TODO: the above can't handle zip files, but this should anyway be fixed in nltk.data.load()
168
+ except LookupError as e:
169
+ try:
170
+ contents = nltk.data.load(
171
+ "file:" + self.filename, format="text", encoding=self.file_encoding
172
+ )
173
+ except LookupError:
174
+ raise e
175
+ lines = contents.splitlines()
176
+
177
+ for line in lines: # example: 'n : (\\x.(<word> x), (v-or))'
178
+ # lambdacalc -^ linear logic -^
179
+ line = line.strip() # remove trailing newline
180
+ if not len(line):
181
+ continue # skip empty lines
182
+ if line[0] == "#":
183
+ continue # skip commented out lines
184
+
185
+ parts = line.split(
186
+ " : ", 2
187
+ ) # ['verb', '(\\x.(<word> x), ( subj -o f ))', '[subj]']
188
+
189
+ glue_formulas = []
190
+ paren_count = 0
191
+ tuple_start = 0
192
+ tuple_comma = 0
193
+
194
+ relationships = None
195
+
196
+ if len(parts) > 1:
197
+ for (i, c) in enumerate(parts[1]):
198
+ if c == "(":
199
+ if paren_count == 0: # if it's the first '(' of a tuple
200
+ tuple_start = i + 1 # then save the index
201
+ paren_count += 1
202
+ elif c == ")":
203
+ paren_count -= 1
204
+ if paren_count == 0: # if it's the last ')' of a tuple
205
+ meaning_term = parts[1][
206
+ tuple_start:tuple_comma
207
+ ] # '\\x.(<word> x)'
208
+ glue_term = parts[1][tuple_comma + 1 : i] # '(v-r)'
209
+ glue_formulas.append(
210
+ [meaning_term, glue_term]
211
+ ) # add the GlueFormula to the list
212
+ elif c == ",":
213
+ if (
214
+ paren_count == 1
215
+ ): # if it's a comma separating the parts of the tuple
216
+ tuple_comma = i # then save the index
217
+ elif c == "#": # skip comments at the ends of lines
218
+ if (
219
+ paren_count != 0
220
+ ): # if the line hasn't parsed correctly so far
221
+ raise RuntimeError(
222
+ "Formula syntax is incorrect for entry " + line
223
+ )
224
+ break # break to the next line
225
+
226
+ if len(parts) > 2: # if there is a relationship entry at the end
227
+ rel_start = parts[2].index("[") + 1
228
+ rel_end = parts[2].index("]")
229
+ if rel_start == rel_end:
230
+ relationships = frozenset()
231
+ else:
232
+ relationships = frozenset(
233
+ r.strip() for r in parts[2][rel_start:rel_end].split(",")
234
+ )
235
+
236
+ try:
237
+ start_inheritance = parts[0].index("(")
238
+ end_inheritance = parts[0].index(")")
239
+ sem = parts[0][:start_inheritance].strip()
240
+ supertype = parts[0][start_inheritance + 1 : end_inheritance]
241
+ except:
242
+ sem = parts[0].strip()
243
+ supertype = None
244
+
245
+ if sem not in self:
246
+ self[sem] = {}
247
+
248
+ if (
249
+ relationships is None
250
+ ): # if not specified for a specific relationship set
251
+ # add all relationship entries for parents
252
+ if supertype:
253
+ for rels in self[supertype]:
254
+ if rels not in self[sem]:
255
+ self[sem][rels] = []
256
+ glue = self[supertype][rels]
257
+ self[sem][rels].extend(glue)
258
+ self[sem][rels].extend(
259
+ glue_formulas
260
+ ) # add the glue formulas to every rel entry
261
+ else:
262
+ if None not in self[sem]:
263
+ self[sem][None] = []
264
+ self[sem][None].extend(
265
+ glue_formulas
266
+ ) # add the glue formulas to every rel entry
267
+ else:
268
+ if relationships not in self[sem]:
269
+ self[sem][relationships] = []
270
+ if supertype:
271
+ self[sem][relationships].extend(self[supertype][relationships])
272
+ self[sem][relationships].extend(
273
+ glue_formulas
274
+ ) # add the glue entry to the dictionary
275
+
276
+ def __str__(self):
277
+ accum = ""
278
+ for pos in self:
279
+ str_pos = "%s" % pos
280
+ for relset in self[pos]:
281
+ i = 1
282
+ for gf in self[pos][relset]:
283
+ if i == 1:
284
+ accum += str_pos + ": "
285
+ else:
286
+ accum += " " * (len(str_pos) + 2)
287
+ accum += "%s" % gf
288
+ if relset and i == len(self[pos][relset]):
289
+ accum += " : %s" % relset
290
+ accum += "\n"
291
+ i += 1
292
+ return accum
293
+
294
    def to_glueformula_list(self, depgraph, node=None, counter=None, verbose=False):
        """Collect the glue formulas for `node` and, recursively, for all of
        its dependents in `depgraph`.

        When called without a node, pick the first dependent of the graph's
        0th node as the root and restart with a fresh Counter.
        """
        if node is None:
            # TODO: should it be depgraph.root? Is this code tested?
            top = depgraph.nodes[0]
            depList = list(chain.from_iterable(top["deps"].values()))
            root = depgraph.nodes[depList[0]]

            return self.to_glueformula_list(depgraph, root, Counter(), verbose)

        # Formulas for this node first, then depth-first over its dependents.
        glueformulas = self.lookup(node, depgraph, counter)
        for dep_idx in chain.from_iterable(node["deps"].values()):
            dep = depgraph.nodes[dep_idx]
            glueformulas.extend(
                self.to_glueformula_list(depgraph, dep, counter, verbose)
            )
        return glueformulas
310
+
311
    def lookup(self, node, depgraph, counter):
        """Return the list of glue formulas for a single depgraph node.

        Tries each plausible semtype name in order and uses the first one
        present in this dictionary; returns [] when none matches.

        :raises KeyError: if a semtype exists but no entry matches the
            node's actual dependent relationships.
        """
        semtype_names = self.get_semtypes(node)

        semtype = None
        for name in semtype_names:
            if name in self:
                semtype = self[name]
                break
        if semtype is None:
            # raise KeyError, "There is no GlueDict entry for sem type '%s' (for '%s')" % (sem, word)
            return []

        self.add_missing_dependencies(node, depgraph)

        lookup = self._lookup_semtype_option(semtype, node, depgraph)

        # NOTE(review): _lookup_semtype_option may return None when no entry
        # matches at all; len(None) here would raise TypeError rather than
        # the intended KeyError — confirm whether that path is reachable.
        if not len(lookup):
            raise KeyError(
                "There is no GlueDict entry for sem type of '%s' "
                "with tag '%s', and rel '%s'" % (node["word"], node["tag"], node["rel"])
            )

        return self.get_glueformulas_from_semtype_entry(
            lookup, node["word"], node, depgraph, counter
        )
336
+
337
+ def add_missing_dependencies(self, node, depgraph):
338
+ rel = node["rel"].lower()
339
+
340
+ if rel == "main":
341
+ headnode = depgraph.nodes[node["head"]]
342
+ subj = self.lookup_unique("subj", headnode, depgraph)
343
+ relation = subj["rel"]
344
+ node["deps"].setdefault(relation, [])
345
+ node["deps"][relation].append(subj["address"])
346
+ # node['deps'].append(subj['address'])
347
+
348
    def _lookup_semtype_option(self, semtype, node, depgraph):
        """Select the glue entry of `semtype` that matches the node's set of
        (non-optional) dependent relations, falling back to the largest
        proper-subset match, then to the catch-all None entry."""
        relationships = frozenset(
            depgraph.nodes[dep]["rel"].lower()
            for dep in chain.from_iterable(node["deps"].values())
            if depgraph.nodes[dep]["rel"].lower() not in OPTIONAL_RELATIONSHIPS
        )

        try:
            lookup = semtype[relationships]
        except KeyError:
            # An exact match is not found, so find the best match where
            # 'best' is defined as the glue entry whose relationship set has the
            # most relations of any possible relationship set that is a subset
            # of the actual depgraph
            best_match = frozenset()
            for relset_option in set(semtype) - {None}:
                # '<' on frozensets is proper-subset: the candidate must be
                # strictly contained in the node's actual relations.
                if (
                    len(relset_option) > len(best_match)
                    and relset_option < relationships
                ):
                    best_match = relset_option
            if not best_match:
                if None in semtype:
                    best_match = None
                else:
                    return None
            lookup = semtype[best_match]

        return lookup
377
+
378
+ def get_semtypes(self, node):
379
+ """
380
+ Based on the node, return a list of plausible semtypes in order of
381
+ plausibility.
382
+ """
383
+ rel = node["rel"].lower()
384
+ word = node["word"].lower()
385
+
386
+ if rel == "spec":
387
+ if word in SPEC_SEMTYPES:
388
+ return [SPEC_SEMTYPES[word]]
389
+ else:
390
+ return [SPEC_SEMTYPES["default"]]
391
+ elif rel in ["nmod", "vmod"]:
392
+ return [node["tag"], rel]
393
+ else:
394
+ return [node["tag"]]
395
+
396
    def get_glueformulas_from_semtype_entry(
        self, lookup, word, node, depgraph, counter
    ):
        """Instantiate each (meaning, glue) pair in `lookup` as a glue
        formula for `word`, labelling the glue side from the depgraph.

        The first formula is named after the word itself; subsequent ones
        get a numeric suffix (word2, word3, ...) to stay distinct.
        """
        glueformulas = []

        glueFormulaFactory = self.get_GlueFormula_factory()
        for meaning, glue in lookup:
            gf = glueFormulaFactory(self.get_meaning_formula(meaning, word), glue)
            if not len(glueformulas):
                gf.word = word
            else:
                gf.word = f"{word}{len(glueformulas) + 1}"

            # Replace abstract labels (f, v, r, ...) in the glue term with
            # concrete ones derived from this node's position in the graph.
            gf.glue = self.initialize_labels(gf.glue, node, depgraph, counter.get())

            glueformulas.append(gf)
        return glueformulas
413
+
414
+ def get_meaning_formula(self, generic, word):
415
+ """
416
+ :param generic: A meaning formula string containing the
417
+ parameter "<word>"
418
+ :param word: The actual word to be replace "<word>"
419
+ """
420
+ word = word.replace(".", "")
421
+ return generic.replace("<word>", word)
422
+
423
+ def initialize_labels(self, expr, node, depgraph, unique_index):
424
+ if isinstance(expr, linearlogic.AtomicExpression):
425
+ name = self.find_label_name(expr.name.lower(), node, depgraph, unique_index)
426
+ if name[0].isupper():
427
+ return linearlogic.VariableExpression(name)
428
+ else:
429
+ return linearlogic.ConstantExpression(name)
430
+ else:
431
+ return linearlogic.ImpExpression(
432
+ self.initialize_labels(expr.antecedent, node, depgraph, unique_index),
433
+ self.initialize_labels(expr.consequent, node, depgraph, unique_index),
434
+ )
435
+
436
    def find_label_name(self, name, node, depgraph, unique_index):
        """Resolve an abstract label name (possibly dotted, e.g. 'subj.f')
        to a concrete label string for `node`.

        Dotted names walk the graph: the part before the first dot selects
        a related node ('super' = the head; otherwise a unique dependent
        with that relation), and resolution recurses on the remainder.
        """
        try:
            # EAFP: a ValueError from index() means the name has no dot and
            # is handled as a simple name below.
            dot = name.index(".")

            before_dot = name[:dot]
            after_dot = name[dot + 1 :]
            if before_dot == "super":
                return self.find_label_name(
                    after_dot, depgraph.nodes[node["head"]], depgraph, unique_index
                )
            else:
                return self.find_label_name(
                    after_dot,
                    self.lookup_unique(before_dot, node, depgraph),
                    depgraph,
                    unique_index,
                )
        except ValueError:
            lbl = self.get_label(node)
            if name == "f":
                return lbl
            elif name == "v":
                return "%sv" % lbl
            elif name == "r":
                return "%sr" % lbl
            elif name == "super":
                return self.get_label(depgraph.nodes[node["head"]])
            elif name == "var":
                # Uppercased + counter suffix so each use is a fresh variable.
                return f"{lbl.upper()}{unique_index}"
            elif name == "a":
                return self.get_label(self.lookup_unique("conja", node, depgraph))
            elif name == "b":
                return self.get_label(self.lookup_unique("conjb", node, depgraph))
            else:
                # Any other name is treated as a dependent relation.
                return self.get_label(self.lookup_unique(name, node, depgraph))
471
+
472
+ def get_label(self, node):
473
+ """
474
+ Pick an alphabetic character as identifier for an entity in the model.
475
+
476
+ :param value: where to index into the list of characters
477
+ :type value: int
478
+ """
479
+ value = node["address"]
480
+
481
+ letter = [
482
+ "f",
483
+ "g",
484
+ "h",
485
+ "i",
486
+ "j",
487
+ "k",
488
+ "l",
489
+ "m",
490
+ "n",
491
+ "o",
492
+ "p",
493
+ "q",
494
+ "r",
495
+ "s",
496
+ "t",
497
+ "u",
498
+ "v",
499
+ "w",
500
+ "x",
501
+ "y",
502
+ "z",
503
+ "a",
504
+ "b",
505
+ "c",
506
+ "d",
507
+ "e",
508
+ ][value - 1]
509
+ num = int(value) // 26
510
+ if num > 0:
511
+ return letter + str(num)
512
+ else:
513
+ return letter
514
+
515
+ def lookup_unique(self, rel, node, depgraph):
516
+ """
517
+ Lookup 'key'. There should be exactly one item in the associated relation.
518
+ """
519
+ deps = [
520
+ depgraph.nodes[dep]
521
+ for dep in chain.from_iterable(node["deps"].values())
522
+ if depgraph.nodes[dep]["rel"].lower() == rel.lower()
523
+ ]
524
+
525
+ if len(deps) == 0:
526
+ raise KeyError(
527
+ "'{}' doesn't contain a feature '{}'".format(node["word"], rel)
528
+ )
529
+ elif len(deps) > 1:
530
+ raise KeyError(
531
+ "'{}' should only have one feature '{}'".format(node["word"], rel)
532
+ )
533
+ else:
534
+ return deps[0]
535
+
536
    def get_GlueFormula_factory(self):
        # Factory hook: subclasses (e.g. DrtGlueDict) override this to
        # construct formulas of a different concrete type.
        return GlueFormula
538
+
539
+
540
class Glue:
    """Driver for glue semantics: dependency-parse a sentence, attach glue
    formulas from a semtype lexicon, and derive meaning readings by linear
    logic deduction."""

    def __init__(
        self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False
    ):
        """
        :param semtype_file: path to the semtype lexicon; defaults to the
            sample 'glue.semtype' grammar.
        :param remove_duplicates: if True, drop readings the prover shows
            to be logically equivalent to one already found.
        :param depparser: dependency parser to use (lazily defaulted to
            MaltParser in dep_parse).
        """
        self.verbose = verbose
        self.remove_duplicates = remove_duplicates
        self.depparser = depparser

        from nltk import Prover9

        self.prover = Prover9()

        if semtype_file:
            self.semtype_file = semtype_file
        else:
            self.semtype_file = os.path.join(
                "grammars", "sample_grammars", "glue.semtype"
            )

    def train_depparser(self, depgraphs=None):
        # Train from the given graphs, or from the bundled CoNLL sample.
        if depgraphs:
            self.depparser.train(depgraphs)
        else:
            self.depparser.train_from_file(
                nltk.data.find(
                    os.path.join("grammars", "sample_grammars", "glue_train.conll")
                )
            )

    def parse_to_meaning(self, sentence):
        """Return every meaning reading derivable from every parse of
        `sentence` (a list of tokens)."""
        readings = []
        for agenda in self.parse_to_compiled(sentence):
            readings.extend(self.get_readings(agenda))
        return readings

    def get_readings(self, agenda):
        """Derive readings from an agenda of compiled glue formulas by
        repeatedly applying non-atomic (implication) formulas to atomic
        ones whose index sets are disjoint (each premise used once).

        A formula counts as a complete reading when its index set covers
        the whole original agenda.
        """
        readings = []
        agenda_length = len(agenda)
        # Derived formulas, bucketed by their (simplified) glue term so new
        # partners can be found by unification against the bucket keys.
        atomics = dict()
        nonatomics = dict()
        while agenda:  # is not empty
            cur = agenda.pop()
            glue_simp = cur.glue.simplify()
            if isinstance(
                glue_simp, linearlogic.ImpExpression
            ):  # if cur.glue is non-atomic
                for key in atomics:
                    try:
                        if isinstance(cur.glue, linearlogic.ApplicationExpression):
                            bindings = cur.glue.bindings
                        else:
                            bindings = linearlogic.BindingDict()
                        glue_simp.antecedent.unify(key, bindings)
                        for atomic in atomics[key]:
                            if not (
                                cur.indices & atomic.indices
                            ):  # if the sets of indices are disjoint
                                try:
                                    agenda.append(cur.applyto(atomic))
                                except linearlogic.LinearLogicApplicationException:
                                    pass
                    except linearlogic.UnificationException:
                        pass
                try:
                    nonatomics[glue_simp.antecedent].append(cur)
                except KeyError:
                    nonatomics[glue_simp.antecedent] = [cur]

            else:  # else cur.glue is atomic
                for key in nonatomics:
                    for nonatomic in nonatomics[key]:
                        try:
                            if isinstance(
                                nonatomic.glue, linearlogic.ApplicationExpression
                            ):
                                bindings = nonatomic.glue.bindings
                            else:
                                bindings = linearlogic.BindingDict()
                            glue_simp.unify(key, bindings)
                            if not (
                                cur.indices & nonatomic.indices
                            ):  # if the sets of indices are disjoint
                                try:
                                    agenda.append(nonatomic.applyto(cur))
                                except linearlogic.LinearLogicApplicationException:
                                    pass
                        except linearlogic.UnificationException:
                            pass
                try:
                    atomics[glue_simp].append(cur)
                except KeyError:
                    atomics[glue_simp] = [cur]

        # Formulas that consumed every original premise are the readings.
        for entry in atomics:
            for gf in atomics[entry]:
                if len(gf.indices) == agenda_length:
                    self._add_to_reading_list(gf, readings)
        for entry in nonatomics:
            for gf in nonatomics[entry]:
                if len(gf.indices) == agenda_length:
                    self._add_to_reading_list(gf, readings)
        return readings

    def _add_to_reading_list(self, glueformula, reading_list):
        """Append the formula's meaning, unless duplicate removal is on and
        the prover shows it equivalent to an existing reading."""
        add_reading = True
        if self.remove_duplicates:
            for reading in reading_list:
                try:
                    if reading.equiv(glueformula.meaning, self.prover):
                        add_reading = False
                        break
                except Exception as e:
                    # if there is an exception, the syntax of the formula
                    # may not be understandable by the prover, so don't
                    # throw out the reading.
                    print("Error when checking logical equality of statements", e)

        if add_reading:
            reading_list.append(glueformula.meaning)

    def parse_to_compiled(self, sentence):
        # One compiled-premise agenda per dependency parse of the sentence.
        gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)]
        return [self.gfl_to_compiled(gfl) for gfl in gfls]

    def dep_parse(self, sentence):
        """
        Return a dependency graph for the sentence.

        :param sentence: the sentence to be parsed
        :type sentence: list(str)
        :rtype: DependencyGraph
        """

        # Lazy-initialize the depparser
        if self.depparser is None:
            from nltk.parse import MaltParser

            self.depparser = MaltParser(tagger=self.get_pos_tagger())
        if not self.depparser._trained:
            self.train_depparser()
        return self.depparser.parse(sentence, verbose=self.verbose)

    def depgraph_to_glue(self, depgraph):
        return self.get_glue_dict().to_glueformula_list(depgraph)

    def get_glue_dict(self):
        # Overridden by DrtGlue to supply a DRT-producing dictionary.
        return GlueDict(self.semtype_file)

    def gfl_to_compiled(self, gfl):
        """Compile each glue formula into indexed premises, sharing one
        index counter across the whole list."""
        index_counter = Counter()
        return_list = []
        for gf in gfl:
            return_list.extend(gf.compile(index_counter))

        if self.verbose:
            print("Compiled Glue Premises:")
            for cgf in return_list:
                print(cgf)

        return return_list

    def get_pos_tagger(self):
        """Build a layered POS tagger: regexp fallbacks, then Brown-trained
        unigram/bigram/trigram taggers, topped with quantifier overrides."""
        from nltk.corpus import brown

        regexp_tagger = RegexpTagger(
            [
                (r"^-?[0-9]+(\.[0-9]+)?$", "CD"),  # cardinal numbers
                (r"(The|the|A|a|An|an)$", "AT"),  # articles
                (r".*able$", "JJ"),  # adjectives
                (r".*ness$", "NN"),  # nouns formed from adjectives
                (r".*ly$", "RB"),  # adverbs
                (r".*s$", "NNS"),  # plural nouns
                (r".*ing$", "VBG"),  # gerunds
                (r".*ed$", "VBD"),  # past tense verbs
                (r".*", "NN"),  # nouns (default)
            ]
        )
        brown_train = brown.tagged_sents(categories="news")
        unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger)
        bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger)
        trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger)

        # Override particular words
        main_tagger = RegexpTagger(
            [(r"(A|a|An|an)$", "ex_quant"), (r"(Every|every|All|all)$", "univ_quant")],
            backoff=trigram_tagger,
        )

        return main_tagger
729
+
730
+
731
class DrtGlueFormula(GlueFormula):
    """A glue formula whose meaning side is a DRT expression rather than a
    plain first-order logic expression."""

    def __init__(self, meaning, glue, indices=None):
        """
        :param meaning: DRT expression, or a string to parse into one
        :param glue: linear logic expression, or a string to parse into one
        :param indices: set of premise indices consumed so far
        :raises RuntimeError: if meaning/glue is neither string nor expression
        """
        if not indices:
            indices = set()

        if isinstance(meaning, str):
            self.meaning = drt.DrtExpression.fromstring(meaning)
        elif isinstance(meaning, drt.DrtExpression):
            self.meaning = meaning
        else:
            raise RuntimeError(
                "Meaning term neither string or expression: %s, %s"
                % (meaning, meaning.__class__)
            )

        if isinstance(glue, str):
            self.glue = linearlogic.LinearLogicParser().parse(glue)
        elif isinstance(glue, linearlogic.Expression):
            self.glue = glue
        else:
            raise RuntimeError(
                "Glue term neither string or expression: %s, %s"
                % (glue, glue.__class__)
            )

        self.indices = indices

    def make_VariableExpression(self, name):
        # DRT counterparts of the base class's logic constructors.
        return drt.DrtVariableExpression(name)

    def make_LambdaExpression(self, variable, term):
        return drt.DrtLambdaExpression(variable, term)
763
+
764
+
765
class DrtGlueDict(GlueDict):
    """A GlueDict whose entries are instantiated as DRT glue formulas."""

    def get_GlueFormula_factory(self):
        # Construct DRS-based formulas instead of plain logic ones.
        return DrtGlueFormula
768
+
769
+
770
class DrtGlue(Glue):
    """Glue-semantics driver that yields DRT meaning representations."""

    def __init__(
        self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False
    ):
        # Fall back to the DRT-specific semtype lexicon when none is given.
        semtype_file = semtype_file or os.path.join(
            "grammars", "sample_grammars", "drt_glue.semtype"
        )
        Glue.__init__(self, semtype_file, remove_duplicates, depparser, verbose)

    def get_glue_dict(self):
        """Use the DRT-producing dictionary for this driver."""
        return DrtGlueDict(self.semtype_file)
782
+
783
+
784
def demo(show_example=-1):
    """Parse a set of demo sentences with MaltParser + glue semantics and
    print every derived meaning reading.

    :param show_example: index of a single example to run, or -1 for all
    """
    from nltk.parse import MaltParser

    examples = [
        "David sees Mary",
        "David eats a sandwich",
        "every man chases a dog",
        "every man believes a dog sleeps",
        "John gives David a sandwich",
        "John chases himself",
    ]
    # 'John persuades David to order a pizza',
    # 'John tries to go',
    # 'John tries to find a unicorn',
    # 'John seems to vanish',
    # 'a unicorn seems to approach',
    # 'every big cat leaves',
    # 'every gray cat leaves',
    # 'every big gray cat leaves',
    # 'a former senator leaves',

    print("============== DEMO ==============")

    # Closed-vocabulary tagger: enough for the demo sentences without
    # needing a trained model.
    tagger = RegexpTagger(
        [
            ("^(David|Mary|John)$", "NNP"),
            (
                "^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$",
                "VB",
            ),
            ("^(go|order|vanish|find|approach)$", "VB"),
            ("^(a)$", "ex_quant"),
            ("^(every)$", "univ_quant"),
            ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"),
            ("^(big|gray|former)$", "JJ"),
            ("^(him|himself)$", "PRP"),
        ]
    )

    depparser = MaltParser(tagger=tagger)
    glue = Glue(depparser=depparser, verbose=False)

    for (i, sentence) in enumerate(examples):
        if i == show_example or show_example == -1:
            print(f"[[[Example {i}]]] {sentence}")
            for reading in glue.parse_to_meaning(sentence.split()):
                print(reading.simplify())
            print("")
832
+
833
+
834
if __name__ == "__main__":
    # Run the worked examples when executed as a script.
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/sem/hole.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Logic
2
+ #
3
+ # Author: Peter Wang
4
+ # Updated by: Dan Garrette <[email protected]>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ An implementation of the Hole Semantics model, following Blackburn and Bos,
12
+ Representation and Inference for Natural Language (CSLI, 2005).
13
+
14
+ The semantic representations are built by the grammar hole.fcfg.
15
+ This module contains driver code to read in sentences and parse them
16
+ according to a hole semantics grammar.
17
+
18
+ After parsing, the semantic representation is in the form of an underspecified
19
+ representation that is not easy to read. We use a "plugging" algorithm to
20
+ convert that representation into first-order logic formulas.
21
+ """
22
+
23
+ from functools import reduce
24
+
25
+ from nltk.parse import load_parser
26
+ from nltk.sem.logic import (
27
+ AllExpression,
28
+ AndExpression,
29
+ ApplicationExpression,
30
+ ExistsExpression,
31
+ IffExpression,
32
+ ImpExpression,
33
+ LambdaExpression,
34
+ NegatedExpression,
35
+ OrExpression,
36
+ )
37
+ from nltk.sem.skolemize import skolemize
38
+
39
+ # Note that in this code there may be multiple types of trees being referred to:
40
+ #
41
+ # 1. parse trees
42
+ # 2. the underspecified representation
43
+ # 3. first-order logic formula trees
44
+ # 4. the search space when plugging (search tree)
45
+ #
46
+
47
+
48
class Constants:
    """Predicate tags used in the underspecified representation (USR),
    plus a map from tag to the first-order-logic constructor it denotes."""

    ALL = "ALL"
    EXISTS = "EXISTS"
    NOT = "NOT"
    AND = "AND"
    OR = "OR"
    IMP = "IMP"
    IFF = "IFF"
    PRED = "PRED"
    LEQ = "LEQ"
    HOLE = "HOLE"
    LABEL = "LABEL"

    # Binary constructors suitable for functools.reduce: the quantifier
    # entries take (variable, body); the connectives take (left, right).
    MAP = {
        ALL: lambda v, e: AllExpression(v.variable, e),
        EXISTS: lambda v, e: ExistsExpression(v.variable, e),
        NOT: NegatedExpression,
        AND: AndExpression,
        OR: OrExpression,
        IMP: ImpExpression,
        IFF: IffExpression,
        PRED: ApplicationExpression,
    }
71
+
72
+
73
class HoleSemantics:
    """
    This class holds the broken-down components of a hole semantics, i.e. it
    extracts the holes, labels, logic formula fragments and constraints out of
    a big conjunction of such as produced by the hole semantics grammar. It
    then provides some operations on the semantics dealing with holes, labels
    and finding legal ways to plug holes with labels.
    """

    def __init__(self, usr):
        """
        Constructor. `usr' is a ``sem.Expression`` representing an
        Underspecified Representation Structure (USR). A USR has the following
        special predicates:
        ALL(l,v,n),
        EXISTS(l,v,n),
        AND(l,n,n),
        OR(l,n,n),
        IMP(l,n,n),
        IFF(l,n,n),
        PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions,
        LEQ(n,n),
        HOLE(n),
        LABEL(n)
        where l is the label of the node described by the predicate, n is either
        a label or a hole, and v is a variable.
        """
        self.holes = set()
        self.labels = set()
        self.fragments = {}  # mapping of label -> formula fragment
        self.constraints = set()  # set of Constraints
        self._break_down(usr)
        self.top_most_labels = self._find_top_most_labels()
        self.top_hole = self._find_top_hole()

    def is_node(self, x):
        """
        Return true if x is a node (label or hole) in this semantic
        representation.
        """
        return x in (self.labels | self.holes)

    def _break_down(self, usr):
        """
        Extract holes, labels, formula fragments and constraints from the hole
        semantics underspecified representation (USR).
        """
        if isinstance(usr, AndExpression):
            # A USR is a conjunction of predicates; recurse into both sides.
            self._break_down(usr.first)
            self._break_down(usr.second)
        elif isinstance(usr, ApplicationExpression):
            func, args = usr.uncurry()
            if func.variable.name == Constants.LEQ:
                self.constraints.add(Constraint(args[0], args[1]))
            elif func.variable.name == Constants.HOLE:
                self.holes.add(args[0])
            elif func.variable.name == Constants.LABEL:
                self.labels.add(args[0])
            else:
                label = args[0]
                assert label not in self.fragments
                self.fragments[label] = (func, args[1:])
        else:
            # NOTE(review): `usr.label()` looks odd for a sem.Expression in
            # this error path — confirm it doesn't itself raise here.
            raise ValueError(usr.label())

    def _find_top_nodes(self, node_list):
        # A node is "top" if no fragment mentions it as an argument.
        top_nodes = node_list.copy()
        for f in self.fragments.values():
            # the label is the first argument of the predicate
            args = f[1]
            for arg in args:
                if arg in node_list:
                    top_nodes.discard(arg)
        return top_nodes

    def _find_top_most_labels(self):
        """
        Return the set of labels which are not referenced directly as part of
        another formula fragment. These will be the top-most labels for the
        subtree that they are part of.
        """
        return self._find_top_nodes(self.labels)

    def _find_top_hole(self):
        """
        Return the hole that will be the top of the formula tree.
        """
        top_holes = self._find_top_nodes(self.holes)
        assert len(top_holes) == 1  # it must be unique
        return top_holes.pop()

    def pluggings(self):
        """
        Calculate and return all the legal pluggings (mappings of labels to
        holes) of this semantics given the constraints.
        """
        record = []
        self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record)
        return record

    def _plug_nodes(self, queue, potential_labels, plug_acc, record):
        """
        Plug the nodes in `queue' with the labels in `potential_labels'.

        Each element of `queue' is a tuple of the node to plug and the list of
        ancestor holes from the root of the graph to that node.

        `potential_labels' is a set of the labels which are still available for
        plugging.

        `plug_acc' is the incomplete mapping of holes to labels made on the
        current branch of the search tree so far.

        `record' is a list of all the complete pluggings that we have found in
        total so far. It is the only parameter that is destructively updated.
        """
        if queue != []:
            (node, ancestors) = queue[0]
            if node in self.holes:
                # The node is a hole, try to plug it.
                self._plug_hole(
                    node, ancestors, queue[1:], potential_labels, plug_acc, record
                )
            else:
                assert node in self.labels
                # The node is a label. Replace it in the queue by the holes and
                # labels in the formula fragment named by that label.
                args = self.fragments[node][1]
                head = [(a, ancestors) for a in args if self.is_node(a)]
                self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record)
        else:
            raise Exception("queue empty")

    def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record):
        """
        Try all possible ways of plugging a single hole.
        See _plug_nodes for the meanings of the parameters.
        """
        # Add the current hole we're trying to plug into the list of ancestors.
        assert hole not in ancestors0
        ancestors = [hole] + ancestors0

        # Try each potential label in this hole in turn.
        for l in potential_labels0:
            # Is the label valid in this hole?
            if self._violates_constraints(l, ancestors):
                continue

            plug_acc = plug_acc0.copy()
            plug_acc[hole] = l
            potential_labels = potential_labels0.copy()
            potential_labels.remove(l)

            if len(potential_labels) == 0:
                # No more potential labels. That must mean all the holes have
                # been filled so we have found a legal plugging so remember it.
                #
                # Note that the queue might not be empty because there might
                # be labels on there that point to formula fragments with
                # no holes in them. _sanity_check_plugging will make sure
                # all holes are filled.
                self._sanity_check_plugging(plug_acc, self.top_hole, [])
                record.append(plug_acc)
            else:
                # Recursively try to fill in the rest of the holes in the
                # queue. The label we just plugged into the hole could have
                # holes of its own so at the end of the queue. Putting it on
                # the end of the queue gives us a breadth-first search, so that
                # all the holes at level i of the formula tree are filled
                # before filling level i+1.
                # A depth-first search would work as well since the trees must
                # be finite but the bookkeeping would be harder.
                self._plug_nodes(
                    queue + [(l, ancestors)], potential_labels, plug_acc, record
                )

    def _violates_constraints(self, label, ancestors):
        """
        Return True if the `label' cannot be placed underneath the holes given
        by the set `ancestors' because it would violate the constraints imposed
        on it.
        """
        for c in self.constraints:
            if c.lhs == label:
                if c.rhs not in ancestors:
                    return True
        return False

    def _sanity_check_plugging(self, plugging, node, ancestors):
        """
        Make sure that a given plugging is legal. We recursively go through
        each node and make sure that no constraints are violated.
        We also check that all holes have been filled.
        """
        if node in self.holes:
            ancestors = [node] + ancestors
            label = plugging[node]
        else:
            label = node
        assert label in self.labels
        for c in self.constraints:
            if c.lhs == label:
                assert c.rhs in ancestors
        args = self.fragments[label][1]
        for arg in args:
            if self.is_node(arg):
                self._sanity_check_plugging(plugging, arg, [label] + ancestors)

    def formula_tree(self, plugging):
        """
        Return the first-order logic formula tree for this underspecified
        representation using the plugging given.
        """
        return self._formula_tree(plugging, self.top_hole)

    def _formula_tree(self, plugging, node):
        # Follow the plugging through holes, expand fragments via their
        # constructors, and pass variables through unchanged.
        if node in plugging:
            return self._formula_tree(plugging, plugging[node])
        elif node in self.fragments:
            pred, args = self.fragments[node]
            children = [self._formula_tree(plugging, arg) for arg in args]
            return reduce(Constants.MAP[pred.variable.name], children)
        else:
            return node
297
+
298
+
299
class Constraint:
    """
    This class represents a constraint of the form (L =< N),
    where L is a label and N is a node (a label or a hole).
    """

    def __init__(self, lhs, rhs):
        self.lhs = lhs
        self.rhs = rhs

    def __eq__(self, other):
        # Only constraints of the exact same class can compare equal.
        return self.__class__ == other.__class__ and (self.lhs, self.rhs) == (
            other.lhs,
            other.rhs,
        )

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Hash via repr so equal constraints hash identically.
        return hash(repr(self))

    def __repr__(self):
        return f"({self.lhs} < {self.rhs})"
323
+
324
+
325
def hole_readings(sentence, grammar_filename=None, verbose=False):
    """Parse `sentence` with a hole-semantics grammar and return all of its
    first-order logic readings.

    :param sentence: sentence text; split on whitespace for tokenization
    :param grammar_filename: FCFG grammar path; defaults to the sample
        'hole.fcfg' grammar
    :param verbose: print intermediate representations along the way
    :return: list of formula trees, one per (parse, plugging) combination
    """
    if not grammar_filename:
        grammar_filename = "grammars/sample_grammars/hole.fcfg"

    if verbose:
        print("Reading grammar file", grammar_filename)

    parser = load_parser(grammar_filename)

    # Parse the sentence.
    tokens = sentence.split()
    trees = list(parser.parse(tokens))
    if verbose:
        print("Got %d different parses" % len(trees))

    all_readings = []
    for tree in trees:
        # Get the semantic feature from the top of the parse tree.
        sem = tree.label()["SEM"].simplify()

        # Print the raw semantic representation.
        if verbose:
            print("Raw:       ", sem)

        # Skolemize away all quantifiers. All variables become unique.
        # First peel off any outer lambda wrappers left by the grammar.
        while isinstance(sem, LambdaExpression):
            sem = sem.term
        skolemized = skolemize(sem)

        if verbose:
            print("Skolemized:", skolemized)

        # Break the hole semantics representation down into its components
        # i.e. holes, labels, formula fragments and constraints.
        hole_sem = HoleSemantics(skolemized)

        # Maybe show the details of the semantic representation.
        if verbose:
            print("Holes:       ", hole_sem.holes)
            print("Labels:      ", hole_sem.labels)
            print("Constraints: ", hole_sem.constraints)
            print("Top hole:    ", hole_sem.top_hole)
            print("Top labels:  ", hole_sem.top_most_labels)
            print("Fragments:")
            for l, f in hole_sem.fragments.items():
                print(f"\t{l}: {f}")

        # Find all the possible ways to plug the formulas together.
        pluggings = hole_sem.pluggings()

        # Build FOL formula trees using the pluggings.
        readings = list(map(hole_sem.formula_tree, pluggings))

        # Print out the formulas in a textual format.
        if verbose:
            for i, r in enumerate(readings):
                print()
                print("%d. %s" % (i, r))
            print()

        all_readings.extend(readings)

    return all_readings
388
+
389
+
390
if __name__ == "__main__":
    # Quick smoke test: print all readings for two sample sentences.
    for r in hole_readings("a dog barks"):
        print(r)
    print()
    for r in hole_readings("every girl chases a dog"):
        print(r)
llmeval-env/lib/python3.10/site-packages/nltk/sem/lfg.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Lexical Functional Grammar
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from itertools import chain
10
+
11
+ from nltk.internals import Counter
12
+
13
+
14
class FStructure(dict):
    """
    A Lexical Functional Grammar f-structure.

    An f-structure maps feature names (case-insensitively; keys are stored
    lowercased) to *lists* of values.  Each value is one of:

    - a nested ``FStructure``,
    - a ``(word, tag)`` tuple (a leaf), or
    - a list of ``FStructure`` objects.

    Instances produced by :meth:`read_depgraph` also carry the attributes
    ``pred`` (a ``(word, tag)`` tuple), ``label`` (a short identifier, see
    :meth:`_make_label`) and ``parent``.
    """

    def safeappend(self, key, item):
        """
        Append 'item' to the list at 'key'. If no list exists for 'key', then
        construct one.
        """
        if key not in self:
            self[key] = []
        self[key].append(item)

    def __setitem__(self, key, value):
        # Feature names are case-insensitive; normalize on write so that
        # __getitem__/__contains__/__delitem__ agree.
        dict.__setitem__(self, key.lower(), value)

    def __getitem__(self, key):
        return dict.__getitem__(self, key.lower())

    def __contains__(self, key):
        return dict.__contains__(self, key.lower())

    def __delitem__(self, key):
        # Added for consistency: the other accessors lowercase the key, so
        # deletion must as well or ``del fs["SUBJ"]`` would raise KeyError.
        dict.__delitem__(self, key.lower())

    def to_glueformula_list(self, glue_dict):
        """Convert to a dependency graph and look up its glue formulas."""
        depgraph = self.to_depgraph()
        return glue_dict.to_glueformula_list(depgraph)

    def to_depgraph(self, rel=None):
        """
        Convert this f-structure into a
        ``nltk.parse.dependencygraph.DependencyGraph``.

        :param rel: unused; kept for backward compatibility.
        """
        from nltk.parse.dependencygraph import DependencyGraph

        depgraph = DependencyGraph()
        nodes = depgraph.nodes

        self._to_depgraph(nodes, 0, "ROOT")

        # Add all the dependencies for all the nodes.
        # The non-TOP node list is hoisted out of the outer loop; the
        # original rebuilt the filtering generator once per address.
        non_top_nodes = [n for n in nodes.values() if n["rel"] != "TOP"]
        for address, node in nodes.items():
            for n2 in non_top_nodes:
                if n2["head"] == address:
                    relation = n2["rel"]
                    node["deps"].setdefault(relation, [])
                    node["deps"][relation].append(n2["address"])

        depgraph.root = nodes[1]

        return depgraph

    def _to_depgraph(self, nodes, head, rel):
        """
        Recursively add this f-structure (and its children) to ``nodes``.

        :param nodes: the (defaultdict) node table of a DependencyGraph
        :param head: address of the governing node
        :param rel: dependency relation linking this node to ``head``
        """
        index = len(nodes)

        nodes[index].update(
            {
                "address": index,
                "word": self.pred[0],
                "tag": self.pred[1],
                "head": head,
                "rel": rel,
            }
        )

        for feature in sorted(self):
            for item in sorted(self[feature]):
                if isinstance(item, FStructure):
                    item._to_depgraph(nodes, index, feature)
                elif isinstance(item, tuple):
                    # A leaf (word, tag) pair becomes its own node.
                    new_index = len(nodes)
                    nodes[new_index].update(
                        {
                            "address": new_index,
                            "word": item[0],
                            "tag": item[1],
                            "head": index,
                            "rel": feature,
                        }
                    )
                elif isinstance(item, list):
                    for n in item:
                        n._to_depgraph(nodes, index, feature)
                else:
                    raise Exception(
                        "feature %s is not an FStruct, a list, or a tuple" % feature
                    )

    @staticmethod
    def read_depgraph(depgraph):
        """Build an :class:`FStructure` from a ``DependencyGraph``."""
        return FStructure._read_depgraph(depgraph.root, depgraph)

    @staticmethod
    def _read_depgraph(node, depgraph, label_counter=None, parent=None):
        """
        Recursive worker for :meth:`read_depgraph`.

        :param label_counter: shared ``Counter`` used to assign unique labels
        :param parent: the enclosing FStructure, or None at the root
        """
        if not label_counter:
            label_counter = Counter()

        if node["rel"].lower() in ["spec", "punct"]:
            # the value of a 'spec' entry is a word, not an FStructure
            return (node["word"], node["tag"])

        else:
            fstruct = FStructure()
            fstruct.pred = None
            fstruct.label = FStructure._make_label(label_counter.get())

            fstruct.parent = parent

            word, tag = node["word"], node["tag"]
            if tag[:2] == "VB":
                # Verbs: record PAST tense for VBD forms and strip the
                # fine-grained suffix from the tag.
                if tag[2:3] == "D":
                    fstruct.safeappend("tense", ("PAST", "tense"))
                fstruct.pred = (word, tag[:2])

            if not fstruct.pred:
                fstruct.pred = (word, tag)

            children = [
                depgraph.nodes[idx]
                for idx in chain.from_iterable(node["deps"].values())
            ]
            for child in children:
                fstruct.safeappend(
                    child["rel"],
                    FStructure._read_depgraph(child, depgraph, label_counter, fstruct),
                )

            return fstruct

    @staticmethod
    def _make_label(value):
        """
        Pick an alphabetic character as identifier for an entity in the model.

        Labels cycle through f..z then a..e; once the alphabet is exhausted a
        numeric suffix is appended ("f1", "g1", ...).  (Bug fix: the original
        indexed the letter list without wrapping, raising IndexError for any
        value > 26 despite the suffix logic; values 1-26 are unchanged.)

        :param value: where to index into the list of characters
        :type value: int
        """
        letters = "fghijklmnopqrstuvwxyzabcde"
        letter = letters[(value - 1) % len(letters)]
        num = value // 26
        if num > 0:
            return letter + str(num)
        else:
            return letter

    def __repr__(self):
        # Single-line form of pretty_format().
        return self.__str__().replace("\n", "")

    def __str__(self):
        return self.pretty_format()

    def pretty_format(self, indent=3):
        """
        Return a multi-line, indented rendering of this f-structure.

        :param indent: column at which nested features are printed
        :type indent: int
        """
        try:
            accum = "%s:[" % self.label
        except AttributeError:
            # Bug fix: a missing attribute raises AttributeError, not
            # NameError, so these fallbacks were previously unreachable and
            # an unlabeled f-structure crashed instead of printing "[...]".
            accum = "["
        try:
            accum += "pred '%s'" % (self.pred[0])
        except AttributeError:
            pass

        for feature in sorted(self):
            for item in self[feature]:
                if isinstance(item, FStructure):
                    next_indent = indent + len(feature) + 3 + len(self.label)
                    accum += "\n{}{} {}".format(
                        " " * (indent),
                        feature,
                        item.pretty_format(next_indent),
                    )
                elif isinstance(item, tuple):
                    accum += "\n{}{} '{}'".format(" " * (indent), feature, item[0])
                elif isinstance(item, list):
                    accum += "\n{}{} {{{}}}".format(
                        " " * (indent),
                        feature,
                        ("\n%s" % (" " * (indent + len(feature) + 2))).join(item),
                    )
                else:  # ERROR
                    raise Exception(
                        "feature %s is not an FStruct, a list, or a tuple" % feature
                    )
        return accum + "]"
214
+
215
+
216
def demo_read_depgraph():
    """Parse a handful of hard-coded dependency graphs and print the
    f-structure read from each one."""
    from nltk.parse.dependencygraph import DependencyGraph

    # Each entry is a whitespace-separated CoNLL-style table:
    # word, tag, head address, relation.
    graph_sources = [
        """\
Esso NNP 2 SUB
said VBD 0 ROOT
the DT 5 NMOD
Whiting NNP 5 NMOD
field NN 6 SUB
started VBD 2 VMOD
production NN 6 OBJ
Tuesday NNP 6 VMOD
""",
        """\
John NNP 2 SUB
sees VBP 0 ROOT
Mary NNP 2 OBJ
""",
        """\
a DT 2 SPEC
man NN 3 SUBJ
walks VB 0 ROOT
""",
        """\
every DT 2 SPEC
girl NN 3 SUBJ
chases VB 0 ROOT
a DT 5 SPEC
dog NN 3 OBJ
""",
    ]
    for source in graph_sources:
        print(FStructure.read_depgraph(DependencyGraph(source)))
258
+
259
+
260
# Run the demonstration when this module is executed as a script.
if __name__ == "__main__":
    demo_read_depgraph()