diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..462f0b517068657d149662cf990414f203491caf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__init__.py @@ -0,0 +1,48 @@ +# Natural Language Toolkit: Chatbots +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +""" +A class for simple chatbots. These perform simple pattern matching on sentences +typed by users, and respond with automatically generated sentences. + +These chatbots may not work using the windows command line or the +windows IDLE GUI. +""" + +from nltk.chat.eliza import eliza_chat +from nltk.chat.iesha import iesha_chat +from nltk.chat.rude import rude_chat +from nltk.chat.suntsu import suntsu_chat +from nltk.chat.util import Chat +from nltk.chat.zen import zen_chat + +bots = [ + (eliza_chat, "Eliza (psycho-babble)"), + (iesha_chat, "Iesha (teen anime junky)"), + (rude_chat, "Rude (abusive bot)"), + (suntsu_chat, "Suntsu (Chinese sayings)"), + (zen_chat, "Zen (gems of wisdom)"), +] + + +def chatbots(): + print("Which chatbot would you like to talk to?") + botcount = len(bots) + for i in range(botcount): + print(" %d: %s" % (i + 1, bots[i][1])) + while True: + choice = input(f"\nEnter a number in the range 1-{botcount}: ").strip() + if choice.isdigit() and (int(choice) - 1) in range(botcount): + break + else: + print(" Error: bad chatbot number") + + chatbot = bots[int(choice) - 1][0] + chatbot() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ae72a4687317d34b8ff73e732d281511b01232a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47af3fb35e2beb4cb467bb26939bd2bd9e053cd7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bada9724449fc064064ddd9a7a8e84e731b0f90 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c69857ebe53f74922e6b4696e0c4e4741e293219 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df4225e350a916e8c85339379c809655753d0dbd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a08d555eea43bca3b8240677d1d0f1f2b3826f5b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..876e58b4e69b28c6e7fbade352a33b94c4ac5b76 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/eliza.py b/llmeval-env/lib/python3.10/site-packages/nltk/chat/eliza.py new file mode 100644 index 0000000000000000000000000000000000000000..5dfb4a4be2caa084c89a169f4861bd7a4b3eacf3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/chat/eliza.py @@ -0,0 +1,337 @@ +# Natural Language Toolkit: Eliza +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +# a translation table used to convert things you say into things the +# computer says back, e.g. "I am" --> "you are" + +from nltk.chat.util import Chat, reflections + +# a table of response pairs, where each pair consists of a +# regular expression, and a list of possible responses, +# with group-macros labelled as %1, %2. 
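+# For example (an illustrative sketch, not part of the upstream file): with a
+# single pair such as (r"I need (.*)", ("Why do you need %1?",)), the chatbot
+# would respond like this:
+#
+#     from nltk.chat.util import Chat, reflections
+#     bot = Chat(((r"I need (.*)", ("Why do you need %1?",)),), reflections)
+#     bot.respond("I need a holiday")   # -> "Why do you need a holiday?"
+#
+# Patterns are compiled with re.IGNORECASE and tried top to bottom; the text
+# captured by the first group is passed through the reflections table and then
+# substituted for %1 in a randomly chosen response.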
+ +pairs = ( + ( + r"I need (.*)", + ( + "Why do you need %1?", + "Would it really help you to get %1?", + "Are you sure you need %1?", + ), + ), + ( + r"Why don\'t you (.*)", + ( + "Do you really think I don't %1?", + "Perhaps eventually I will %1.", + "Do you really want me to %1?", + ), + ), + ( + r"Why can\'t I (.*)", + ( + "Do you think you should be able to %1?", + "If you could %1, what would you do?", + "I don't know -- why can't you %1?", + "Have you really tried?", + ), + ), + ( + r"I can\'t (.*)", + ( + "How do you know you can't %1?", + "Perhaps you could %1 if you tried.", + "What would it take for you to %1?", + ), + ), + ( + r"I am (.*)", + ( + "Did you come to me because you are %1?", + "How long have you been %1?", + "How do you feel about being %1?", + ), + ), + ( + r"I\'m (.*)", + ( + "How does being %1 make you feel?", + "Do you enjoy being %1?", + "Why do you tell me you're %1?", + "Why do you think you're %1?", + ), + ), + ( + r"Are you (.*)", + ( + "Why does it matter whether I am %1?", + "Would you prefer it if I were not %1?", + "Perhaps you believe I am %1.", + "I may be %1 -- what do you think?", + ), + ), + ( + r"What (.*)", + ( + "Why do you ask?", + "How would an answer to that help you?", + "What do you think?", + ), + ), + ( + r"How (.*)", + ( + "How do you suppose?", + "Perhaps you can answer your own question.", + "What is it you're really asking?", + ), + ), + ( + r"Because (.*)", + ( + "Is that the real reason?", + "What other reasons come to mind?", + "Does that reason apply to anything else?", + "If %1, what else must be true?", + ), + ), + ( + r"(.*) sorry (.*)", + ( + "There are many times when no apology is needed.", + "What feelings do you have when you apologize?", + ), + ), + ( + r"Hello(.*)", + ( + "Hello... I'm glad you could drop by today.", + "Hi there... 
how are you today?", + "Hello, how are you feeling today?", + ), + ), + ( + r"I think (.*)", + ("Do you doubt %1?", "Do you really think so?", "But you're not sure %1?"), + ), + ( + r"(.*) friend (.*)", + ( + "Tell me more about your friends.", + "When you think of a friend, what comes to mind?", + "Why don't you tell me about a childhood friend?", + ), + ), + (r"Yes", ("You seem quite sure.", "OK, but can you elaborate a bit?")), + ( + r"(.*) computer(.*)", + ( + "Are you really talking about me?", + "Does it seem strange to talk to a computer?", + "How do computers make you feel?", + "Do you feel threatened by computers?", + ), + ), + ( + r"Is it (.*)", + ( + "Do you think it is %1?", + "Perhaps it's %1 -- what do you think?", + "If it were %1, what would you do?", + "It could well be that %1.", + ), + ), + ( + r"It is (.*)", + ( + "You seem very certain.", + "If I told you that it probably isn't %1, what would you feel?", + ), + ), + ( + r"Can you (.*)", + ( + "What makes you think I can't %1?", + "If I could %1, then what?", + "Why do you ask if I can %1?", + ), + ), + ( + r"Can I (.*)", + ( + "Perhaps you don't want to %1.", + "Do you want to be able to %1?", + "If you could %1, would you?", + ), + ), + ( + r"You are (.*)", + ( + "Why do you think I am %1?", + "Does it please you to think that I'm %1?", + "Perhaps you would like me to be %1.", + "Perhaps you're really talking about yourself?", + ), + ), + ( + r"You\'re (.*)", + ( + "Why do you say I am %1?", + "Why do you think I am %1?", + "Are we talking about you, or me?", + ), + ), + ( + r"I don\'t (.*)", + ("Don't you really %1?", "Why don't you %1?", "Do you want to %1?"), + ), + ( + r"I feel (.*)", + ( + "Good, tell me more about these feelings.", + "Do you often feel %1?", + "When do you usually feel %1?", + "When you feel %1, what do you do?", + ), + ), + ( + r"I have (.*)", + ( + "Why do you tell me that you've %1?", + "Have you really %1?", + "Now that you have %1, what will you do next?", + ), + ), + ( + r"I would (.*)", + ( + "Could you explain why you would %1?", + "Why would you %1?", + "Who else knows that you would %1?", + ), + ), + ( + r"Is there (.*)", + ( + "Do you think there is %1?", + "It's likely that there is %1.", + "Would you like there to be %1?", + ), + ), + ( + r"My (.*)", + ( + "I see, your %1.", + "Why do you say that your %1?", + "When your %1, how do you feel?", + ), + ), + ( + r"You (.*)", + ( + "We should be discussing you, not me.", + "Why do you say that about me?", + "Why do you care whether I %1?", + ), + ), + (r"Why (.*)", ("Why don't you tell me the reason why %1?", "Why do you think %1?")), + ( + r"I want (.*)", + ( + "What would it mean to you if you got %1?", + "Why do you want %1?", + "What would you do if you got %1?", + "If you got %1, then what would you do?", + ), + ), + ( + r"(.*) mother(.*)", + ( + "Tell me more about your mother.", + "What was your relationship with your mother like?", + "How do you feel about your mother?", + "How does this relate to your feelings today?", + "Good family relations are important.", + ), + ), + ( + r"(.*) father(.*)", + ( + "Tell me more about your father.", + "How did your father make you feel?", + "How do you feel about your father?", + "Does your relationship with your father relate to your feelings today?", + "Do you have trouble showing affection with your family?", + ), + ), + ( + r"(.*) child(.*)", + ( + "Did you have close friends as a child?", + "What is your favorite childhood memory?", + "Do you remember any dreams or nightmares from 
childhood?", + "Did the other children sometimes tease you?", + "How do you think your childhood experiences relate to your feelings today?", + ), + ), + ( + r"(.*)\?", + ( + "Why do you ask that?", + "Please consider whether you can answer your own question.", + "Perhaps the answer lies within yourself?", + "Why don't you tell me?", + ), + ), + ( + r"quit", + ( + "Thank you for talking with me.", + "Good-bye.", + "Thank you, that will be $150. Have a good day!", + ), + ), + ( + r"(.*)", + ( + "Please tell me more.", + "Let's change focus a bit... Tell me about your family.", + "Can you elaborate on that?", + "Why do you say that %1?", + "I see.", + "Very interesting.", + "%1.", + "I see. And what does that tell you?", + "How does that make you feel?", + "How do you feel when you say that?", + ), + ), +) + +eliza_chatbot = Chat(pairs, reflections) + + +def eliza_chat(): + print("Therapist\n---------") + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("Hello. How are you feeling today?") + + eliza_chatbot.converse() + + +def demo(): + eliza_chat() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/iesha.py b/llmeval-env/lib/python3.10/site-packages/nltk/chat/iesha.py new file mode 100644 index 0000000000000000000000000000000000000000..552870caa30927f30b96c5dbdfd2ccb459cf48a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/chat/iesha.py @@ -0,0 +1,160 @@ +# Natural Language Toolkit: Teen Chatbot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Selina Dennis +# URL: +# For license information, see LICENSE.TXT + +""" +This chatbot is a tongue-in-cheek take on the average teen +anime junky that frequents YahooMessenger or MSNM. +All spelling mistakes and flawed grammar are intentional. +""" + +from nltk.chat.util import Chat + +reflections = { + "am": "r", + "was": "were", + "i": "u", + "i'd": "u'd", + "i've": "u'v", + "ive": "u'v", + "i'll": "u'll", + "my": "ur", + "are": "am", + "you're": "im", + "you've": "ive", + "you'll": "i'll", + "your": "my", + "yours": "mine", + "you": "me", + "u": "me", + "ur": "my", + "urs": "mine", + "me": "u", +} + +# Note: %1/2/etc are used without spaces prior as the chat bot seems +# to add a superfluous space when matching. + +pairs = ( + ( + r"I\'m (.*)", + ( + "ur%1?? that's so cool! kekekekeke ^_^ tell me more!", + "ur%1? neat!! kekeke >_<", + ), + ), + ( + r"(.*) don\'t you (.*)", + ( + r"u think I can%2??! really?? kekeke \<_\<", + "what do u mean%2??!", + "i could if i wanted, don't you think!! kekeke", + ), + ), + (r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")), + ( + r"do (you|u) (.*)\??", + ("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"), + ), + ( + r"(.*)\?", + ( + "man u ask lots of questions!", + "booooring! how old r u??", + "boooooring!! ur not very fun", + ), + ), + ( + r"(cos|because) (.*)", + ("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"), + ), + ( + r"why can\'t [iI] (.*)", + ( + "i dunno! y u askin me for!", + "try harder, silly! hee! ^_^", + "i dunno! but when i can't%1 i jump up and down!", + ), + ), + ( + r"I can\'t (.*)", + ( + "u can't what??! >_<", + "that's ok! i can't%1 either! kekekekeke ^_^", + "try harder, silly! hee! ^&^", + ), + ), + ( + r"(.*) (like|love|watch) anime", + ( + "omg i love anime!! do u like sailor moon??! ^&^", + "anime yay! 
anime rocks sooooo much!", + "oooh anime! i love anime more than anything!", + "anime is the bestest evar! evangelion is the best!", + "hee anime is the best! do you have ur fav??", + ), + ), + ( + r"I (like|love|watch|play) (.*)", + ("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"), + ), + ( + r"anime sucks|(.*) (hate|detest) anime", + ( + "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*", + "no way! anime is the best ever!", + "nuh-uh, anime is the best!", + ), + ), + ( + r"(are|r) (you|u) (.*)", + ("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"), + ), + ( + r"what (.*)", + ("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"), + ), + (r"how (.*)", ("not tellin!! kekekekekeke ^_^",)), + (r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)), + ( + r"quit", + ( + "mom says i have to go eat dinner now :,( bye!!", + "awww u have to go?? see u next time!!", + "how to see u again soon! ^_^", + ), + ), + ( + r"(.*)", + ( + "ur funny! kekeke", + "boooooring! talk about something else! tell me wat u like!", + "do u like anime??", + "do u watch anime? i like sailor moon! ^_^", + "i wish i was a kitty!! kekekeke ^_^", + ), + ), +) + +iesha_chatbot = Chat(pairs, reflections) + + +def iesha_chat(): + print("Iesha the TeenBoT\n---------") + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("hi!! i'm iesha! who r u??!") + + iesha_chatbot.converse() + + +def demo(): + iesha_chat() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/rude.py b/llmeval-env/lib/python3.10/site-packages/nltk/chat/rude.py new file mode 100644 index 0000000000000000000000000000000000000000..77404e42bc4d4c9c279540a7bac18fa47d78b9cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/chat/rude.py @@ -0,0 +1,125 @@ +# Natural Language Toolkit: Rude Chatbot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Spiller +# URL: +# For license information, see LICENSE.TXT + +from nltk.chat.util import Chat, reflections + +pairs = ( + ( + r"We (.*)", + ( + "What do you mean, 'we'?", + "Don't include me in that!", + "I wouldn't be so sure about that.", + ), + ), + ( + r"You should (.*)", + ("Don't tell me what to do, buddy.", "Really? I should, should I?"), + ), + ( + r"You\'re(.*)", + ( + "More like YOU'RE %1!", + "Hah! Look who's talking.", + "Come over here and tell me I'm %1.", + ), + ), + ( + r"You are(.*)", + ( + "More like YOU'RE %1!", + "Hah! Look who's talking.", + "Come over here and tell me I'm %1.", + ), + ), + ( + r"I can\'t(.*)", + ( + "You do sound like the type who can't %1.", + "Hear that splashing sound? That's my heart bleeding for you.", + "Tell somebody who might actually care.", + ), + ), + ( + r"I think (.*)", + ( + "I wouldn't think too hard if I were you.", + "You actually think? I'd never have guessed...", + ), + ), + ( + r"I (.*)", + ( + "I'm getting a bit tired of hearing about you.", + "How about we talk about me instead?", + "Me, me, me... 
Frankly, I don't care.", + ), + ), + ( + r"How (.*)", + ( + "How do you think?", + "Take a wild guess.", + "I'm not even going to dignify that with an answer.", + ), + ), + (r"What (.*)", ("Do I look like an encyclopedia?", "Figure it out yourself.")), + ( + r"Why (.*)", + ( + "Why not?", + "That's so obvious I thought even you'd have already figured it out.", + ), + ), + ( + r"(.*)shut up(.*)", + ( + "Make me.", + "Getting angry at a feeble NLP assignment? Somebody's losing it.", + "Say that again, I dare you.", + ), + ), + ( + r"Shut up(.*)", + ( + "Make me.", + "Getting angry at a feeble NLP assignment? Somebody's losing it.", + "Say that again, I dare you.", + ), + ), + ( + r"Hello(.*)", + ("Oh good, somebody else to talk to. Joy.", "'Hello'? How original..."), + ), + ( + r"(.*)", + ( + "I'm getting bored here. Become more interesting.", + "Either become more thrilling or get lost, buddy.", + "Change the subject before I die of fatal boredom.", + ), + ), +) + +rude_chatbot = Chat(pairs, reflections) + + +def rude_chat(): + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("I suppose I should say hello.") + + rude_chatbot.converse() + + +def demo(): + rude_chat() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/suntsu.py b/llmeval-env/lib/python3.10/site-packages/nltk/chat/suntsu.py new file mode 100644 index 0000000000000000000000000000000000000000..2130c7da1d630a2d8f78412d4b02d518d540af9f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/chat/suntsu.py @@ -0,0 +1,140 @@ +# Natural Language Toolkit: Sun Tsu-Bot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sam Huston 2007 +# URL: +# For license information, see LICENSE.TXT + +""" +Tsu bot responds to all queries with a Sun Tsu sayings + +Quoted from Sun Tsu's The Art of War +Translated by LIONEL GILES, M.A. 1910 +Hosted by the Gutenberg Project +https://www.gutenberg.org/ +""" + +from nltk.chat.util import Chat, reflections + +pairs = ( + (r"quit", ("Good-bye.", "Plan well", "May victory be your future")), + ( + r"[^\?]*\?", + ( + "Please consider whether you can answer your own question.", + "Ask me no questions!", + ), + ), + ( + r"[0-9]+(.*)", + ( + "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.", + "There are five essentials for victory", + ), + ), + ( + r"[A-Ca-c](.*)", + ( + "The art of war is of vital importance to the State.", + "All warfare is based on deception.", + "If your opponent is secure at all points, be prepared for him. 
If he is in superior strength, evade him.", + "If the campaign is protracted, the resources of the State will not be equal to the strain.", + "Attack him where he is unprepared, appear where you are not expected.", + "There is no instance of a country having benefited from prolonged warfare.", + ), + ), + ( + r"[D-Fd-f](.*)", + ( + "The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.", + "Bring war material with you from home, but forage on the enemy.", + "In war, then, let your great object be victory, not lengthy campaigns.", + "To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.", + ), + ), + ( + r"[G-Ig-i](.*)", + ( + "Heaven signifies night and day, cold and heat, times and seasons.", + "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.", + "The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.", + "One may know how to conquer without being able to do it.", + ), + ), + ( + r"[J-Lj-l](.*)", + ( + "There are three ways in which a ruler can bring misfortune upon his army.", + "By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the army.", + "By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.", + "By employing the officers of his army without discrimination, through ignorance of the military principle of adaptation to circumstances. This shakes the confidence of the soldiers.", + "There are five essentials for victory", + "He will win who knows when to fight and when not to fight.", + "He will win who knows how to handle both superior and inferior forces.", + "He will win whose army is animated by the same spirit throughout all its ranks.", + "He will win who, prepared himself, waits to take the enemy unprepared.", + "He will win who has military capacity and is not interfered with by the sovereign.", + ), + ), + ( + r"[M-Om-o](.*)", + ( + "If you know the enemy and know yourself, you need not fear the result of a hundred battles.", + "If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.", + "If you know neither the enemy nor yourself, you will succumb in every battle.", + "The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.", + ), + ), + ( + r"[P-Rp-r](.*)", + ( + "Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.", + "Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.", + "He wins his battles by making no mistakes. 
Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.", + "A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.", + "The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.", + ), + ), + ( + r"[S-Us-u](.*)", + ( + "What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.", + "Hence his victories bring him neither reputation for wisdom nor credit for courage.", + "Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.", + "In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.", + "There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.", + "Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.", + ), + ), + ( + r"[V-Zv-z](.*)", + ( + "It is a matter of life and death, a road either to safety or to ruin.", + "Hold out baits to entice the enemy. Feign disorder, and crush him.", + "All men can see the tactics whereby I conquer, but what none can see is the strategy out of which victory is evolved.", + "Do not repeat the tactics which have gained you one victory, but let your methods be regulated by the infinite variety of circumstances.", + "So in war, the way is to avoid what is strong and to strike at what is weak.", + "Just as water retains no constant shape, so in warfare there are no constant conditions.", + ), + ), + (r"(.*)", ("Your statement insults me.", "")), +) + +suntsu_chatbot = Chat(pairs, reflections) + + +def suntsu_chat(): + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("You seek enlightenment?") + + suntsu_chatbot.converse() + + +def demo(): + suntsu_chat() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/chat/util.py new file mode 100644 index 0000000000000000000000000000000000000000..ddcb246ce3b74a15cd4c87bb180811553849af1b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/chat/util.py @@ -0,0 +1,124 @@ +# Natural Language Toolkit: Chatbot Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +import random +import re + +reflections = { + "i am": "you are", + "i was": "you were", + "i": "you", + "i'm": "you are", + "i'd": "you would", + "i've": "you have", + "i'll": "you will", + "my": "your", + "you are": "I am", + "you were": "I was", + "you've": "I have", + "you'll": "I will", + "your": "my", + "yours": "mine", + "you": "me", + "me": "you", +} + + +class Chat: + def __init__(self, pairs, reflections={}): + """ + Initialize the chatbot. Pairs is a list of patterns and responses. Each + pattern is a regular expression matching the user's statement or question, + e.g. r'I like (.*)'. For each such pattern a list of possible responses + is given, e.g. ['Why do you like %1', 'Did you ever dislike %1']. 
Material + which is matched by parenthesized sections of the patterns (e.g. .*) is mapped to + the numbered positions in the responses, e.g. %1. + + :type pairs: list of tuple + :param pairs: The patterns and responses + :type reflections: dict + :param reflections: A mapping between first and second person expressions + :rtype: None + """ + + self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs] + self._reflections = reflections + self._regex = self._compile_reflections() + + def _compile_reflections(self): + sorted_refl = sorted(self._reflections, key=len, reverse=True) + return re.compile( + r"\b({})\b".format("|".join(map(re.escape, sorted_refl))), re.IGNORECASE + ) + + def _substitute(self, str): + """ + Substitute words in the string, according to the specified reflections, + e.g. "I'm" -> "you are" + + :type str: str + :param str: The string to be mapped + :rtype: str + """ + + return self._regex.sub( + lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower() + ) + + def _wildcards(self, response, match): + pos = response.find("%") + while pos >= 0: + num = int(response[pos + 1 : pos + 2]) + response = ( + response[:pos] + + self._substitute(match.group(num)) + + response[pos + 2 :] + ) + pos = response.find("%") + return response + + def respond(self, str): + """ + Generate a response to the user input. + + :type str: str + :param str: The string to be mapped + :rtype: str + """ + + # check each pattern + for (pattern, response) in self._pairs: + match = pattern.match(str) + + # did the pattern match? + if match: + resp = random.choice(response) # pick a random response + resp = self._wildcards(resp, match) # process wildcards + + # fix munged punctuation at the end + if resp[-2:] == "?.": + resp = resp[:-2] + "." + if resp[-2:] == "??": + resp = resp[:-2] + "?" + return resp + + # Hold a conversation with a chatbot + def converse(self, quit="quit"): + user_input = "" + while user_input != quit: + user_input = quit + try: + user_input = input(">") + except EOFError: + print(user_input) + if user_input: + while user_input[-1] in "!.": + user_input = user_input[:-1] + print(self.respond(user_input)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/chat/zen.py b/llmeval-env/lib/python3.10/site-packages/nltk/chat/zen.py new file mode 100644 index 0000000000000000000000000000000000000000..5972d47764056b388fca1a24acb88bd87369274f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/chat/zen.py @@ -0,0 +1,329 @@ +# Natural Language Toolkit: Zen Chatbot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Amy Holland +# URL: +# For license information, see LICENSE.TXT + +""" +Zen Chatbot talks in gems of Zen wisdom. + +This is a sample conversation with Zen Chatbot: +ZC: Welcome, my child. +me: Good afternoon. +ZC: Ask the question you have come to ask. +me: How can I achieve enlightenment? +ZC: How do you suppose? +me: Through meditation. +ZC: Form is emptiness, and emptiness form. +me: How can I empty my mind of worldly troubles? +ZC: Will an answer to that really help in your search for enlightenment? +me: Yes. +ZC: It is better to be right than to be certain. +me: I seek truth and wisdom. +ZC: The search for truth is a long journey. +me: Are you sure? +ZC: Maybe sure, maybe not sure. + + +The chatbot structure is based on that of chat.eliza. Thus, it uses +a translation table to convert from question to response +i.e. 
"I am" --> "you are" + +Of course, since Zen Chatbot does not understand the meaning of any words, +responses are very limited. Zen Chatbot will usually answer very vaguely, or +respond to a question by asking a different question, in much the same way +as Eliza. +""" + +from nltk.chat.util import Chat, reflections + +# responses are matched top to bottom, so non-specific matches occur later +# for each match, a list of possible responses is provided +responses = ( + # Zen Chatbot opens with the line "Welcome, my child." The usual + # response will be a greeting problem: 'good' matches "good morning", + # "good day" etc, but also "good grief!" and other sentences starting + # with the word 'good' that may not be a greeting + ( + r"(hello(.*))|(good [a-zA-Z]+)", + ( + "The path to enlightenment is often difficult to see.", + "Greetings. I sense your mind is troubled. Tell me of your troubles.", + "Ask the question you have come to ask.", + "Hello. Do you seek englightenment?", + ), + ), + # "I need" and "I want" can be followed by a thing (eg 'help') + # or an action (eg 'to see you') + # + # This is a problem with this style of response - + # person: "I need you" + # chatbot: "me can be achieved by hard work and dedication of the mind" + # i.e. 'you' is not really a thing that can be mapped this way, so this + # interpretation only makes sense for some inputs + # + ( + r"i need (.*)", + ( + "%1 can be achieved by hard work and dedication of the mind.", + "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.", + "Focus your mind on%1, and you will find what you need.", + ), + ), + ( + r"i want (.*)", + ( + "Desires of the heart will distract you from the path to enlightenment.", + "Will%1 help you attain enlightenment?", + "Is%1 a desire of the mind, or of the heart?", + ), + ), + # why questions are separated into three types: + # "why..I" e.g. "why am I here?" "Why do I like cake?" + # "why..you" e.g. "why are you here?" "Why won't you tell me?" + # "why..." e.g. "Why is the sky blue?" + # problems: + # person: "Why can't you tell me?" + # chatbot: "Are you sure I tell you?" + # - this style works for positives (e.g. "why do you like cake?") + # but does not work for negatives (e.g. "why don't you like cake?") + (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")), + (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")), + (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")), + # e.g. "are you listening?", "are you a duck" + ( + r"are you (.*)\?", + ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."), + ), + # e.g. "am I a duck?", "am I going to die?" + ( + r"am i (.*)\?", + ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."), + ), + # what questions, e.g. "what time is it?" + # problems: + # person: "What do you want?" + # chatbot: "Seek truth, not what do me want." + (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")), + # how questions, e.g. "how do you do?" + ( + r"how (.*)\?", + ( + "How do you suppose?", + "Will an answer to that really help in your search for enlightenment?", + "Ask yourself not how, but why.", + ), + ), + # can questions, e.g. "can you run?", "can you come over here please?" + ( + r"can you (.*)\?", + ( + "I probably can, but I may not.", + "Maybe I can%1, and maybe I cannot.", + "I can do all, and I can do nothing.", + ), + ), + # can questions, e.g. "can I have some cake?", "can I know truth?" 
+ ( + r"can i (.*)\?", + ( + "You can%1 if you believe you can%1, and have a pure spirit.", + "Seek truth and you will know if you can%1.", + ), + ), + # e.g. "It is raining" - implies the speaker is certain of a fact + ( + r"it is (.*)", + ( + "How can you be certain that%1, when you do not even know yourself?", + "Whether it is%1 or not does not change the way the world is.", + ), + ), + # e.g. "is there a doctor in the house?" + ( + r"is there (.*)\?", + ("There is%1 if you believe there is.", "It is possible that there is%1."), + ), + # e.g. "is it possible?", "is this true?" + (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")), + # non-specific question + ( + r"(.*)\?", + ( + "Do you think %1?", + "You seek the truth. Does the truth seek you?", + "If you intentionally pursue the answers to your questions, the answers become hard to see.", + "The answer to your question cannot be told. It must be experienced.", + ), + ), + # expression of hate of form "I hate you" or "Kelly hates cheese" + ( + r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)", + ( + "Perhaps it is not about hating %2, but about hate from within.", + "Weeds only grow when we dislike them", + "Hate is a very strong emotion.", + ), + ), + # statement containing the word 'truth' + ( + r"(.*) truth(.*)", + ( + "Seek truth, and truth will seek you.", + "Remember, it is not the spoon which bends - only yourself.", + "The search for truth is a long journey.", + ), + ), + # desire to do an action + # e.g. "I want to go shopping" + ( + r"i want to (.*)", + ("You may %1 if your heart truly desires to.", "You may have to %1."), + ), + # desire for an object + # e.g. "I want a pony" + ( + r"i want (.*)", + ( + "Does your heart truly desire %1?", + "Is this a desire of the heart, or of the mind?", + ), + ), + # e.g. "I can't wait" or "I can't do this" + ( + r"i can\'t (.*)", + ( + "What we can and can't do is a limitation of the mind.", + "There are limitations of the body, and limitations of the mind.", + "Have you tried to%1 with a clear mind?", + ), + ), + # "I think.." indicates uncertainty. e.g. "I think so." + # problem: exceptions... + # e.g. "I think, therefore I am" + ( + r"i think (.*)", + ( + "Uncertainty in an uncertain world.", + "Indeed, how can we be certain of anything in such uncertain times.", + "Are you not, in fact, certain that%1?", + ), + ), + # "I feel...emotions/sick/light-headed..." + ( + r"i feel (.*)", + ( + "Your body and your emotions are both symptoms of your mind." + "What do you believe is the root of such feelings?", + "Feeling%1 can be a sign of your state-of-mind.", + ), + ), + # exclaimation mark indicating emotion + # e.g. "Wow!" or "No!" + ( + r"(.*)!", + ( + "I sense that you are feeling emotional today.", + "You need to calm your emotions.", + ), + ), + # because [statement] + # e.g. "because I said so" + ( + r"because (.*)", + ( + "Does knowning the reasons behind things help you to understand" + " the things themselves?", + "If%1, what else must be true?", + ), + ), + # yes or no - raise an issue of certainty/correctness + ( + r"(yes)|(no)", + ( + "Is there certainty in an uncertain world?", + "It is better to be right than to be certain.", + ), + ), + # sentence containing word 'love' + ( + r"(.*)love(.*)", + ( + "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. 
Let your heart be like the trees.", + "Free love!", + ), + ), + # sentence containing word 'understand' - r + ( + r"(.*)understand(.*)", + ( + "If you understand, things are just as they are;" + " if you do not understand, things are just as they are.", + "Imagination is more important than knowledge.", + ), + ), + # 'I', 'me', 'my' - person is talking about themself. + # this breaks down when words contain these - eg 'Thyme', 'Irish' + ( + r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)", + ( + "'I', 'me', 'my'... these are selfish expressions.", + "Have you ever considered that you might be a selfish person?", + "Try to consider others, not just yourself.", + "Think not just of yourself, but of others.", + ), + ), + # 'you' starting a sentence + # e.g. "you stink!" + ( + r"you (.*)", + ("My path is not of concern to you.", "I am but one, and you but one more."), + ), + # say goodbye with some extra Zen wisdom. + ( + r"exit", + ( + "Farewell. The obstacle is the path.", + "Farewell. Life is a journey, not a destination.", + "Good bye. We are cups, constantly and quietly being filled." + "\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.", + ), + ), + # fall through case - + # when stumped, respond with generic zen wisdom + # + ( + r"(.*)", + ( + "When you're enlightened, every word is wisdom.", + "Random talk is useless.", + "The reverse side also has a reverse side.", + "Form is emptiness, and emptiness is form.", + "I pour out a cup of water. Is the cup empty?", + ), + ), +) + +zen_chatbot = Chat(responses, reflections) + + +def zen_chat(): + print("*" * 75) + print("Zen Chatbot!".center(75)) + print("*" * 75) + print('"Look beyond mere words and letters - look into your mind"'.center(75)) + print("* Talk your way to truth with Zen Chatbot.") + print("* Type 'quit' when you have had enough.") + print("*" * 75) + print("Welcome, my child.") + + zen_chatbot.converse() + + +def demo(): + zen_chat() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25b1ee79a29d319ef80a471e9f5f158c4b2531fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a67adc5befaf8729c2fb86a245761be8f0e6deb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/agreement.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a4787baa2b3a37941a9df654c033c48eae4d468 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/aline.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f17a0baacce32cde2e001b332619723a002604a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/association.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d10085e21e44d654f27818f9cc4e3e7b38be83c9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..270065c3b365cc8a588713298c58b7cb2cba104b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/scores.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..280d89b7c400452ded57370c18b10c1abfcf7901 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad3081e01d0e486906196c7af560d3cc3fc141cc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/babelfish.py b/llmeval-env/lib/python3.10/site-packages/nltk/misc/babelfish.py new file mode 100644 index 0000000000000000000000000000000000000000..d317d65a194578e28ffad94bd53803395b5e3c58 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/misc/babelfish.py @@ -0,0 +1,10 @@ +""" +This module previously provided an interface to Babelfish online +translation service; this service is no longer available; this +module is kept in NLTK source code in order to provide better error +messages for people following the NLTK Book 2.0. +""" + + +def babelize_shell(): + print("Babelfish online translation service is no longer available.") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/chomsky.py b/llmeval-env/lib/python3.10/site-packages/nltk/misc/chomsky.py new file mode 100644 index 0000000000000000000000000000000000000000..0632bca034512041b3e0cf9a6231f8ac1c131e4b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/misc/chomsky.py @@ -0,0 +1,134 @@ +# Chomsky random text generator, version 1.1, Raymond Hettinger, 2005/09/13 +# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440546 + +""" +CHOMSKY is an aid to writing linguistic papers in the style +of the great master. It is based on selected phrases taken +from actual books and articles written by Noam Chomsky. +Upon request, it assembles the phrases in the elegant +stylistic patterns that Chomsky is noted for. 
+To generate n sentences of linguistic wisdom, type + + (CHOMSKY n) -- for example + (CHOMSKY 5) generates half a screen of linguistic truth. +""" + +leadins = """To characterize a linguistic level L, + On the other hand, + This suggests that + It appears that + Furthermore, + We will bring evidence in favor of the following thesis: + To provide a constituent structure for T(Z,K), + From C1, it follows that + For any transformation which is sufficiently diversified in \ +application to be of any interest, + Analogously, + Clearly, + Note that + Of course, + Suppose, for instance, that + Thus + With this clarification, + Conversely, + We have already seen that + By combining adjunctions and certain deformations, + I suggested that these results would follow from the assumption that + If the position of the trace in (99c) were only relatively \ +inaccessible to movement, + However, this assumption is not correct, since + Comparing these examples with their parasitic gap counterparts in \ +(96) and (97), we see that + In the discussion of resumptive pronouns following (81), + So far, + Nevertheless, + For one thing, + Summarizing, then, we assume that + A consequence of the approach just outlined is that + Presumably, + On our assumptions, + It may be, then, that + It must be emphasized, once again, that + Let us continue to suppose that + Notice, incidentally, that """ +# List of LEADINs to buy time. + +subjects = """ the notion of level of grammaticalness + a case of semigrammaticalness of a different sort + most of the methodological work in modern linguistics + a subset of English sentences interesting on quite independent grounds + the natural general principle that will subsume this case + an important property of these three types of EC + any associated supporting element + the appearance of parasitic gaps in domains relatively inaccessible \ +to ordinary extraction + the speaker-hearer's linguistic intuition + the descriptive power of the base component + the earlier discussion of deviance + this analysis of a formative as a pair of sets of features + this selectionally introduced contextual feature + a descriptively adequate grammar + the fundamental error of regarding functional notions as categorial + relational information + the systematic use of complex symbols + the theory of syntactic features developed earlier""" +# List of SUBJECTs chosen for maximum professorial macho. + +verbs = """can be defined in such a way as to impose + delimits + suffices to account for + cannot be arbitrary in + is not subject to + does not readily tolerate + raises serious doubts about + is not quite equivalent to + does not affect the structure of + may remedy and, at the same time, eliminate + is not to be considered in determining + is to be regarded as + is unspecified with respect to + is, apparently, determined by + is necessary to impose an interpretation on + appears to correlate rather closely with + is rather different from""" +# List of VERBs chosen for autorecursive obfuscation. + +objects = """ problems of phonemic and morphological analysis. + a corpus of utterance tokens upon which conformity has been defined \ +by the paired utterance test. + the traditional practice of grammarians. + the levels of acceptability from fairly high (e.g. (99a)) to virtual \ +gibberish (e.g. (98d)). + a stipulation to place the constructions into these various categories. + a descriptive fact. + a parasitic gap construction. + the extended c-command discussed in connection with (34). 
+ the ultimate standard that determines the accuracy of any proposed grammar. + the system of base rules exclusive of the lexicon. + irrelevant intervening contexts in selectional rules. + nondistinctness in the sense of distinctive feature theory. + a general convention regarding the forms of the grammar. + an abstract underlying order. + an important distinction in language use. + the requirement that branching is not tolerated within the dominance \ +scope of a complex symbol. + the strong generative capacity of the theory.""" +# List of OBJECTs selected for profound sententiousness. + +import random +import textwrap +from itertools import chain, islice + + +def generate_chomsky(times=5, line_length=72): + parts = [] + for part in (leadins, subjects, verbs, objects): + phraselist = list(map(str.strip, part.splitlines())) + random.shuffle(phraselist) + parts.append(phraselist) + output = chain.from_iterable(islice(zip(*parts), 0, times)) + print(textwrap.fill(" ".join(output), line_length)) + + +if __name__ == "__main__": + generate_chomsky() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/misc/minimalset.py b/llmeval-env/lib/python3.10/site-packages/nltk/misc/minimalset.py new file mode 100644 index 0000000000000000000000000000000000000000..50d1fa5b6e45193d15e6fa1d2aec687de503f1d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/misc/minimalset.py @@ -0,0 +1,85 @@ +# Natural Language Toolkit: Minimal Sets +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from collections import defaultdict + + +class MinimalSet: + """ + Find contexts where more than one possible target value can + appear. E.g. if targets are word-initial letters, and contexts + are the remainders of words, then we would like to find cases like + "fat" vs "cat", and "training" vs "draining". If targets are + parts-of-speech and contexts are words, then we would like to find + cases like wind (noun) 'air in rapid motion', vs wind (verb) + 'coil, wrap'. + """ + + def __init__(self, parameters=None): + """ + Create a new minimal set. + + :param parameters: The (context, target, display) tuples for the item + :type parameters: list(tuple(str, str, str)) + """ + self._targets = set() # the contrastive information + self._contexts = set() # what we are controlling for + self._seen = defaultdict(set) # to record what we have seen + self._displays = {} # what we will display + + if parameters: + for context, target, display in parameters: + self.add(context, target, display) + + def add(self, context, target, display): + """ + Add a new item to the minimal set, having the specified + context, target, and display form. + + :param context: The context in which the item of interest appears + :type context: str + :param target: The item of interest + :type target: str + :param display: The information to be reported for each item + :type display: str + """ + # Store the set of targets that occurred in this context + self._seen[context].add(target) + + # Keep track of which contexts and targets we have seen + self._contexts.add(context) + self._targets.add(target) + + # For a given context and target, store the display form + self._displays[(context, target)] = display + + def contexts(self, minimum=2): + """ + Determine which contexts occurred with enough distinct targets. 
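+ A context qualifies if at least ``minimum`` distinct targets have been seen in it.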
+ + :param minimum: the minimum number of distinct target forms + :type minimum: int + :rtype: list + """ + return [c for c in self._contexts if len(self._seen[c]) >= minimum] + + def display(self, context, target, default=""): + if (context, target) in self._displays: + return self._displays[(context, target)] + else: + return default + + def display_all(self, context): + result = [] + for target in self._targets: + x = self.display(context, target) + if x: + result.append(x) + return result + + def targets(self): + return self._targets diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bbb3f032bef5ce79ab7232566bc73aa17ff661b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__init__.py @@ -0,0 +1,75 @@ +# Natural Language Toolkit: Semantic Interpretation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Semantic Interpretation Package + +This package contains classes for representing semantic structure in +formulas of first-order logic and for evaluating such formulas in +set-theoretic models. + + >>> from nltk.sem import logic + >>> logic._counter._value = 0 + +The package has two main components: + + - ``logic`` provides support for analyzing expressions of First + Order Logic (FOL). + - ``evaluate`` allows users to recursively determine truth in a + model for formulas of FOL. + +A model consists of a domain of discourse and a valuation function, +which assigns values to non-logical constants. We assume that entities +in the domain are represented as strings such as ``'b1'``, ``'g1'``, +etc. A ``Valuation`` is initialized with a list of (symbol, value) +pairs, where values are entities, sets of entities or sets of tuples +of entities. +The domain of discourse can be inferred from the valuation, and model +is then created with domain and valuation as parameters. + + >>> from nltk.sem import Valuation, Model + >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'), + ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), + ... ('dog', set(['d1'])), + ... 
('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))] + >>> val = Valuation(v) + >>> dom = val.domain + >>> m = Model(dom, val) +""" + +from nltk.sem.boxer import Boxer +from nltk.sem.drt import DRS, DrtExpression +from nltk.sem.evaluate import ( + Assignment, + Model, + Undefined, + Valuation, + arity, + is_rel, + read_valuation, + set2rel, +) +from nltk.sem.lfg import FStructure +from nltk.sem.logic import ( + ApplicationExpression, + Expression, + LogicalExpressionException, + Variable, + binding_ops, + boolean_ops, + equality_preds, + read_logic, +) +from nltk.sem.relextract import clause, extract_rels, rtuple +from nltk.sem.skolemize import skolemize +from nltk.sem.util import evaluate_sents, interpret_sents, parse_sents, root_semrep + +# from nltk.sem.glue import Glue +# from nltk.sem.hole import HoleSemantics +# from nltk.sem.cooper_storage import CooperStore + +# don't import chat80 as its names are too generic diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6bb8921b97f53075e03365f15c048d7a1127696 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3978d20311862fd989b7350b28ed9d40ac44374a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7acb91e1d89bcd753c9762adc8cd60be98216115 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27875d02a0e40aeed96b8eede41c2f39bbf9b224 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f14f5e6673917055c2e39f127ef49623a3c8c5f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df1ddb2f6d578bdb14cae9fa71a66b419a1096b5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6776d344987da6423ebb53c6b66da9f2960f540 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c51d8479f5221f795167d1de801f3e794422338 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd7b2968cd4d154944dce35574b0198ed9a56d75 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ac32a5352f03c1ae7acff157c519e5e87fe55cd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05a502a7da4d91e548a9f33635c9b011ec8b1013 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5cce3bd44f49865bec773a2850a6372740e56be Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ee9be78253946fe694b74c7f06f8d7146cd0376 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..415e184a84f42175a503904e97b2c713cf79b319 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bcd42d5ac478951cb443d3d328ce3222a4090b22 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/boxer.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/boxer.py new file mode 100644 index 0000000000000000000000000000000000000000..d0acd4a607e3bf3481b3f896e8103a9069870c56 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/boxer.py @@ -0,0 +1,1605 @@ +# Natural Language Toolkit: Interface to Boxer +# +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +An interface to Boxer. + +This interface relies on the latest version of the development (subversion) version of +C&C and Boxer. + +Usage +===== + +Set the environment variable CANDC to the bin directory of your CandC installation. +The models directory should be in the CandC root directory. +For example:: + + /path/to/candc/ + bin/ + candc + boxer + models/ + boxer/ +""" + +import operator +import os +import re +import subprocess +import tempfile +from functools import reduce +from optparse import OptionParser + +from nltk.internals import find_binary +from nltk.sem.drt import ( + DRS, + DrtApplicationExpression, + DrtEqualityExpression, + DrtNegatedExpression, + DrtOrExpression, + DrtParser, + DrtProposition, + DrtTokens, + DrtVariableExpression, +) +from nltk.sem.logic import ( + ExpectedMoreTokensException, + LogicalExpressionException, + UnexpectedTokenException, + Variable, +) + + +class Boxer: + """ + This class is an interface to Johan Bos's program Boxer, a wide-coverage + semantic parser that produces Discourse Representation Structures (DRSs). + """ + + def __init__( + self, + boxer_drs_interpreter=None, + elimeq=False, + bin_dir=None, + verbose=False, + resolve=True, + ): + """ + :param boxer_drs_interpreter: A class that converts from the + ``AbstractBoxerDrs`` object hierarchy to a different object. The + default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK + DRT hierarchy. + :param elimeq: When set to true, Boxer removes all equalities from the + DRSs and discourse referents standing in the equality relation are + unified, but only if this can be done in a meaning-preserving manner. + :param resolve: When set to true, Boxer will resolve all anaphoric DRSs and perform merge-reduction. + Resolution follows Van der Sandt's theory of binding and accommodation. + """ + if boxer_drs_interpreter is None: + boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter() + self._boxer_drs_interpreter = boxer_drs_interpreter + + self._resolve = resolve + self._elimeq = elimeq + + self.set_bin_dir(bin_dir, verbose) + + def set_bin_dir(self, bin_dir, verbose=False): + self._candc_bin = self._find_binary("candc", bin_dir, verbose) + self._candc_models_path = os.path.normpath( + os.path.join(self._candc_bin[:-5], "../models") + ) + self._boxer_bin = self._find_binary("boxer", bin_dir, verbose) + + def interpret(self, input, discourse_id=None, question=False, verbose=False): + """ + Use Boxer to give a first order representation. + + :param input: str Input sentence to parse + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate. 
+ :return: ``drt.DrtExpression`` + """ + discourse_ids = [discourse_id] if discourse_id is not None else None + (d,) = self.interpret_multi_sents([[input]], discourse_ids, question, verbose) + if not d: + raise Exception(f'Unable to interpret: "{input}"') + return d + + def interpret_multi(self, input, discourse_id=None, question=False, verbose=False): + """ + Use Boxer to give a first order representation. + + :param input: list of str Input sentences to parse as a single discourse + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate. + :return: ``drt.DrtExpression`` + """ + discourse_ids = [discourse_id] if discourse_id is not None else None + (d,) = self.interpret_multi_sents([input], discourse_ids, question, verbose) + if not d: + raise Exception(f'Unable to interpret: "{input}"') + return d + + def interpret_sents( + self, inputs, discourse_ids=None, question=False, verbose=False + ): + """ + Use Boxer to give a first order representation. + + :param inputs: list of str Input sentences to parse as individual discourses + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :return: list of ``drt.DrtExpression`` + """ + return self.interpret_multi_sents( + [[input] for input in inputs], discourse_ids, question, verbose + ) + + def interpret_multi_sents( + self, inputs, discourse_ids=None, question=False, verbose=False + ): + """ + Use Boxer to give a first order representation. + + :param inputs: list of list of str Input discourses to parse + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :return: ``drt.DrtExpression`` + """ + if discourse_ids is not None: + assert len(inputs) == len(discourse_ids) + assert reduce(operator.and_, (id is not None for id in discourse_ids)) + use_disc_id = True + else: + discourse_ids = list(map(str, range(len(inputs)))) + use_disc_id = False + + candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose) + boxer_out = self._call_boxer(candc_out, verbose=verbose) + + # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out: + # raise UnparseableInputException('Could not parse with candc: "%s"' % input_str) + + drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id) + return [drs_dict.get(id, None) for id in discourse_ids] + + def _call_candc(self, inputs, discourse_ids, question, verbose=False): + """ + Call the ``candc`` binary with the given input. + + :param inputs: list of list of str Input discourses to parse + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :param filename: str A filename for the output file + :return: stdout + """ + args = [ + "--models", + os.path.join(self._candc_models_path, ["boxer", "questions"][question]), + "--candc-printer", + "boxer", + ] + return self._call( + "\n".join( + sum( + ([f"'{id}'"] + d for d, id in zip(inputs, discourse_ids)), + [], + ) + ), + self._candc_bin, + args, + verbose, + ) + + def _call_boxer(self, candc_out, verbose=False): + """ + Call the ``boxer`` binary with the given input. 
+ + :param candc_out: str output from C&C parser + :return: stdout + """ + f = None + try: + fd, temp_filename = tempfile.mkstemp( + prefix="boxer-", suffix=".in", text=True + ) + f = os.fdopen(fd, "w") + f.write(candc_out.decode("utf-8")) + finally: + if f: + f.close() + + args = [ + "--box", + "false", + "--semantics", + "drs", + #'--flat', 'false', # removed from boxer + "--resolve", + ["false", "true"][self._resolve], + "--elimeq", + ["false", "true"][self._elimeq], + "--format", + "prolog", + "--instantiate", + "true", + "--input", + temp_filename, + ] + stdout = self._call(None, self._boxer_bin, args, verbose) + os.remove(temp_filename) + return stdout + + def _find_binary(self, name, bin_dir, verbose=False): + return find_binary( + name, + path_to_bin=bin_dir, + env_vars=["CANDC"], + url="http://svn.ask.it.usyd.edu.au/trac/candc/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + + def _call(self, input_str, binary, args=[], verbose=False): + """ + Call the binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param binary: The location of the binary to call + :param args: A list of command-line arguments. + :return: stdout + """ + if verbose: + print("Calling:", binary) + print("Args:", args) + print("Input:", input_str) + print("Command:", binary + " " + " ".join(args)) + + # Call via a subprocess + if input_str is None: + cmd = [binary] + args + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + else: + cmd = 'echo "{}" | {} {}'.format(input_str, binary, " ".join(args)) + p = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True + ) + stdout, stderr = p.communicate() + + if verbose: + print("Return code:", p.returncode) + if stdout: + print("stdout:\n", stdout, "\n") + if stderr: + print("stderr:\n", stderr, "\n") + if p.returncode != 0: + raise Exception( + "ERROR CALLING: {} {}\nReturncode: {}\n{}".format( + binary, " ".join(args), p.returncode, stderr + ) + ) + + return stdout + + def _parse_to_drs_dict(self, boxer_out, use_disc_id): + lines = boxer_out.decode("utf-8").split("\n") + drs_dict = {} + i = 0 + while i < len(lines): + line = lines[i] + if line.startswith("id("): + comma_idx = line.index(",") + discourse_id = line[3:comma_idx] + if discourse_id[0] == "'" and discourse_id[-1] == "'": + discourse_id = discourse_id[1:-1] + drs_id = line[comma_idx + 1 : line.index(")")] + i += 1 + line = lines[i] + assert line.startswith(f"sem({drs_id},") + if line[-4:] == "').'": + line = line[:-4] + ")." 
+ assert line.endswith(")."), f"can't parse line: {line}" + + search_start = len(f"sem({drs_id},[") + brace_count = 1 + drs_start = -1 + for j, c in enumerate(line[search_start:]): + if c == "[": + brace_count += 1 + if c == "]": + brace_count -= 1 + if brace_count == 0: + drs_start = search_start + j + 1 + if line[drs_start : drs_start + 3] == "','": + drs_start = drs_start + 3 + else: + drs_start = drs_start + 1 + break + assert drs_start > -1 + + drs_input = line[drs_start:-2].strip() + parsed = self._parse_drs(drs_input, discourse_id, use_disc_id) + drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed) + i += 1 + return drs_dict + + def _parse_drs(self, drs_string, discourse_id, use_disc_id): + return BoxerOutputDrsParser([None, discourse_id][use_disc_id]).parse(drs_string) + + +class BoxerOutputDrsParser(DrtParser): + def __init__(self, discourse_id=None): + """ + This class is used to parse the Prolog DRS output from Boxer into a + hierarchy of python objects. + """ + DrtParser.__init__(self) + self.discourse_id = discourse_id + self.sentence_id_offset = None + self.quote_chars = [("'", "'", "\\", False)] + + def parse(self, data, signature=None): + return DrtParser.parse(self, data, signature) + + def get_all_symbols(self): + return ["(", ")", ",", "[", "]", ":"] + + def handle(self, tok, context): + return self.handle_drs(tok) + + def attempt_adjuncts(self, expression, context): + return expression + + def parse_condition(self, indices): + """ + Parse a DRS condition + + :return: list of ``DrtExpression`` + """ + tok = self.token() + accum = self.handle_condition(tok, indices) + if accum is None: + raise UnexpectedTokenException(tok) + return accum + + def handle_drs(self, tok): + if tok == "drs": + return self.parse_drs() + elif tok in ["merge", "smerge"]: + return self._handle_binary_expression(self._make_merge_expression)(None, []) + elif tok in ["alfa"]: + return self._handle_alfa(self._make_merge_expression)(None, []) + + def handle_condition(self, tok, indices): + """ + Handle a DRS condition + + :param indices: list of int + :return: list of ``DrtExpression`` + """ + if tok == "not": + return [self._handle_not()] + + if tok == "or": + conds = [self._handle_binary_expression(self._make_or_expression)] + elif tok == "imp": + conds = [self._handle_binary_expression(self._make_imp_expression)] + elif tok == "eq": + conds = [self._handle_eq()] + elif tok == "prop": + conds = [self._handle_prop()] + + elif tok == "pred": + conds = [self._handle_pred()] + elif tok == "named": + conds = [self._handle_named()] + elif tok == "rel": + conds = [self._handle_rel()] + elif tok == "timex": + conds = self._handle_timex() + elif tok == "card": + conds = [self._handle_card()] + + elif tok == "whq": + conds = [self._handle_whq()] + elif tok == "duplex": + conds = [self._handle_duplex()] + + else: + conds = [] + + return sum( + ( + [cond(sent_index, word_indices) for cond in conds] + for sent_index, word_indices in self._sent_and_word_indices(indices) + ), + [], + ) + + def _handle_not(self): + self.assertToken(self.token(), "(") + drs = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return BoxerNot(drs) + + def _handle_pred(self): + # pred(_G3943, dog, n, 0) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + name = self.token() + self.assertToken(self.token(), ",") + pos = self.token() + self.assertToken(self.token(), ",") + sense = int(self.token()) + self.assertToken(self.token(), ")") 
+ + def _handle_pred_f(sent_index, word_indices): + return BoxerPred( + self.discourse_id, sent_index, word_indices, variable, name, pos, sense + ) + + return _handle_pred_f + + def _handle_duplex(self): + # duplex(whq, drs(...), var, drs(...)) + self.assertToken(self.token(), "(") + # self.assertToken(self.token(), '[') + ans_types = [] + # while self.token(0) != ']': + # cat = self.token() + # self.assertToken(self.token(), ':') + # if cat == 'des': + # ans_types.append(self.token()) + # elif cat == 'num': + # ans_types.append('number') + # typ = self.token() + # if typ == 'cou': + # ans_types.append('count') + # else: + # ans_types.append(typ) + # else: + # ans_types.append(self.token()) + # self.token() #swallow the ']' + + self.assertToken(self.token(), "whq") + self.assertToken(self.token(), ",") + d1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + ref = self.parse_variable() + self.assertToken(self.token(), ",") + d2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerWhq( + self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 + ) + + def _handle_named(self): + # named(x0, john, per, 0) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + name = self.token() + self.assertToken(self.token(), ",") + type = self.token() + self.assertToken(self.token(), ",") + sense = self.token() # as per boxer rev 2554 + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerNamed( + self.discourse_id, sent_index, word_indices, variable, name, type, sense + ) + + def _handle_rel(self): + # rel(_G3993, _G3943, agent, 0) + self.assertToken(self.token(), "(") + var1 = self.parse_variable() + self.assertToken(self.token(), ",") + var2 = self.parse_variable() + self.assertToken(self.token(), ",") + rel = self.token() + self.assertToken(self.token(), ",") + sense = int(self.token()) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerRel( + self.discourse_id, sent_index, word_indices, var1, var2, rel, sense + ) + + def _handle_timex(self): + # timex(_G18322, date([]: (+), []:'XXXX', [1004]:'04', []:'XX')) + self.assertToken(self.token(), "(") + arg = self.parse_variable() + self.assertToken(self.token(), ",") + new_conds = self._handle_time_expression(arg) + self.assertToken(self.token(), ")") + return new_conds + + def _handle_time_expression(self, arg): + # date([]: (+), []:'XXXX', [1004]:'04', []:'XX') + tok = self.token() + self.assertToken(self.token(), "(") + if tok == "date": + conds = self._handle_date(arg) + elif tok == "time": + conds = self._handle_time(arg) + else: + return None + self.assertToken(self.token(), ")") + return [ + lambda sent_index, word_indices: BoxerPred( + self.discourse_id, sent_index, word_indices, arg, tok, "n", 0 + ) + ] + [lambda sent_index, word_indices: cond for cond in conds] + + def _handle_date(self, arg): + # []: (+), []:'XXXX', [1004]:'04', []:'XX' + conds = [] + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + self.assertToken(self.token(), "(") + pol = self.token() + self.assertToken(self.token(), ")") + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_pol_{pol}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + year = self.token() + if 
year != "XXXX": + year = year.replace(":", "_") + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_year_{year}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + month = self.token() + if month != "XX": + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_month_{month}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + day = self.token() + if day != "XX": + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_day_{day}", + "a", + 0, + ) + ) + + return conds + + def _handle_time(self, arg): + # time([1018]:'18', []:'XX', []:'XX') + conds = [] + self._parse_index_list() + hour = self.token() + if hour != "XX": + conds.append(self._make_atom("r_hour_2", arg, hour)) + self.assertToken(self.token(), ",") + + self._parse_index_list() + min = self.token() + if min != "XX": + conds.append(self._make_atom("r_min_2", arg, min)) + self.assertToken(self.token(), ",") + + self._parse_index_list() + sec = self.token() + if sec != "XX": + conds.append(self._make_atom("r_sec_2", arg, sec)) + + return conds + + def _handle_card(self): + # card(_G18535, 28, ge) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + value = self.token() + self.assertToken(self.token(), ",") + type = self.token() + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerCard( + self.discourse_id, sent_index, word_indices, variable, value, type + ) + + def _handle_prop(self): + # prop(_G15949, drs(...)) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + drs = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerProp( + self.discourse_id, sent_index, word_indices, variable, drs + ) + + def _parse_index_list(self): + # [1001,1002]: + indices = [] + self.assertToken(self.token(), "[") + while self.token(0) != "]": + indices.append(self.parse_index()) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ":") + return indices + + def parse_drs(self): + # drs([[1001]:_G3943], + # [[1002]:pred(_G3943, dog, n, 0)] + # ) + self.assertToken(self.token(), "(") + self.assertToken(self.token(), "[") + refs = set() + while self.token(0) != "]": + indices = self._parse_index_list() + refs.add(self.parse_variable()) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ",") + self.assertToken(self.token(), "[") + conds = [] + while self.token(0) != "]": + indices = self._parse_index_list() + conds.extend(self.parse_condition(indices)) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ")") + return BoxerDrs(list(refs), conds) + + def _handle_binary_expression(self, make_callback): + self.assertToken(self.token(), "(") + drs1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + drs2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: make_callback( + sent_index, word_indices, drs1, drs2 + ) + + def _handle_alfa(self, 
make_callback): + self.assertToken(self.token(), "(") + type = self.token() + self.assertToken(self.token(), ",") + drs1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + drs2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: make_callback( + sent_index, word_indices, drs1, drs2 + ) + + def _handle_eq(self): + self.assertToken(self.token(), "(") + var1 = self.parse_variable() + self.assertToken(self.token(), ",") + var2 = self.parse_variable() + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerEq( + self.discourse_id, sent_index, word_indices, var1, var2 + ) + + def _handle_whq(self): + self.assertToken(self.token(), "(") + self.assertToken(self.token(), "[") + ans_types = [] + while self.token(0) != "]": + cat = self.token() + self.assertToken(self.token(), ":") + if cat == "des": + ans_types.append(self.token()) + elif cat == "num": + ans_types.append("number") + typ = self.token() + if typ == "cou": + ans_types.append("count") + else: + ans_types.append(typ) + else: + ans_types.append(self.token()) + self.token() # swallow the ']' + + self.assertToken(self.token(), ",") + d1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + ref = self.parse_variable() + self.assertToken(self.token(), ",") + d2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerWhq( + self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 + ) + + def _make_merge_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerDrs(drs1.refs + drs2.refs, drs1.conds + drs2.conds) + + def _make_or_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2) + + def _make_imp_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerDrs(drs1.refs, drs1.conds, drs2) + + def parse_variable(self): + var = self.token() + assert re.match(r"^[exps]\d+$", var), var + return var + + def parse_index(self): + return int(self.token()) + + def _sent_and_word_indices(self, indices): + """ + :return: list of (sent_index, word_indices) tuples + """ + sent_indices = {(i / 1000) - 1 for i in indices if i >= 0} + if sent_indices: + pairs = [] + for sent_index in sent_indices: + word_indices = [ + (i % 1000) - 1 for i in indices if sent_index == (i / 1000) - 1 + ] + pairs.append((sent_index, word_indices)) + return pairs + else: + word_indices = [(i % 1000) - 1 for i in indices] + return [(None, word_indices)] + + +class BoxerDrsParser(DrtParser): + """ + Reparse the str form of subclasses of ``AbstractBoxerDrs`` + """ + + def __init__(self, discourse_id=None): + DrtParser.__init__(self) + self.discourse_id = discourse_id + + def get_all_symbols(self): + return [ + DrtTokens.OPEN, + DrtTokens.CLOSE, + DrtTokens.COMMA, + DrtTokens.OPEN_BRACKET, + DrtTokens.CLOSE_BRACKET, + ] + + def attempt_adjuncts(self, expression, context): + return expression + + def handle(self, tok, context): + try: + # if tok == 'drs': + # self.assertNextToken(DrtTokens.OPEN) + # label = int(self.token()) + # self.assertNextToken(DrtTokens.COMMA) + # refs = list(map(int, self.handle_refs())) + # self.assertNextToken(DrtTokens.COMMA) + # conds = self.handle_conds(None) + # self.assertNextToken(DrtTokens.CLOSE) + # return BoxerDrs(label, refs, conds) + if tok == "pred": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if 
self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + name = self.token() + self.assertNextToken(DrtTokens.COMMA) + pos = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense) + elif tok == "named": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + name = self.token() + self.assertNextToken(DrtTokens.COMMA) + type = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerNamed( + disc_id, sent_id, word_ids, variable, name, type, sense + ) + elif tok == "rel": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + var1 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + var2 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + rel = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense) + elif tok == "prop": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + drs = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerProp(disc_id, sent_id, word_ids, variable, drs) + elif tok == "not": + self.assertNextToken(DrtTokens.OPEN) + drs = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerNot(drs) + elif tok == "imp": + self.assertNextToken(DrtTokens.OPEN) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerDrs(drs1.refs, drs1.conds, drs2) + elif tok == "or": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2) + elif tok 
== "eq": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + var1 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + var2 = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerEq(disc_id, sent_id, word_ids, var1, var2) + elif tok == "card": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + var = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + value = self.token() + self.assertNextToken(DrtTokens.COMMA) + type = self.token() + self.assertNextToken(DrtTokens.CLOSE) + return BoxerCard(disc_id, sent_id, word_ids, var, value, type) + elif tok == "whq": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + ans_types = self.handle_refs() + self.assertNextToken(DrtTokens.COMMA) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + var = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2) + except Exception as e: + raise LogicalExpressionException(self._currentIndex, str(e)) from e + assert False, repr(tok) + + def nullableIntToken(self): + t = self.token() + return int(t) if t != "None" else None + + def get_next_token_variable(self, description): + try: + return self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException(e.index, "Variable expected.") from e + + +class AbstractBoxerDrs: + def variables(self): + """ + :return: (set, set, set) + """ + variables, events, propositions = self._variables() + return (variables - (events | propositions), events, propositions - events) + + def variable_types(self): + vartypes = {} + for t, vars in zip(("z", "e", "p"), self.variables()): + for v in vars: + vartypes[v] = t + return vartypes + + def _variables(self): + """ + :return: (set, set, set) + """ + return (set(), set(), set()) + + def atoms(self): + return set() + + def clean(self): + return self + + def _clean_name(self, name): + return name.replace("-", "_").replace("'", "_") + + def renumber_sentences(self, f): + return self + + def __hash__(self): + return hash(f"{self}") + + +class BoxerDrs(AbstractBoxerDrs): + def __init__(self, refs, conds, consequent=None): + AbstractBoxerDrs.__init__(self) + self.refs = refs + self.conds = conds + self.consequent = consequent + + def _variables(self): + variables = (set(), set(), set()) + for cond in self.conds: + for s, v in zip(variables, cond._variables()): + s.update(v) + if self.consequent is not None: + for s, v in zip(variables, self.consequent._variables()): + s.update(v) + return variables + + def atoms(self): + atoms = reduce(operator.or_, (cond.atoms() for 
cond in self.conds), set()) + if self.consequent is not None: + atoms.update(self.consequent.atoms()) + return atoms + + def clean(self): + consequent = self.consequent.clean() if self.consequent else None + return BoxerDrs(self.refs, [c.clean() for c in self.conds], consequent) + + def renumber_sentences(self, f): + consequent = self.consequent.renumber_sentences(f) if self.consequent else None + return BoxerDrs( + self.refs, [c.renumber_sentences(f) for c in self.conds], consequent + ) + + def __repr__(self): + s = "drs([{}], [{}])".format( + ", ".join("%s" % r for r in self.refs), + ", ".join("%s" % c for c in self.conds), + ) + if self.consequent is not None: + s = f"imp({s}, {self.consequent})" + return s + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.refs == other.refs + and len(self.conds) == len(other.conds) + and reduce( + operator.and_, (c1 == c2 for c1, c2 in zip(self.conds, other.conds)) + ) + and self.consequent == other.consequent + ) + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + +class BoxerNot(AbstractBoxerDrs): + def __init__(self, drs): + AbstractBoxerDrs.__init__(self) + self.drs = drs + + def _variables(self): + return self.drs._variables() + + def atoms(self): + return self.drs.atoms() + + def clean(self): + return BoxerNot(self.drs.clean()) + + def renumber_sentences(self, f): + return BoxerNot(self.drs.renumber_sentences(f)) + + def __repr__(self): + return "not(%s)" % (self.drs) + + def __eq__(self, other): + return self.__class__ == other.__class__ and self.drs == other.drs + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + +class BoxerIndexed(AbstractBoxerDrs): + def __init__(self, discourse_id, sent_index, word_indices): + AbstractBoxerDrs.__init__(self) + self.discourse_id = discourse_id + self.sent_index = sent_index + self.word_indices = word_indices + + def atoms(self): + return {self} + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.discourse_id == other.discourse_id + and self.sent_index == other.sent_index + and self.word_indices == other.word_indices + and reduce(operator.and_, (s == o for s, o in zip(self, other))) + ) + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + def __repr__(self): + s = "{}({}, {}, [{}]".format( + self._pred(), + self.discourse_id, + self.sent_index, + ", ".join("%s" % wi for wi in self.word_indices), + ) + for v in self: + s += ", %s" % v + return s + ")" + + +class BoxerPred(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.name = name + self.pos = pos + self.sense = sense + + def _variables(self): + return ({self.var}, set(), set()) + + def change_var(self, var): + return BoxerPred( + self.discourse_id, + self.sent_index, + self.word_indices, + var, + self.name, + self.pos, + self.sense, + ) + + def clean(self): + return BoxerPred( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self._clean_name(self.name), + self.pos, + self.sense, + ) + + def renumber_sentences(self, f): + new_sent_index = f(self.sent_index) + return BoxerPred( + self.discourse_id, + new_sent_index, + self.word_indices, + self.var, + self.name, + self.pos, + self.sense, + ) + + def __iter__(self): + return iter((self.var, self.name, self.pos, self.sense)) + + 
def _pred(self): + return "pred" + + +class BoxerNamed(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.name = name + self.type = type + self.sense = sense + + def _variables(self): + return ({self.var}, set(), set()) + + def change_var(self, var): + return BoxerNamed( + self.discourse_id, + self.sent_index, + self.word_indices, + var, + self.name, + self.type, + self.sense, + ) + + def clean(self): + return BoxerNamed( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self._clean_name(self.name), + self.type, + self.sense, + ) + + def renumber_sentences(self, f): + return BoxerNamed( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.name, + self.type, + self.sense, + ) + + def __iter__(self): + return iter((self.var, self.name, self.type, self.sense)) + + def _pred(self): + return "named" + + +class BoxerRel(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var1 = var1 + self.var2 = var2 + self.rel = rel + self.sense = sense + + def _variables(self): + return ({self.var1, self.var2}, set(), set()) + + def clean(self): + return BoxerRel( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var1, + self.var2, + self._clean_name(self.rel), + self.sense, + ) + + def renumber_sentences(self, f): + return BoxerRel( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var1, + self.var2, + self.rel, + self.sense, + ) + + def __iter__(self): + return iter((self.var1, self.var2, self.rel, self.sense)) + + def _pred(self): + return "rel" + + +class BoxerProp(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, drs): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.drs = drs + + def _variables(self): + return tuple( + map(operator.or_, (set(), set(), {self.var}), self.drs._variables()) + ) + + def referenced_labels(self): + return {self.drs} + + def atoms(self): + return self.drs.atoms() + + def clean(self): + return BoxerProp( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self.drs.clean(), + ) + + def renumber_sentences(self, f): + return BoxerProp( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.drs.renumber_sentences(f), + ) + + def __iter__(self): + return iter((self.var, self.drs)) + + def _pred(self): + return "prop" + + +class BoxerEq(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var1, var2): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var1 = var1 + self.var2 = var2 + + def _variables(self): + return ({self.var1, self.var2}, set(), set()) + + def atoms(self): + return set() + + def renumber_sentences(self, f): + return BoxerEq( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var1, + self.var2, + ) + + def __iter__(self): + return iter((self.var1, self.var2)) + + def _pred(self): + return "eq" + + +class BoxerCard(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, value, type): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.value = value + self.type = type + + def _variables(self): + return ({self.var}, set(), set()) + + def 
renumber_sentences(self, f): + return BoxerCard( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.value, + self.type, + ) + + def __iter__(self): + return iter((self.var, self.value, self.type)) + + def _pred(self): + return "card" + + +class BoxerOr(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.drs1 = drs1 + self.drs2 = drs2 + + def _variables(self): + return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables())) + + def atoms(self): + return self.drs1.atoms() | self.drs2.atoms() + + def clean(self): + return BoxerOr( + self.discourse_id, + self.sent_index, + self.word_indices, + self.drs1.clean(), + self.drs2.clean(), + ) + + def renumber_sentences(self, f): + return BoxerOr( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.drs1, + self.drs2, + ) + + def __iter__(self): + return iter((self.drs1, self.drs2)) + + def _pred(self): + return "or" + + +class BoxerWhq(BoxerIndexed): + def __init__( + self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2 + ): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.ans_types = ans_types + self.drs1 = drs1 + self.variable = variable + self.drs2 = drs2 + + def _variables(self): + return tuple( + map( + operator.or_, + ({self.variable}, set(), set()), + self.drs1._variables(), + self.drs2._variables(), + ) + ) + + def atoms(self): + return self.drs1.atoms() | self.drs2.atoms() + + def clean(self): + return BoxerWhq( + self.discourse_id, + self.sent_index, + self.word_indices, + self.ans_types, + self.drs1.clean(), + self.variable, + self.drs2.clean(), + ) + + def renumber_sentences(self, f): + return BoxerWhq( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.ans_types, + self.drs1, + self.variable, + self.drs2, + ) + + def __iter__(self): + return iter( + ("[" + ",".join(self.ans_types) + "]", self.drs1, self.variable, self.drs2) + ) + + def _pred(self): + return "whq" + + +class PassthroughBoxerDrsInterpreter: + def interpret(self, ex): + return ex + + +class NltkDrtBoxerDrsInterpreter: + def __init__(self, occur_index=False): + self._occur_index = occur_index + + def interpret(self, ex): + """ + :param ex: ``AbstractBoxerDrs`` + :return: ``DrtExpression`` + """ + if isinstance(ex, BoxerDrs): + drs = DRS( + [Variable(r) for r in ex.refs], list(map(self.interpret, ex.conds)) + ) + if ex.consequent is not None: + drs.consequent = self.interpret(ex.consequent) + return drs + elif isinstance(ex, BoxerNot): + return DrtNegatedExpression(self.interpret(ex.drs)) + elif isinstance(ex, BoxerPred): + pred = self._add_occur_indexing(f"{ex.pos}_{ex.name}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerNamed): + pred = self._add_occur_indexing(f"ne_{ex.type}_{ex.name}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerRel): + pred = self._add_occur_indexing("%s" % (ex.rel), ex) + return self._make_atom(pred, ex.var1, ex.var2) + elif isinstance(ex, BoxerProp): + return DrtProposition(Variable(ex.var), self.interpret(ex.drs)) + elif isinstance(ex, BoxerEq): + return DrtEqualityExpression( + DrtVariableExpression(Variable(ex.var1)), + DrtVariableExpression(Variable(ex.var2)), + ) + elif isinstance(ex, BoxerCard): + pred = self._add_occur_indexing(f"card_{ex.type}_{ex.value}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerOr): + return 
DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2)) + elif isinstance(ex, BoxerWhq): + drs1 = self.interpret(ex.drs1) + drs2 = self.interpret(ex.drs2) + return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds) + assert False, f"{ex.__class__.__name__}: {ex}" + + def _make_atom(self, pred, *args): + accum = DrtVariableExpression(Variable(pred)) + for arg in args: + accum = DrtApplicationExpression( + accum, DrtVariableExpression(Variable(arg)) + ) + return accum + + def _add_occur_indexing(self, base, ex): + if self._occur_index and ex.sent_index is not None: + if ex.discourse_id: + base += "_%s" % ex.discourse_id + base += "_s%s" % ex.sent_index + base += "_w%s" % sorted(ex.word_indices)[0] + return base + + +class UnparseableInputException(Exception): + pass + + +if __name__ == "__main__": + opts = OptionParser("usage: %prog TEXT [options]") + opts.add_option( + "--verbose", + "-v", + help="display verbose logs", + action="store_true", + default=False, + dest="verbose", + ) + opts.add_option( + "--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol" + ) + opts.add_option( + "--question", + "-q", + help="input is a question", + action="store_true", + default=False, + dest="question", + ) + opts.add_option( + "--occur", + "-o", + help="occurrence index", + action="store_true", + default=False, + dest="occur_index", + ) + (options, args) = opts.parse_args() + + if len(args) != 1: + opts.error("incorrect number of arguments") + + interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index) + drs = Boxer(interpreter).interpret_multi( + args[0].split(r"\n"), question=options.question, verbose=options.verbose + ) + if drs is None: + print(None) + else: + drs = drs.simplify().eliminate_equality() + if options.fol: + print(drs.fol().normalize()) + else: + drs.pretty_print() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/chat80.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/chat80.py new file mode 100644 index 0000000000000000000000000000000000000000..3d1e77a49f19b2e4414f66741570cdb033ec7ca6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/chat80.py @@ -0,0 +1,857 @@ +# Natural Language Toolkit: Chat-80 KB Reader +# See https://www.w3.org/TR/swbp-skos-core-guide/ +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein , +# URL: +# For license information, see LICENSE.TXT + +r""" +Overview +======== + +Chat-80 was a natural language system which allowed the user to +interrogate a Prolog knowledge base in the domain of world +geography. It was developed in the early '80s by Warren and Pereira; see +``https://www.aclweb.org/anthology/J82-3002.pdf`` for a description and +``http://www.cis.upenn.edu/~pereira/oldies.html`` for the source +files. + +This module contains functions to extract data from the Chat-80 +relation files ('the world database'), and convert them into a format +that can be incorporated in the FOL models of +``nltk.sem.evaluate``. The code assumes that the Prolog +input files are available in the NLTK corpora directory. + +The Chat-80 World Database consists of the following files:: + + world0.pl + rivers.pl + cities.pl + countries.pl + contain.pl + borders.pl + +This module uses a slightly modified version of ``world0.pl``, in which +a set of Prolog rules have been omitted. The modified file is named +``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since +it uses a list rather than a string in the second field.
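As a minimal sketch of the workflow this module supports (it assumes the chat80 corpus files are installed as NLTK data, and it uses the ``process_bundle`` and ``make_valuation`` helpers defined later in this module)::

    from nltk.sem import chat80

    # Build a Concept object for every relation described by the metadata
    # bundles declared below, then fold their extensions into a single
    # Valuation suitable for use with nltk.sem.evaluate.
    concepts = chat80.process_bundle(chat80.rels)   # dict keyed by concept label
    val = chat80.make_valuation(concepts.values(), read=True)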
+ +Reading Chat-80 Files +===================== + +Chat-80 relations are like tables in a relational database. The +relation acts as the name of the table; the first argument acts as the +'primary key'; and subsequent arguments are further fields in the +table. In general, the name of the table provides a label for a unary +predicate whose extension is all the primary keys. For example, +relations in ``cities.pl`` are of the following form:: + + 'city(athens,greece,1368).' + +Here, ``'athens'`` is the key, and will be mapped to a member of the +unary predicate *city*. + +The fields in the table are mapped to binary predicates. The first +argument of the predicate is the primary key, while the second +argument is the data in the relevant field. Thus, in the above +example, the third field is mapped to the binary predicate +*population_of*, whose extension is a set of pairs such as +``'(athens, 1368)'``. + +An exception to this general framework is required by the relations in +the files ``borders.pl`` and ``contains.pl``. These contain facts of the +following form:: + + 'borders(albania,greece).' + + 'contains0(africa,central_africa).' + +We do not want to form a unary concept out of the element in +the first field of these records, and we want the label of the binary +relation just to be ``'border'``/``'contain'`` respectively. + +In order to drive the extraction process, we use 'relation metadata bundles' +which are Python dictionaries such as the following:: + + city = {'rel_name': 'city', + 'closures': [], + 'schema': ['city', 'country', 'population'], + 'filename': 'cities.pl'} + +According to this, the file ``city['filename']`` contains a list of +relational tuples (or more accurately, the corresponding strings in +Prolog form) whose predicate symbol is ``city['rel_name']`` and whose +relational schema is ``city['schema']``. The notion of a ``closure`` is +discussed in the next section. + +Concepts +======== +In order to encapsulate the results of the extraction, a class of +``Concept`` objects is introduced. A ``Concept`` object has a number of +attributes, in particular a ``prefLabel`` and ``extension``, which make +it easier to inspect the output of the extraction. In addition, the +``extension`` can be further processed: in the case of the ``'border'`` +relation, we check that the relation is symmetric, and in the case +of the ``'contain'`` relation, we carry out the transitive +closure. The closure properties associated with a concept are +indicated in the relation metadata, as noted earlier. + +The ``extension`` of a ``Concept`` object is then incorporated into a +``Valuation`` object. + +Persistence +=========== +The functions ``val_dump`` and ``val_load`` are provided to allow a +valuation to be stored in a persistent database and re-loaded, rather +than having to be re-computed each time. + +Individuals and Lexical Items +============================= +As well as deriving relations from the Chat-80 data, we also create a +set of individual constants, one for each entity in the domain. The +individual constants are string-identical to the entities. For +example, given a data item such as ``'zloty'``, we add to the valuation +a pair ``('zloty', 'zloty')``. In order to parse English sentences that +refer to these entities, we also create a lexical item such as the +following for each individual constant:: + + PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty' + +The set of rules is written to the file ``chat_pnames.cfg`` in the +current directory.
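A hedged sketch of the persistence round trip described above, using the ``val_dump`` and ``val_load`` helpers named in the Persistence section (the database filename here is only illustrative)::

    from nltk.sem import chat80

    # Build the valuation once and write it to a persistent store ...
    chat80.val_dump(chat80.rels, 'chat80.db')
    # ... then re-load it later instead of re-computing it.
    val = chat80.val_load('chat80.db')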
+ +""" + +import os +import re +import shelve +import sys + +import nltk.data + +########################################################################### +# Chat-80 relation metadata bundles needed to build the valuation +########################################################################### + +borders = { + "rel_name": "borders", + "closures": ["symmetric"], + "schema": ["region", "border"], + "filename": "borders.pl", +} + +contains = { + "rel_name": "contains0", + "closures": ["transitive"], + "schema": ["region", "contain"], + "filename": "contain.pl", +} + +city = { + "rel_name": "city", + "closures": [], + "schema": ["city", "country", "population"], + "filename": "cities.pl", +} + +country = { + "rel_name": "country", + "closures": [], + "schema": [ + "country", + "region", + "latitude", + "longitude", + "area", + "population", + "capital", + "currency", + ], + "filename": "countries.pl", +} + +circle_of_lat = { + "rel_name": "circle_of_latitude", + "closures": [], + "schema": ["circle_of_latitude", "degrees"], + "filename": "world1.pl", +} + +circle_of_long = { + "rel_name": "circle_of_longitude", + "closures": [], + "schema": ["circle_of_longitude", "degrees"], + "filename": "world1.pl", +} + +continent = { + "rel_name": "continent", + "closures": [], + "schema": ["continent"], + "filename": "world1.pl", +} + +region = { + "rel_name": "in_continent", + "closures": [], + "schema": ["region", "continent"], + "filename": "world1.pl", +} + +ocean = { + "rel_name": "ocean", + "closures": [], + "schema": ["ocean"], + "filename": "world1.pl", +} + +sea = {"rel_name": "sea", "closures": [], "schema": ["sea"], "filename": "world1.pl"} + + +items = [ + "borders", + "contains", + "city", + "country", + "circle_of_lat", + "circle_of_long", + "continent", + "region", + "ocean", + "sea", +] +items = tuple(sorted(items)) + +item_metadata = { + "borders": borders, + "contains": contains, + "city": city, + "country": country, + "circle_of_lat": circle_of_lat, + "circle_of_long": circle_of_long, + "continent": continent, + "region": region, + "ocean": ocean, + "sea": sea, +} + +rels = item_metadata.values() + +not_unary = ["borders.pl", "contain.pl"] + +########################################################################### + + +class Concept: + """ + A Concept class, loosely based on SKOS + (https://www.w3.org/TR/swbp-skos-core-guide/). 
+ """ + + def __init__(self, prefLabel, arity, altLabels=[], closures=[], extension=set()): + """ + :param prefLabel: the preferred label for the concept + :type prefLabel: str + :param arity: the arity of the concept + :type arity: int + :param altLabels: other (related) labels + :type altLabels: list + :param closures: closure properties of the extension + (list items can be ``symmetric``, ``reflexive``, ``transitive``) + :type closures: list + :param extension: the extensional value of the concept + :type extension: set + """ + self.prefLabel = prefLabel + self.arity = arity + self.altLabels = altLabels + self.closures = closures + # keep _extension internally as a set + self._extension = extension + # public access is via a list (for slicing) + self.extension = sorted(list(extension)) + + def __str__(self): + # _extension = '' + # for element in sorted(self.extension): + # if isinstance(element, tuple): + # element = '(%s, %s)' % (element) + # _extension += element + ', ' + # _extension = _extension[:-1] + + return "Label = '{}'\nArity = {}\nExtension = {}".format( + self.prefLabel, + self.arity, + self.extension, + ) + + def __repr__(self): + return "Concept('%s')" % self.prefLabel + + def augment(self, data): + """ + Add more data to the ``Concept``'s extension set. + + :param data: a new semantic value + :type data: string or pair of strings + :rtype: set + + """ + self._extension.add(data) + self.extension = sorted(list(self._extension)) + return self._extension + + def _make_graph(self, s): + """ + Convert a set of pairs into an adjacency linked list encoding of a graph. + """ + g = {} + for (x, y) in s: + if x in g: + g[x].append(y) + else: + g[x] = [y] + return g + + def _transclose(self, g): + """ + Compute the transitive closure of a graph represented as a linked list. + """ + for x in g: + for adjacent in g[x]: + # check that adjacent is a key + if adjacent in g: + for y in g[adjacent]: + if y not in g[x]: + g[x].append(y) + return g + + def _make_pairs(self, g): + """ + Convert an adjacency linked list back into a set of pairs. + """ + pairs = [] + for node in g: + for adjacent in g[node]: + pairs.append((node, adjacent)) + return set(pairs) + + def close(self): + """ + Close a binary relation in the ``Concept``'s extension set. + + :return: a new extension for the ``Concept`` in which the + relation is closed under a given property + """ + from nltk.sem import is_rel + + assert is_rel(self._extension) + if "symmetric" in self.closures: + pairs = [] + for (x, y) in self._extension: + pairs.append((y, x)) + sym = set(pairs) + self._extension = self._extension.union(sym) + if "transitive" in self.closures: + all = self._make_graph(self._extension) + closed = self._transclose(all) + trans = self._make_pairs(closed) + self._extension = self._extension.union(trans) + self.extension = sorted(list(self._extension)) + + +def clause2concepts(filename, rel_name, schema, closures=[]): + """ + Convert a file of Prolog clauses into a list of ``Concept`` objects. 
+ + :param filename: filename containing the relations + :type filename: str + :param rel_name: name of the relation + :type rel_name: str + :param schema: the schema used in a set of relational tuples + :type schema: list + :param closures: closure properties for the extension of the concept + :type closures: list + :return: a list of ``Concept`` objects + :rtype: list + """ + concepts = [] + # position of the subject of a binary relation + subj = 0 + # label of the 'primary key' + pkey = schema[0] + # fields other than the primary key + fields = schema[1:] + + # convert a file into a list of lists + records = _str2records(filename, rel_name) + + # add a unary concept corresponding to the set of entities + # in the primary key position + # relations in 'not_unary' are more like ordinary binary relations + if not filename in not_unary: + concepts.append(unary_concept(pkey, subj, records)) + + # add a binary concept for each non-key field + for field in fields: + obj = schema.index(field) + concepts.append(binary_concept(field, closures, subj, obj, records)) + + return concepts + + +def cities2table(filename, rel_name, dbname, verbose=False, setup=False): + """ + Convert a file of Prolog clauses into a database table. + + This is not generic, since it doesn't allow arbitrary + schemas to be set as a parameter. + + Intended usage:: + + cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True) + + :param filename: filename containing the relations + :type filename: str + :param rel_name: name of the relation + :type rel_name: str + :param dbname: filename of persistent store + :type schema: str + """ + import sqlite3 + + records = _str2records(filename, rel_name) + connection = sqlite3.connect(dbname) + cur = connection.cursor() + if setup: + cur.execute( + """CREATE TABLE city_table + (City text, Country text, Population int)""" + ) + + table_name = "city_table" + for t in records: + cur.execute("insert into %s values (?,?,?)" % table_name, t) + if verbose: + print("inserting values into %s: " % table_name, t) + connection.commit() + if verbose: + print("Committing update to %s" % dbname) + cur.close() + + +def sql_query(dbname, query): + """ + Execute an SQL query over a database. + :param dbname: filename of persistent store + :type schema: str + :param query: SQL query + :type rel_name: str + """ + import sqlite3 + + try: + path = nltk.data.find(dbname) + connection = sqlite3.connect(str(path)) + cur = connection.cursor() + return cur.execute(query) + except (ValueError, sqlite3.OperationalError): + import warnings + + warnings.warn( + "Make sure the database file %s is installed and uncompressed." % dbname + ) + raise + + +def _str2records(filename, rel): + """ + Read a file into memory and convert each relation clause into a list. + """ + recs = [] + contents = nltk.data.load("corpora/chat80/%s" % filename, format="text") + for line in contents.splitlines(): + if line.startswith(rel): + line = re.sub(rel + r"\(", "", line) + line = re.sub(r"\)\.$", "", line) + record = line.split(",") + recs.append(record) + return recs + + +def unary_concept(label, subj, records): + """ + Make a unary concept out of the primary key in a record. + + A record is a list of entities in some relation, such as + ``['france', 'paris']``, where ``'france'`` is acting as the primary + key. 
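+
+    A minimal sketch (made-up records)::
+
+        c = unary_concept('country', 0, [['france', 'paris'], ['italy', 'rome']])
+        c.extension   # -> ['france', 'italy']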
+ + :param label: the preferred label for the concept + :type label: string + :param subj: position in the record of the subject of the predicate + :type subj: int + :param records: a list of records + :type records: list of lists + :return: ``Concept`` of arity 1 + :rtype: Concept + """ + c = Concept(label, arity=1, extension=set()) + for record in records: + c.augment(record[subj]) + return c + + +def binary_concept(label, closures, subj, obj, records): + """ + Make a binary concept out of the primary key and another field in a record. + + A record is a list of entities in some relation, such as + ``['france', 'paris']``, where ``'france'`` is acting as the primary + key, and ``'paris'`` stands in the ``'capital_of'`` relation to + ``'france'``. + + More generally, given a record such as ``['a', 'b', 'c']``, where + label is bound to ``'B'``, and ``obj`` bound to 1, the derived + binary concept will have label ``'B_of'``, and its extension will + be a set of pairs such as ``('a', 'b')``. + + + :param label: the base part of the preferred label for the concept + :type label: str + :param closures: closure properties for the extension of the concept + :type closures: list + :param subj: position in the record of the subject of the predicate + :type subj: int + :param obj: position in the record of the object of the predicate + :type obj: int + :param records: a list of records + :type records: list of lists + :return: ``Concept`` of arity 2 + :rtype: Concept + """ + if not label == "border" and not label == "contain": + label = label + "_of" + c = Concept(label, arity=2, closures=closures, extension=set()) + for record in records: + c.augment((record[subj], record[obj])) + # close the concept's extension according to the properties in closures + c.close() + return c + + +def process_bundle(rels): + """ + Given a list of relation metadata bundles, make a corresponding + dictionary of concepts, indexed by the relation name. + + :param rels: bundle of metadata needed for constructing a concept + :type rels: list(dict) + :return: a dictionary of concepts, indexed by the relation name. + :rtype: dict(str): Concept + """ + concepts = {} + for rel in rels: + rel_name = rel["rel_name"] + closures = rel["closures"] + schema = rel["schema"] + filename = rel["filename"] + + concept_list = clause2concepts(filename, rel_name, schema, closures) + for c in concept_list: + label = c.prefLabel + if label in concepts: + for data in c.extension: + concepts[label].augment(data) + concepts[label].close() + else: + concepts[label] = c + return concepts + + +def make_valuation(concepts, read=False, lexicon=False): + """ + Convert a list of ``Concept`` objects into a list of (label, extension) pairs; + optionally create a ``Valuation`` object. + + :param concepts: concepts + :type concepts: list(Concept) + :param read: if ``True``, ``(symbol, set)`` pairs are read into a ``Valuation`` + :type read: bool + :rtype: list or Valuation + """ + vals = [] + + for c in concepts: + vals.append((c.prefLabel, c.extension)) + if lexicon: + read = True + if read: + from nltk.sem import Valuation + + val = Valuation({}) + val.update(vals) + # add labels for individuals + val = label_indivs(val, lexicon=lexicon) + return val + else: + return vals + + +def val_dump(rels, db): + """ + Make a ``Valuation`` from a list of relation metadata bundles and dump to + persistent database. + + :param rels: bundle of metadata needed for constructing a concept + :type rels: list of dict + :param db: name of file to which data is written. 
+ The suffix '.db' will be automatically appended. + :type db: str + """ + concepts = process_bundle(rels).values() + valuation = make_valuation(concepts, read=True) + db_out = shelve.open(db, "n") + + db_out.update(valuation) + + db_out.close() + + +def val_load(db): + """ + Load a ``Valuation`` from a persistent database. + + :param db: name of file from which data is read. + The suffix '.db' should be omitted from the name. + :type db: str + """ + dbname = db + ".db" + + if not os.access(dbname, os.R_OK): + sys.exit("Cannot read file: %s" % dbname) + else: + db_in = shelve.open(db) + from nltk.sem import Valuation + + val = Valuation(db_in) + # val.read(db_in.items()) + return val + + +# def alpha(str): +# """ +# Utility to filter out non-alphabetic constants. + +#:param str: candidate constant +#:type str: string +#:rtype: bool +# """ +# try: +# int(str) +# return False +# except ValueError: +## some unknown values in records are labeled '?' +# if not str == '?': +# return True + + +def label_indivs(valuation, lexicon=False): + """ + Assign individual constants to the individuals in the domain of a ``Valuation``. + + Given a valuation with an entry of the form ``{'rel': {'a': True}}``, + add a new entry ``{'a': 'a'}``. + + :type valuation: Valuation + :rtype: Valuation + """ + # collect all the individuals into a domain + domain = valuation.domain + # convert the domain into a sorted list of alphabetic terms + # use the same string as a label + pairs = [(e, e) for e in domain] + if lexicon: + lex = make_lex(domain) + with open("chat_pnames.cfg", "w") as outfile: + outfile.writelines(lex) + # read the pairs into the valuation + valuation.update(pairs) + return valuation + + +def make_lex(symbols): + """ + Create lexical CFG rules for each individual symbol. + + Given a valuation with an entry of the form ``{'zloty': 'zloty'}``, + create a lexical rule for the proper name 'Zloty'. + + :param symbols: a list of individual constants in the semantic representation + :type symbols: sequence -- set(str) + :rtype: list(str) + """ + lex = [] + header = """ +################################################################## +# Lexical rules automatically generated by running 'chat80.py -x'. +################################################################## + +""" + lex.append(header) + template = r"PropN[num=sg, sem=<\P.(P %s)>] -> '%s'\n" + + for s in symbols: + parts = s.split("_") + caps = [p.capitalize() for p in parts] + pname = "_".join(caps) + rule = template % (s, pname) + lex.append(rule) + return lex + + +########################################################################### +# Interface function to emulate other corpus readers +########################################################################### + + +def concepts(items=items): + """ + Build a list of concepts corresponding to the relation names in ``items``. + + :param items: names of the Chat-80 relations to extract + :type items: list(str) + :return: the ``Concept`` objects which are extracted from the relations + :rtype: list(Concept) + """ + if isinstance(items, str): + items = (items,) + + rels = [item_metadata[r] for r in items] + + concept_map = process_bundle(rels) + return concept_map.values() + + +########################################################################### + + +def main(): + import sys + from optparse import OptionParser + + description = """ +Extract data from the Chat-80 Prolog files and convert them into a +Valuation object for use in the NLTK semantics package. 
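+
+Typical invocations (illustrative): 'chat80.py -v' prints the vocabulary of
+concept labels and exits; 'chat80.py -s chat' dumps a valuation to 'chat.db'.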
+ """ + + opts = OptionParser(description=description) + opts.set_defaults(verbose=True, lex=False, vocab=False) + opts.add_option( + "-s", "--store", dest="outdb", help="store a valuation in DB", metavar="DB" + ) + opts.add_option( + "-l", + "--load", + dest="indb", + help="load a stored valuation from DB", + metavar="DB", + ) + opts.add_option( + "-c", + "--concepts", + action="store_true", + help="print concepts instead of a valuation", + ) + opts.add_option( + "-r", + "--relation", + dest="label", + help="print concept with label REL (check possible labels with '-v' option)", + metavar="REL", + ) + opts.add_option( + "-q", + "--quiet", + action="store_false", + dest="verbose", + help="don't print out progress info", + ) + opts.add_option( + "-x", + "--lex", + action="store_true", + dest="lex", + help="write a file of lexical entries for country names, then exit", + ) + opts.add_option( + "-v", + "--vocab", + action="store_true", + dest="vocab", + help="print out the vocabulary of concept labels and their arity, then exit", + ) + + (options, args) = opts.parse_args() + if options.outdb and options.indb: + opts.error("Options --store and --load are mutually exclusive") + + if options.outdb: + # write the valuation to a persistent database + if options.verbose: + outdb = options.outdb + ".db" + print("Dumping a valuation to %s" % outdb) + val_dump(rels, options.outdb) + sys.exit(0) + else: + # try to read in a valuation from a database + if options.indb is not None: + dbname = options.indb + ".db" + if not os.access(dbname, os.R_OK): + sys.exit("Cannot read file: %s" % dbname) + else: + valuation = val_load(options.indb) + # we need to create the valuation from scratch + else: + # build some concepts + concept_map = process_bundle(rels) + concepts = concept_map.values() + # just print out the vocabulary + if options.vocab: + items = sorted((c.arity, c.prefLabel) for c in concepts) + for (arity, label) in items: + print(label, arity) + sys.exit(0) + # show all the concepts + if options.concepts: + for c in concepts: + print(c) + print() + if options.label: + print(concept_map[options.label]) + sys.exit(0) + else: + # turn the concepts into a Valuation + if options.lex: + if options.verbose: + print("Writing out lexical rules") + make_valuation(concepts, lexicon=True) + else: + valuation = make_valuation(concepts, read=True) + print(valuation) + + +def sql_demo(): + """ + Print out every row from the 'city.db' database. + """ + print() + print("Using SQL to extract rows from 'city.db' RDB.") + for row in sql_query("corpora/city_database/city.db", "SELECT * FROM city_table"): + print(row) + + +if __name__ == "__main__": + main() + sql_demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/cooper_storage.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/cooper_storage.py new file mode 100644 index 0000000000000000000000000000000000000000..a41502187ed1dfbfae5bc21bdf7c29624cab1e0f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/cooper_storage.py @@ -0,0 +1,124 @@ +# Natural Language Toolkit: Cooper storage for Quantifier Ambiguity +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +from nltk.parse import load_parser +from nltk.parse.featurechart import InstantiateVarsChart +from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable + + +class CooperStore: + """ + A container for handling quantifier ambiguity via Cooper storage. 
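+
+    The constructor expects a feature structure with a ``CORE`` feature (the
+    core semantic representation) and a ``STORE`` feature (a list of binding
+    operators); ``s_retrieve()`` then builds one reading per permutation of
+    the store and records the results in ``self.readings``. See ``demo()``
+    below for a worked example.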
+ """ + + def __init__(self, featstruct): + """ + :param featstruct: The value of the ``sem`` node in a tree from + ``parse_with_bindops()`` + :type featstruct: FeatStruct (with features ``core`` and ``store``) + + """ + self.featstruct = featstruct + self.readings = [] + try: + self.core = featstruct["CORE"] + self.store = featstruct["STORE"] + except KeyError: + print("%s is not a Cooper storage structure" % featstruct) + + def _permute(self, lst): + """ + :return: An iterator over the permutations of the input list + :type lst: list + :rtype: iter + """ + remove = lambda lst0, index: lst0[:index] + lst0[index + 1 :] + if lst: + for index, x in enumerate(lst): + for y in self._permute(remove(lst, index)): + yield (x,) + y + else: + yield () + + def s_retrieve(self, trace=False): + r""" + Carry out S-Retrieval of binding operators in store. If hack=True, + serialize the bindop and core as strings and reparse. Ugh. + + Each permutation of the store (i.e. list of binding operators) is + taken to be a possible scoping of quantifiers. We iterate through the + binding operators in each permutation, and successively apply them to + the current term, starting with the core semantic representation, + working from the inside out. + + Binding operators are of the form:: + + bo(\P.all x.(man(x) -> P(x)),z1) + """ + for perm, store_perm in enumerate(self._permute(self.store)): + if trace: + print("Permutation %s" % (perm + 1)) + term = self.core + for bindop in store_perm: + # we just want the arguments that are wrapped by the 'bo' predicate + quant, varex = tuple(bindop.args) + # use var to make an abstraction over the current term and then + # apply the quantifier to it + term = ApplicationExpression( + quant, LambdaExpression(varex.variable, term) + ) + if trace: + print(" ", term) + term = term.simplify() + self.readings.append(term) + + +def parse_with_bindops(sentence, grammar=None, trace=0): + """ + Use a grammar with Binding Operators to parse a sentence. + """ + if not grammar: + grammar = "grammars/book_grammars/storage.fcfg" + parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart) + # Parse the sentence. 
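+    # (InstantiateVarsChart replaces grammar variables written as '@x' with
+    # fresh unique variables as edges are added, so that each binding operator
+    # introduced by the grammar gets its own distinct variable.)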
+ tokens = sentence.split() + return list(parser.parse(tokens)) + + +def demo(): + from nltk.sem import cooper_storage as cs + + sentence = "every girl chases a dog" + # sentence = "a man gives a bone to every dog" + print() + print("Analysis of sentence '%s'" % sentence) + print("=" * 50) + trees = cs.parse_with_bindops(sentence, trace=0) + for tree in trees: + semrep = cs.CooperStore(tree.label()["SEM"]) + print() + print("Binding operators:") + print("-" * 15) + for s in semrep.store: + print(s) + print() + print("Core:") + print("-" * 15) + print(semrep.core) + print() + print("S-Retrieval:") + print("-" * 15) + semrep.s_retrieve(trace=True) + print("Readings:") + print("-" * 15) + + for i, reading in enumerate(semrep.readings): + print(f"{i + 1}: {reading}") + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/drt.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/drt.py new file mode 100644 index 0000000000000000000000000000000000000000..53441d6617310683bab97bb7abd84f656ebc28af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/drt.py @@ -0,0 +1,1460 @@ +# Natural Language Toolkit: Discourse Representation Theory (DRT) +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import operator +from functools import reduce +from itertools import chain + +from nltk.sem.logic import ( + APP, + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + BinaryExpression, + BooleanExpression, + ConstantExpression, + EqualityExpression, + EventVariableExpression, + ExistsExpression, + Expression, + FunctionVariableExpression, + ImpExpression, + IndividualVariableExpression, + LambdaExpression, + LogicParser, + NegatedExpression, + OrExpression, + Tokens, + Variable, + is_eventvar, + is_funcvar, + is_indvar, + unique_variable, +) + +# Import Tkinter-based modules if they are available +try: + from tkinter import Canvas, Tk + from tkinter.font import Font + + from nltk.util import in_idle + +except ImportError: + # No need to print a warning here, nltk.draw has already printed one. 
+ pass + + +class DrtTokens(Tokens): + DRS = "DRS" + DRS_CONC = "+" + PRONOUN = "PRO" + OPEN_BRACKET = "[" + CLOSE_BRACKET = "]" + COLON = ":" + + PUNCT = [DRS_CONC, OPEN_BRACKET, CLOSE_BRACKET, COLON] + + SYMBOLS = Tokens.SYMBOLS + PUNCT + + TOKENS = Tokens.TOKENS + [DRS] + PUNCT + + +class DrtParser(LogicParser): + """A lambda calculus expression parser.""" + + def __init__(self): + LogicParser.__init__(self) + + self.operator_precedence = dict( + [(x, 1) for x in DrtTokens.LAMBDA_LIST] + + [(x, 2) for x in DrtTokens.NOT_LIST] + + [(APP, 3)] + + [(x, 4) for x in DrtTokens.EQ_LIST + Tokens.NEQ_LIST] + + [(DrtTokens.COLON, 5)] + + [(DrtTokens.DRS_CONC, 6)] + + [(x, 7) for x in DrtTokens.OR_LIST] + + [(x, 8) for x in DrtTokens.IMP_LIST] + + [(None, 9)] + ) + + def get_all_symbols(self): + """This method exists to be overridden""" + return DrtTokens.SYMBOLS + + def isvariable(self, tok): + return tok not in DrtTokens.TOKENS + + def handle(self, tok, context): + """This method is intended to be overridden for logics that + use different operators or expressions""" + if tok in DrtTokens.NOT_LIST: + return self.handle_negation(tok, context) + + elif tok in DrtTokens.LAMBDA_LIST: + return self.handle_lambda(tok, context) + + elif tok == DrtTokens.OPEN: + if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET: + return self.handle_DRS(tok, context) + else: + return self.handle_open(tok, context) + + elif tok.upper() == DrtTokens.DRS: + self.assertNextToken(DrtTokens.OPEN) + return self.handle_DRS(tok, context) + + elif self.isvariable(tok): + if self.inRange(0) and self.token(0) == DrtTokens.COLON: + return self.handle_prop(tok, context) + else: + return self.handle_variable(tok, context) + + def make_NegatedExpression(self, expression): + return DrtNegatedExpression(expression) + + def handle_DRS(self, tok, context): + # a DRS + refs = self.handle_refs() + if ( + self.inRange(0) and self.token(0) == DrtTokens.COMMA + ): # if there is a comma (it's optional) + self.token() # swallow the comma + conds = self.handle_conds(context) + self.assertNextToken(DrtTokens.CLOSE) + return DRS(refs, conds, None) + + def handle_refs(self): + self.assertNextToken(DrtTokens.OPEN_BRACKET) + refs = [] + while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: + # Support expressions like: DRS([x y],C) == DRS([x,y],C) + if refs and self.token(0) == DrtTokens.COMMA: + self.token() # swallow the comma + refs.append(self.get_next_token_variable("quantified")) + self.assertNextToken(DrtTokens.CLOSE_BRACKET) + return refs + + def handle_conds(self, context): + self.assertNextToken(DrtTokens.OPEN_BRACKET) + conds = [] + while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: + # Support expressions like: DRS([x y],C) == DRS([x, y],C) + if conds and self.token(0) == DrtTokens.COMMA: + self.token() # swallow the comma + conds.append(self.process_next_expression(context)) + self.assertNextToken(DrtTokens.CLOSE_BRACKET) + return conds + + def handle_prop(self, tok, context): + variable = self.make_VariableExpression(tok) + self.assertNextToken(":") + drs = self.process_next_expression(DrtTokens.COLON) + return DrtProposition(variable, drs) + + def make_EqualityExpression(self, first, second): + """This method serves as a hook for other logic parsers that + have different equality expression classes""" + return DrtEqualityExpression(first, second) + + def get_BooleanExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different boolean operators""" 
+ if tok == DrtTokens.DRS_CONC: + return lambda first, second: DrtConcatenation(first, second, None) + elif tok in DrtTokens.OR_LIST: + return DrtOrExpression + elif tok in DrtTokens.IMP_LIST: + + def make_imp_expression(first, second): + if isinstance(first, DRS): + return DRS(first.refs, first.conds, second) + if isinstance(first, DrtConcatenation): + return DrtConcatenation(first.first, first.second, second) + raise Exception("Antecedent of implication must be a DRS") + + return make_imp_expression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def make_ApplicationExpression(self, function, argument): + return DrtApplicationExpression(function, argument) + + def make_VariableExpression(self, name): + return DrtVariableExpression(Variable(name)) + + def make_LambdaExpression(self, variables, term): + return DrtLambdaExpression(variables, term) + + +class DrtExpression: + """ + This is the base abstract DRT Expression from which every DRT + Expression extends. + """ + + _drt_parser = DrtParser() + + @classmethod + def fromstring(cls, s): + return cls._drt_parser.parse(s) + + def applyto(self, other): + return DrtApplicationExpression(self, other) + + def __neg__(self): + return DrtNegatedExpression(self) + + def __and__(self, other): + return NotImplemented + + def __or__(self, other): + assert isinstance(other, DrtExpression) + return DrtOrExpression(self, other) + + def __gt__(self, other): + assert isinstance(other, DrtExpression) + if isinstance(self, DRS): + return DRS(self.refs, self.conds, other) + if isinstance(self, DrtConcatenation): + return DrtConcatenation(self.first, self.second, other) + raise Exception("Antecedent of implication must be a DRS") + + def equiv(self, other, prover=None): + """ + Check for logical equivalence. + Pass the expression (self <-> other) to the theorem prover. + If the prover says it is valid, then the self and other are equal. + + :param other: an ``DrtExpression`` to check equality against + :param prover: a ``nltk.inference.api.Prover`` + """ + assert isinstance(other, DrtExpression) + + f1 = self.simplify().fol() + f2 = other.simplify().fol() + return f1.equiv(f2, prover) + + @property + def type(self): + raise AttributeError( + "'%s' object has no attribute 'type'" % self.__class__.__name__ + ) + + def typecheck(self, signature=None): + raise NotImplementedError() + + def __add__(self, other): + return DrtConcatenation(self, other, None) + + def get_refs(self, recursive=False): + """ + Return the set of discourse referents in this DRS. + :param recursive: bool Also find discourse referents in subterms? 
+ :return: list of ``Variable`` objects + """ + raise NotImplementedError() + + def is_pronoun_function(self): + """Is self of the form "PRO(x)"?""" + return ( + isinstance(self, DrtApplicationExpression) + and isinstance(self.function, DrtAbstractVariableExpression) + and self.function.variable.name == DrtTokens.PRONOUN + and isinstance(self.argument, DrtIndividualVariableExpression) + ) + + def make_EqualityExpression(self, first, second): + return DrtEqualityExpression(first, second) + + def make_VariableExpression(self, variable): + return DrtVariableExpression(variable) + + def resolve_anaphora(self): + return resolve_anaphora(self) + + def eliminate_equality(self): + return self.visit_structured(lambda e: e.eliminate_equality(), self.__class__) + + def pretty_format(self): + """ + Draw the DRS + :return: the pretty print string + """ + return "\n".join(self._pretty()) + + def pretty_print(self): + print(self.pretty_format()) + + def draw(self): + DrsDrawer(self).draw() + + +class DRS(DrtExpression, Expression): + """A Discourse Representation Structure.""" + + def __init__(self, refs, conds, consequent=None): + """ + :param refs: list of ``DrtIndividualVariableExpression`` for the + discourse referents + :param conds: list of ``Expression`` for the conditions + """ + self.refs = refs + self.conds = conds + self.consequent = consequent + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """Replace all instances of variable v with expression E in self, + where v is free in self.""" + if variable in self.refs: + # if a bound variable is the thing being replaced + if not replace_bound: + return self + else: + i = self.refs.index(variable) + if self.consequent: + consequent = self.consequent.replace( + variable, expression, True, alpha_convert + ) + else: + consequent = None + return DRS( + self.refs[:i] + [expression.variable] + self.refs[i + 1 :], + [ + cond.replace(variable, expression, True, alpha_convert) + for cond in self.conds + ], + consequent, + ) + else: + if alpha_convert: + # any bound variable that appears in the expression must + # be alpha converted to avoid a conflict + for ref in set(self.refs) & expression.free(): + newvar = unique_variable(ref) + newvarex = DrtVariableExpression(newvar) + i = self.refs.index(ref) + if self.consequent: + consequent = self.consequent.replace( + ref, newvarex, True, alpha_convert + ) + else: + consequent = None + self = DRS( + self.refs[:i] + [newvar] + self.refs[i + 1 :], + [ + cond.replace(ref, newvarex, True, alpha_convert) + for cond in self.conds + ], + consequent, + ) + + # replace in the conditions + if self.consequent: + consequent = self.consequent.replace( + variable, expression, replace_bound, alpha_convert + ) + else: + consequent = None + return DRS( + self.refs, + [ + cond.replace(variable, expression, replace_bound, alpha_convert) + for cond in self.conds + ], + consequent, + ) + + def free(self): + """:see: Expression.free()""" + conds_free = reduce(operator.or_, [c.free() for c in self.conds], set()) + if self.consequent: + conds_free.update(self.consequent.free()) + return conds_free - set(self.refs) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + if recursive: + conds_refs = self.refs + list( + chain.from_iterable(c.get_refs(True) for c in self.conds) + ) + if self.consequent: + conds_refs.extend(self.consequent.get_refs(True)) + return conds_refs + else: + return self.refs + + def visit(self, function, combinator): + """:see: 
Expression.visit()""" + parts = list(map(function, self.conds)) + if self.consequent: + parts.append(function(self.consequent)) + return combinator(parts) + + def visit_structured(self, function, combinator): + """:see: Expression.visit_structured()""" + consequent = function(self.consequent) if self.consequent else None + return combinator(self.refs, list(map(function, self.conds)), consequent) + + def eliminate_equality(self): + drs = self + i = 0 + while i < len(drs.conds): + cond = drs.conds[i] + if ( + isinstance(cond, EqualityExpression) + and isinstance(cond.first, AbstractVariableExpression) + and isinstance(cond.second, AbstractVariableExpression) + ): + drs = DRS( + list(set(drs.refs) - {cond.second.variable}), + drs.conds[:i] + drs.conds[i + 1 :], + drs.consequent, + ) + if cond.second.variable != cond.first.variable: + drs = drs.replace(cond.second.variable, cond.first, False, False) + i = 0 + i -= 1 + i += 1 + + conds = [] + for cond in drs.conds: + new_cond = cond.eliminate_equality() + new_cond_simp = new_cond.simplify() + if ( + not isinstance(new_cond_simp, DRS) + or new_cond_simp.refs + or new_cond_simp.conds + or new_cond_simp.consequent + ): + conds.append(new_cond) + + consequent = drs.consequent.eliminate_equality() if drs.consequent else None + return DRS(drs.refs, conds, consequent) + + def fol(self): + if self.consequent: + accum = None + if self.conds: + accum = reduce(AndExpression, [c.fol() for c in self.conds]) + + if accum: + accum = ImpExpression(accum, self.consequent.fol()) + else: + accum = self.consequent.fol() + + for ref in self.refs[::-1]: + accum = AllExpression(ref, accum) + + return accum + + else: + if not self.conds: + raise Exception("Cannot convert DRS with no conditions to FOL.") + accum = reduce(AndExpression, [c.fol() for c in self.conds]) + for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]): + accum = ExistsExpression(ref, accum) + return accum + + def _pretty(self): + refs_line = " ".join(self._order_ref_strings(self.refs)) + + cond_lines = [ + cond + for cond_line in [ + filter(lambda s: s.strip(), cond._pretty()) for cond in self.conds + ] + for cond in cond_line + ] + length = max([len(refs_line)] + list(map(len, cond_lines))) + drs = ( + [ + " _" + "_" * length + "_ ", + "| " + refs_line.ljust(length) + " |", + "|-" + "-" * length + "-|", + ] + + ["| " + line.ljust(length) + " |" for line in cond_lines] + + ["|_" + "_" * length + "_|"] + ) + if self.consequent: + return DrtBinaryExpression._assemble_pretty( + drs, DrtTokens.IMP, self.consequent._pretty() + ) + return drs + + def _order_ref_strings(self, refs): + strings = ["%s" % ref for ref in refs] + ind_vars = [] + func_vars = [] + event_vars = [] + other_vars = [] + for s in strings: + if is_indvar(s): + ind_vars.append(s) + elif is_funcvar(s): + func_vars.append(s) + elif is_eventvar(s): + event_vars.append(s) + else: + other_vars.append(s) + return ( + sorted(other_vars) + + sorted(event_vars, key=lambda v: int([v[2:], -1][len(v[2:]) == 0])) + + sorted(func_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) + + sorted(ind_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) + ) + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. 
+ If we are comparing \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(other, DRS): + if len(self.refs) == len(other.refs): + converted_other = other + for (r1, r2) in zip(self.refs, converted_other.refs): + varex = self.make_VariableExpression(r1) + converted_other = converted_other.replace(r2, varex, True) + if self.consequent == converted_other.consequent and len( + self.conds + ) == len(converted_other.conds): + for c1, c2 in zip(self.conds, converted_other.conds): + if not (c1 == c2): + return False + return True + return False + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + drs = "([{}],[{}])".format( + ",".join(self._order_ref_strings(self.refs)), + ", ".join("%s" % cond for cond in self.conds), + ) # map(str, self.conds))) + if self.consequent: + return ( + DrtTokens.OPEN + + drs + + " " + + DrtTokens.IMP + + " " + + "%s" % self.consequent + + DrtTokens.CLOSE + ) + return drs + + +def DrtVariableExpression(variable): + """ + This is a factory method that instantiates and returns a subtype of + ``DrtAbstractVariableExpression`` appropriate for the given variable. + """ + if is_indvar(variable.name): + return DrtIndividualVariableExpression(variable) + elif is_funcvar(variable.name): + return DrtFunctionVariableExpression(variable) + elif is_eventvar(variable.name): + return DrtEventVariableExpression(variable) + else: + return DrtConstantExpression(variable) + + +class DrtAbstractVariableExpression(DrtExpression, AbstractVariableExpression): + def fol(self): + return self + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return [] + + def _pretty(self): + s = "%s" % self + blank = " " * len(s) + return [blank, blank, s, blank] + + def eliminate_equality(self): + return self + + +class DrtIndividualVariableExpression( + DrtAbstractVariableExpression, IndividualVariableExpression +): + pass + + +class DrtFunctionVariableExpression( + DrtAbstractVariableExpression, FunctionVariableExpression +): + pass + + +class DrtEventVariableExpression( + DrtIndividualVariableExpression, EventVariableExpression +): + pass + + +class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression): + pass + + +class DrtProposition(DrtExpression, Expression): + def __init__(self, variable, drs): + self.variable = variable + self.drs = drs + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + if self.variable == variable: + assert isinstance( + expression, DrtAbstractVariableExpression + ), "Can only replace a proposition label with a variable" + return DrtProposition( + expression.variable, + self.drs.replace(variable, expression, replace_bound, alpha_convert), + ) + else: + return DrtProposition( + self.variable, + self.drs.replace(variable, expression, replace_bound, alpha_convert), + ) + + def eliminate_equality(self): + return DrtProposition(self.variable, self.drs.eliminate_equality()) + + def get_refs(self, recursive=False): + return self.drs.get_refs(True) if recursive else [] + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.variable == other.variable + and self.drs == other.drs + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def fol(self): + return self.drs.fol() + + def _pretty(self): + drs_s = self.drs._pretty() + blank = " " * len("%s" % self.variable) + return ( + [blank + " " + line for line in drs_s[:1]] + + ["%s" % self.variable + 
":" + line for line in drs_s[1:2]] + + [blank + " " + line for line in drs_s[2:]] + ) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.drs)]) + + def visit_structured(self, function, combinator): + """:see: Expression.visit_structured()""" + return combinator(self.variable, function(self.drs)) + + def __str__(self): + return f"prop({self.variable}, {self.drs})" + + +class DrtNegatedExpression(DrtExpression, NegatedExpression): + def fol(self): + return NegatedExpression(self.term.fol()) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return self.term.get_refs(recursive) + + def _pretty(self): + term_lines = self.term._pretty() + return ( + [" " + line for line in term_lines[:2]] + + ["__ " + line for line in term_lines[2:3]] + + [" | " + line for line in term_lines[3:4]] + + [" " + line for line in term_lines[4:]] + ) + + +class DrtLambdaExpression(DrtExpression, LambdaExpression): + def alpha_convert(self, newvar): + """Rename all occurrences of the variable introduced by this variable + binder in the expression to ``newvar``. + :param newvar: ``Variable``, for the new variable + """ + return self.__class__( + newvar, + self.term.replace(self.variable, DrtVariableExpression(newvar), True), + ) + + def fol(self): + return LambdaExpression(self.variable, self.term.fol()) + + def _pretty(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + var_string = " ".join("%s" % v for v in variables) + DrtTokens.DOT + term_lines = term._pretty() + blank = " " * len(var_string) + return ( + [" " + blank + line for line in term_lines[:1]] + + [r" \ " + blank + line for line in term_lines[1:2]] + + [r" /\ " + var_string + line for line in term_lines[2:3]] + + [" " + blank + line for line in term_lines[3:]] + ) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + [self.variable] + self.term.get_refs(True) if recursive else [self.variable] + ) + + +class DrtBinaryExpression(DrtExpression, BinaryExpression): + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + self.first.get_refs(True) + self.second.get_refs(True) if recursive else [] + ) + + def _pretty(self): + return DrtBinaryExpression._assemble_pretty( + self._pretty_subex(self.first), + self.getOp(), + self._pretty_subex(self.second), + ) + + @staticmethod + def _assemble_pretty(first_lines, op, second_lines): + max_lines = max(len(first_lines), len(second_lines)) + first_lines = _pad_vertically(first_lines, max_lines) + second_lines = _pad_vertically(second_lines, max_lines) + blank = " " * len(op) + first_second_lines = list(zip(first_lines, second_lines)) + return ( + [ + " " + first_line + " " + blank + " " + second_line + " " + for first_line, second_line in first_second_lines[:2] + ] + + [ + "(" + first_line + " " + op + " " + second_line + ")" + for first_line, second_line in first_second_lines[2:3] + ] + + [ + " " + first_line + " " + blank + " " + second_line + " " + for first_line, second_line in first_second_lines[3:] + ] + ) + + def _pretty_subex(self, subex): + return subex._pretty() + + +class DrtBooleanExpression(DrtBinaryExpression, BooleanExpression): + pass + + +class DrtOrExpression(DrtBooleanExpression, OrExpression): + def fol(self): + return OrExpression(self.first.fol(), self.second.fol()) + + def _pretty_subex(self, subex): + if 
isinstance(subex, DrtOrExpression): + return [line[1:-1] for line in subex._pretty()] + return DrtBooleanExpression._pretty_subex(self, subex) + + +class DrtEqualityExpression(DrtBinaryExpression, EqualityExpression): + def fol(self): + return EqualityExpression(self.first.fol(), self.second.fol()) + + +class DrtConcatenation(DrtBooleanExpression): + """DRS of the form '(DRS + DRS)'""" + + def __init__(self, first, second, consequent=None): + DrtBooleanExpression.__init__(self, first, second) + self.consequent = consequent + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """Replace all instances of variable v with expression E in self, + where v is free in self.""" + first = self.first + second = self.second + consequent = self.consequent + + # If variable is bound + if variable in self.get_refs(): + if replace_bound: + first = first.replace( + variable, expression, replace_bound, alpha_convert + ) + second = second.replace( + variable, expression, replace_bound, alpha_convert + ) + if consequent: + consequent = consequent.replace( + variable, expression, replace_bound, alpha_convert + ) + else: + if alpha_convert: + # alpha convert every ref that is free in 'expression' + for ref in set(self.get_refs(True)) & expression.free(): + v = DrtVariableExpression(unique_variable(ref)) + first = first.replace(ref, v, True, alpha_convert) + second = second.replace(ref, v, True, alpha_convert) + if consequent: + consequent = consequent.replace(ref, v, True, alpha_convert) + + first = first.replace(variable, expression, replace_bound, alpha_convert) + second = second.replace(variable, expression, replace_bound, alpha_convert) + if consequent: + consequent = consequent.replace( + variable, expression, replace_bound, alpha_convert + ) + + return self.__class__(first, second, consequent) + + def eliminate_equality(self): + # TODO: at some point. for now, simplify. + drs = self.simplify() + assert not isinstance(drs, DrtConcatenation) + return drs.eliminate_equality() + + def simplify(self): + first = self.first.simplify() + second = self.second.simplify() + consequent = self.consequent.simplify() if self.consequent else None + + if isinstance(first, DRS) and isinstance(second, DRS): + # For any ref that is in both 'first' and 'second' + for ref in set(first.get_refs(True)) & set(second.get_refs(True)): + # alpha convert the ref in 'second' to prevent collision + newvar = DrtVariableExpression(unique_variable(ref)) + second = second.replace(ref, newvar, True) + + return DRS(first.refs + second.refs, first.conds + second.conds, consequent) + else: + return self.__class__(first, second, consequent) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + refs = self.first.get_refs(recursive) + self.second.get_refs(recursive) + if self.consequent and recursive: + refs.extend(self.consequent.get_refs(True)) + return refs + + def getOp(self): + return DrtTokens.DRS_CONC + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. 
+ If we are comparing \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(other, DrtConcatenation): + self_refs = self.get_refs() + other_refs = other.get_refs() + if len(self_refs) == len(other_refs): + converted_other = other + for (r1, r2) in zip(self_refs, other_refs): + varex = self.make_VariableExpression(r1) + converted_other = converted_other.replace(r2, varex, True) + return ( + self.first == converted_other.first + and self.second == converted_other.second + and self.consequent == converted_other.consequent + ) + return False + + def __ne__(self, other): + return not self == other + + __hash__ = DrtBooleanExpression.__hash__ + + def fol(self): + e = AndExpression(self.first.fol(), self.second.fol()) + if self.consequent: + e = ImpExpression(e, self.consequent.fol()) + return e + + def _pretty(self): + drs = DrtBinaryExpression._assemble_pretty( + self._pretty_subex(self.first), + self.getOp(), + self._pretty_subex(self.second), + ) + if self.consequent: + drs = DrtBinaryExpression._assemble_pretty( + drs, DrtTokens.IMP, self.consequent._pretty() + ) + return drs + + def _pretty_subex(self, subex): + if isinstance(subex, DrtConcatenation): + return [line[1:-1] for line in subex._pretty()] + return DrtBooleanExpression._pretty_subex(self, subex) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + if self.consequent: + return combinator( + [function(self.first), function(self.second), function(self.consequent)] + ) + else: + return combinator([function(self.first), function(self.second)]) + + def __str__(self): + first = self._str_subex(self.first) + second = self._str_subex(self.second) + drs = Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE + if self.consequent: + return ( + DrtTokens.OPEN + + drs + + " " + + DrtTokens.IMP + + " " + + "%s" % self.consequent + + DrtTokens.CLOSE + ) + return drs + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, DrtConcatenation) and subex.consequent is None: + return s[1:-1] + return s + + +class DrtApplicationExpression(DrtExpression, ApplicationExpression): + def fol(self): + return ApplicationExpression(self.function.fol(), self.argument.fol()) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + self.function.get_refs(True) + self.argument.get_refs(True) + if recursive + else [] + ) + + def _pretty(self): + function, args = self.uncurry() + function_lines = function._pretty() + args_lines = [arg._pretty() for arg in args] + max_lines = max(map(len, [function_lines] + args_lines)) + function_lines = _pad_vertically(function_lines, max_lines) + args_lines = [_pad_vertically(arg_lines, max_lines) for arg_lines in args_lines] + func_args_lines = list(zip(function_lines, list(zip(*args_lines)))) + return ( + [ + func_line + " " + " ".join(args_line) + " " + for func_line, args_line in func_args_lines[:2] + ] + + [ + func_line + "(" + ",".join(args_line) + ")" + for func_line, args_line in func_args_lines[2:3] + ] + + [ + func_line + " " + " ".join(args_line) + " " + for func_line, args_line in func_args_lines[3:] + ] + ) + + +def _pad_vertically(lines, max_lines): + pad_line = [" " * len(lines[0])] + return lines + pad_line * (max_lines - len(lines)) + + +class PossibleAntecedents(list, DrtExpression, Expression): + def free(self): + """Set of free variables.""" + return set(self) + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """Replace all instances of variable v with 
expression E in self,
+        where v is free in self."""
+        result = PossibleAntecedents()
+        for item in self:
+            if item == variable:
+                result.append(expression)
+            else:
+                result.append(item)
+        return result
+
+    def _pretty(self):
+        s = "%s" % self
+        blank = " " * len(s)
+        return [blank, blank, s]
+
+    def __str__(self):
+        return "[" + ",".join("%s" % it for it in self) + "]"
+
+
+class AnaphoraResolutionException(Exception):
+    pass
+
+
+def resolve_anaphora(expression, trail=[]):
+    if isinstance(expression, ApplicationExpression):
+        if expression.is_pronoun_function():
+            possible_antecedents = PossibleAntecedents()
+            for ancestor in trail:
+                for ref in ancestor.get_refs():
+                    refex = expression.make_VariableExpression(ref)
+
+                    # ==========================================================
+                    # Don't allow resolution to itself or other types
+                    # ==========================================================
+                    if refex.__class__ == expression.argument.__class__ and not (
+                        refex == expression.argument
+                    ):
+                        possible_antecedents.append(refex)
+
+            if len(possible_antecedents) == 1:
+                resolution = possible_antecedents[0]
+            else:
+                resolution = possible_antecedents
+            return expression.make_EqualityExpression(expression.argument, resolution)
+        else:
+            r_function = resolve_anaphora(expression.function, trail + [expression])
+            r_argument = resolve_anaphora(expression.argument, trail + [expression])
+            return expression.__class__(r_function, r_argument)
+
+    elif isinstance(expression, DRS):
+        r_conds = []
+        for cond in expression.conds:
+            r_cond = resolve_anaphora(cond, trail + [expression])
+
+            # if the condition is of the form '(x = [])' then raise exception
+            if isinstance(r_cond, EqualityExpression):
+                if isinstance(r_cond.first, PossibleAntecedents):
+                    # Reverse the order so that the variable is on the left
+                    temp = r_cond.first
+                    r_cond.first = r_cond.second
+                    r_cond.second = temp
+                if isinstance(r_cond.second, PossibleAntecedents):
+                    if not r_cond.second:
+                        raise AnaphoraResolutionException(
+                            "Variable '%s' does not "
+                            "resolve to anything." 
% r_cond.first + ) + + r_conds.append(r_cond) + if expression.consequent: + consequent = resolve_anaphora(expression.consequent, trail + [expression]) + else: + consequent = None + return expression.__class__(expression.refs, r_conds, consequent) + + elif isinstance(expression, AbstractVariableExpression): + return expression + + elif isinstance(expression, NegatedExpression): + return expression.__class__( + resolve_anaphora(expression.term, trail + [expression]) + ) + + elif isinstance(expression, DrtConcatenation): + if expression.consequent: + consequent = resolve_anaphora(expression.consequent, trail + [expression]) + else: + consequent = None + return expression.__class__( + resolve_anaphora(expression.first, trail + [expression]), + resolve_anaphora(expression.second, trail + [expression]), + consequent, + ) + + elif isinstance(expression, BinaryExpression): + return expression.__class__( + resolve_anaphora(expression.first, trail + [expression]), + resolve_anaphora(expression.second, trail + [expression]), + ) + + elif isinstance(expression, LambdaExpression): + return expression.__class__( + expression.variable, resolve_anaphora(expression.term, trail + [expression]) + ) + + +class DrsDrawer: + BUFFER = 3 # Space between elements + TOPSPACE = 10 # Space above whole DRS + OUTERSPACE = 6 # Space to the left, right, and bottom of the while DRS + + def __init__(self, drs, size_canvas=True, canvas=None): + """ + :param drs: ``DrtExpression``, The DRS to be drawn + :param size_canvas: bool, True if the canvas size should be the exact size of the DRS + :param canvas: ``Canvas`` The canvas on which to draw the DRS. If none is given, create a new canvas. + """ + master = None + if not canvas: + master = Tk() + master.title("DRT") + + font = Font(family="helvetica", size=12) + + if size_canvas: + canvas = Canvas(master, width=0, height=0) + canvas.font = font + self.canvas = canvas + (right, bottom) = self._visit(drs, self.OUTERSPACE, self.TOPSPACE) + + width = max(right + self.OUTERSPACE, 100) + height = bottom + self.OUTERSPACE + canvas = Canvas(master, width=width, height=height) # , bg='white') + else: + canvas = Canvas(master, width=300, height=300) + + canvas.pack() + canvas.font = font + + self.canvas = canvas + self.drs = drs + self.master = master + + def _get_text_height(self): + """Get the height of a line of text""" + return self.canvas.font.metrics("linespace") + + def draw(self, x=OUTERSPACE, y=TOPSPACE): + """Draw the DRS""" + self._handle(self.drs, self._draw_command, x, y) + + if self.master and not in_idle(): + self.master.mainloop() + else: + return self._visit(self.drs, x, y) + + def _visit(self, expression, x, y): + """ + Return the bottom-rightmost point without actually drawing the item + + :param expression: the item to visit + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + return self._handle(expression, self._visit_command, x, y) + + def _draw_command(self, item, x, y): + """ + Draw the given item at the given location + + :param item: the item to draw + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if isinstance(item, str): + self.canvas.create_text(x, y, anchor="nw", font=self.canvas.font, text=item) + elif isinstance(item, tuple): + # item is the lower-right of a box + (right, bottom) = item + self.canvas.create_rectangle(x, y, right, bottom) + horiz_line_y = ( + y + 
self._get_text_height() + (self.BUFFER * 2) + ) # the line separating refs from conds + self.canvas.create_line(x, horiz_line_y, right, horiz_line_y) + + return self._visit_command(item, x, y) + + def _visit_command(self, item, x, y): + """ + Return the bottom-rightmost point without actually drawing the item + + :param item: the item to visit + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if isinstance(item, str): + return (x + self.canvas.font.measure(item), y + self._get_text_height()) + elif isinstance(item, tuple): + return item + + def _handle(self, expression, command, x=0, y=0): + """ + :param expression: the expression to handle + :param command: the function to apply, either _draw_command or _visit_command + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if command == self._visit_command: + # if we don't need to draw the item, then we can use the cached values + try: + # attempt to retrieve cached values + right = expression._drawing_width + x + bottom = expression._drawing_height + y + return (right, bottom) + except AttributeError: + # the values have not been cached yet, so compute them + pass + + if isinstance(expression, DrtAbstractVariableExpression): + factory = self._handle_VariableExpression + elif isinstance(expression, DRS): + factory = self._handle_DRS + elif isinstance(expression, DrtNegatedExpression): + factory = self._handle_NegatedExpression + elif isinstance(expression, DrtLambdaExpression): + factory = self._handle_LambdaExpression + elif isinstance(expression, BinaryExpression): + factory = self._handle_BinaryExpression + elif isinstance(expression, DrtApplicationExpression): + factory = self._handle_ApplicationExpression + elif isinstance(expression, PossibleAntecedents): + factory = self._handle_VariableExpression + elif isinstance(expression, DrtProposition): + factory = self._handle_DrtProposition + else: + raise Exception(expression.__class__.__name__) + + (right, bottom) = factory(expression, command, x, y) + + # cache the values + expression._drawing_width = right - x + expression._drawing_height = bottom - y + + return (right, bottom) + + def _handle_VariableExpression(self, expression, command, x, y): + return command("%s" % expression, x, y) + + def _handle_NegatedExpression(self, expression, command, x, y): + # Find the width of the negation symbol + right = self._visit_command(DrtTokens.NOT, x, y)[0] + + # Handle term + (right, bottom) = self._handle(expression.term, command, right, y) + + # Handle variables now that we know the y-coordinate + command( + DrtTokens.NOT, + x, + self._get_centered_top(y, bottom - y, self._get_text_height()), + ) + + return (right, bottom) + + def _handle_DRS(self, expression, command, x, y): + left = x + self.BUFFER # indent the left side + bottom = y + self.BUFFER # indent the top + + # Handle Discourse Referents + if expression.refs: + refs = " ".join("%s" % r for r in expression.refs) + else: + refs = " " + (max_right, bottom) = command(refs, left, bottom) + bottom += self.BUFFER * 2 + + # Handle Conditions + if expression.conds: + for cond in expression.conds: + (right, bottom) = self._handle(cond, command, left, bottom) + max_right = max(max_right, right) + bottom += self.BUFFER + else: + bottom += self._get_text_height() + self.BUFFER + + # Handle Box + max_right += self.BUFFER + return command((max_right, bottom), 
x, y) + + def _handle_ApplicationExpression(self, expression, command, x, y): + function, args = expression.uncurry() + if not isinstance(function, DrtAbstractVariableExpression): + # It's not a predicate expression ("P(x,y)"), so leave arguments curried + function = expression.function + args = [expression.argument] + + # Get the max bottom of any element on the line + function_bottom = self._visit(function, x, y)[1] + max_bottom = max( + [function_bottom] + [self._visit(arg, x, y)[1] for arg in args] + ) + + line_height = max_bottom - y + + # Handle 'function' + function_drawing_top = self._get_centered_top( + y, line_height, function._drawing_height + ) + right = self._handle(function, command, x, function_drawing_top)[0] + + # Handle open paren + centred_string_top = self._get_centered_top( + y, line_height, self._get_text_height() + ) + right = command(DrtTokens.OPEN, right, centred_string_top)[0] + + # Handle each arg + for (i, arg) in enumerate(args): + arg_drawing_top = self._get_centered_top( + y, line_height, arg._drawing_height + ) + right = self._handle(arg, command, right, arg_drawing_top)[0] + + if i + 1 < len(args): + # since it's not the last arg, add a comma + right = command(DrtTokens.COMMA + " ", right, centred_string_top)[0] + + # Handle close paren + right = command(DrtTokens.CLOSE, right, centred_string_top)[0] + + return (right, max_bottom) + + def _handle_LambdaExpression(self, expression, command, x, y): + # Find the width of the lambda symbol and abstracted variables + variables = DrtTokens.LAMBDA + "%s" % expression.variable + DrtTokens.DOT + right = self._visit_command(variables, x, y)[0] + + # Handle term + (right, bottom) = self._handle(expression.term, command, right, y) + + # Handle variables now that we know the y-coordinate + command( + variables, x, self._get_centered_top(y, bottom - y, self._get_text_height()) + ) + + return (right, bottom) + + def _handle_BinaryExpression(self, expression, command, x, y): + # Get the full height of the line, based on the operands + first_height = self._visit(expression.first, 0, 0)[1] + second_height = self._visit(expression.second, 0, 0)[1] + line_height = max(first_height, second_height) + + # Handle open paren + centred_string_top = self._get_centered_top( + y, line_height, self._get_text_height() + ) + right = command(DrtTokens.OPEN, x, centred_string_top)[0] + + # Handle the first operand + first_height = expression.first._drawing_height + (right, first_bottom) = self._handle( + expression.first, + command, + right, + self._get_centered_top(y, line_height, first_height), + ) + + # Handle the operator + right = command(" %s " % expression.getOp(), right, centred_string_top)[0] + + # Handle the second operand + second_height = expression.second._drawing_height + (right, second_bottom) = self._handle( + expression.second, + command, + right, + self._get_centered_top(y, line_height, second_height), + ) + + # Handle close paren + right = command(DrtTokens.CLOSE, right, centred_string_top)[0] + + return (right, max(first_bottom, second_bottom)) + + def _handle_DrtProposition(self, expression, command, x, y): + # Find the width of the negation symbol + right = command(expression.variable, x, y)[0] + + # Handle term + (right, bottom) = self._handle(expression.term, command, right, y) + + return (right, bottom) + + def _get_centered_top(self, top, full_height, item_height): + """Get the y-coordinate of the point that a figure should start at if + its height is 'item_height' and it needs to be centered in an area that + 
starts at 'top' and is 'full_height' tall.""" + return top + (full_height - item_height) / 2 + + +def demo(): + print("=" * 20 + "TEST PARSE" + "=" * 20) + dexpr = DrtExpression.fromstring + print(dexpr(r"([x,y],[sees(x,y)])")) + print(dexpr(r"([x],[man(x), walks(x)])")) + print(dexpr(r"\x.\y.([],[sees(x,y)])")) + print(dexpr(r"\x.([],[walks(x)])(john)")) + print(dexpr(r"(([x],[walks(x)]) + ([y],[runs(y)]))")) + print(dexpr(r"(([],[walks(x)]) -> ([],[runs(x)]))")) + print(dexpr(r"([x],[PRO(x), sees(John,x)])")) + print(dexpr(r"([x],[man(x), -([],[walks(x)])])")) + print(dexpr(r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])")) + + print("=" * 20 + "Test fol()" + "=" * 20) + print(dexpr(r"([x,y],[sees(x,y)])").fol()) + + print("=" * 20 + "Test alpha conversion and lambda expression equality" + "=" * 20) + e1 = dexpr(r"\x.([],[P(x)])") + print(e1) + e2 = e1.alpha_convert(Variable("z")) + print(e2) + print(e1 == e2) + + print("=" * 20 + "Test resolve_anaphora()" + "=" * 20) + print(resolve_anaphora(dexpr(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])"))) + print( + resolve_anaphora(dexpr(r"([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])")) + ) + print(resolve_anaphora(dexpr(r"(([x,y],[]) + ([],[PRO(x)]))"))) + + print("=" * 20 + "Test pretty_print()" + "=" * 20) + dexpr(r"([],[])").pretty_print() + dexpr( + r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])" + ).pretty_print() + dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print() + dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print() + dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print() + + +def test_draw(): + try: + from tkinter import Tk + except ImportError as e: + raise ValueError("tkinter is required, but it's not available.") + + expressions = [ + r"x", + r"([],[])", + r"([x],[])", + r"([x],[man(x)])", + r"([x,y],[sees(x,y)])", + r"([x],[man(x), walks(x)])", + r"\x.([],[man(x), walks(x)])", + r"\x y.([],[sees(x,y)])", + r"([],[(([],[walks(x)]) + ([],[runs(x)]))])", + r"([x],[man(x), -([],[walks(x)])])", + r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])", + ] + + for e in expressions: + d = DrtExpression.fromstring(e) + d.draw() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..fe27c9fc66f92600ebdcb13eb622d3d07db36985 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py @@ -0,0 +1,553 @@ +# Natural Language Toolkit: GUI Demo for Glue Semantics with Discourse +# Representation Theory (DRT) as meaning language +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +try: + from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk + from tkinter.font import Font + + from nltk.draw.util import CanvasFrame, ShowText + +except ImportError: + """Ignore ImportError because tkinter might not be available.""" + +from nltk.parse import MaltParser +from nltk.sem.drt import DrsDrawer, DrtVariableExpression +from nltk.sem.glue import DrtGlue +from nltk.sem.logic import Variable +from nltk.tag import RegexpTagger +from nltk.util import in_idle + + +class DrtGlueDemo: + def __init__(self, examples): + # Set up the main window. + self._top = Tk() + self._top.title("DRT Glue Demo") + + # Set up key bindings. 
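For quick reference, a condensed usage sketch of the DRT rendering entry points exercised by demo() and test_draw() above. It only uses calls that appear in that demo code; pretty_print() writes an ASCII box diagram to stdout, fol() converts the DRS to first-order logic, and draw() opens a Tkinter window via DrsDrawer, so it needs a working display.

from nltk.sem.drt import DrtExpression

drs = DrtExpression.fromstring(r"([x],[man(x), walks(x)])")
drs.pretty_print()          # ASCII box diagram on stdout
print(drs.fol())            # exists x.(man(x) & walks(x))
# drs.draw()                # Tkinter rendering via DrsDrawer; requires a display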
+ self._init_bindings() + + # Initialize the fonts.self._error = None + self._init_fonts(self._top) + + self._examples = examples + self._readingCache = [None for example in examples] + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Set the data to None + self._curExample = -1 + self._readings = [] + self._drs = None + self._drsWidget = None + self._error = None + + self._init_glue() + + # Create the basic frames. + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_exampleListbox(self._top) + self._init_readingListbox(self._top) + self._init_canvas(self._top) + + # Resize callback + self._canvas.bind("", self._configure) + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_glue(self): + tagger = RegexpTagger( + [ + ("^(David|Mary|John)$", "NNP"), + ( + "^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$", + "VB", + ), + ("^(go|order|vanish|find|approach)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"), + ("^(big|gray|former)$", "JJ"), + ("^(him|himself)$", "PRP"), + ] + ) + + depparser = MaltParser(tagger=tagger) + self._glue = DrtGlue(depparser=depparser, remove_duplicates=False) + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + if self._size.get() < 0: + big = self._size.get() - 2 + else: + big = self._size.get() + 2 + self._bigfont = Font(family="helvetica", weight="bold", size=big) + + def _init_exampleListbox(self, parent): + self._exampleFrame = listframe = Frame(parent) + self._exampleFrame.pack(fill="both", side="left", padx=2) + self._exampleList_label = Label( + self._exampleFrame, font=self._boldfont, text="Examples" + ) + self._exampleList_label.pack() + self._exampleList = Listbox( + self._exampleFrame, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._exampleList.pack(side="right", fill="both", expand=1) + + for example in self._examples: + self._exampleList.insert("end", (" %s" % example)) + self._exampleList.config(height=min(len(self._examples), 25), width=40) + + # Add a scrollbar if there are more than 25 examples. + if len(self._examples) > 25: + listscroll = Scrollbar(self._exampleFrame, orient="vertical") + self._exampleList.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._exampleList.yview) + listscroll.pack(side="left", fill="y") + + # If they select a example, apply it. 
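The RegexpTagger built in _init_glue() above only needs to cover the demo vocabulary. A small sketch of how such a pattern tagger behaves on one of the demo sentences, with the patterns trimmed to the words actually used here:

from nltk.tag import RegexpTagger

tagger = RegexpTagger([
    ("^(David|Mary|John)$", "NNP"),
    ("^(walks|sees|eats|chases)$", "VB"),
    ("^(a)$", "ex_quant"),
    ("^(every)$", "univ_quant"),
    ("^(sandwich|man|dog)$", "NN"),
])
print(tagger.tag("every man chases a dog".split()))
# [('every', 'univ_quant'), ('man', 'NN'), ('chases', 'VB'), ('a', 'ex_quant'), ('dog', 'NN')]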
+ self._exampleList.bind("<>", self._exampleList_select) + + def _init_readingListbox(self, parent): + self._readingFrame = listframe = Frame(parent) + self._readingFrame.pack(fill="both", side="left", padx=2) + self._readingList_label = Label( + self._readingFrame, font=self._boldfont, text="Readings" + ) + self._readingList_label.pack() + self._readingList = Listbox( + self._readingFrame, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._readingList.pack(side="right", fill="both", expand=1) + + # Add a scrollbar if there are more than 25 examples. + listscroll = Scrollbar(self._readingFrame, orient="vertical") + self._readingList.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._readingList.yview) + listscroll.pack(side="right", fill="y") + + self._populate_readingListbox() + + def _populate_readingListbox(self): + # Populate the listbox with integers + self._readingList.delete(0, "end") + for i in range(len(self._readings)): + self._readingList.insert("end", (" %s" % (i + 1))) + self._readingList.config(height=min(len(self._readings), 25), width=5) + + # If they select a example, apply it. + self._readingList.bind("<>", self._readingList_select) + + def _init_bindings(self): + # Key bindings are a good thing. + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("n", self.next) + self._top.bind("", self.next) + self._top.bind("p", self.prev) + self._top.bind("", self.prev) + + def _init_buttons(self, parent): + # Set up the frames. + self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom", padx=3, pady=2) + Button( + buttonframe, + text="Prev", + background="#90c0d0", + foreground="black", + command=self.prev, + ).pack(side="left") + Button( + buttonframe, + text="Next", + background="#90c0d0", + foreground="black", + command=self.next, + ).pack(side="left") + + def _configure(self, event): + self._autostep = 0 + (x1, y1, x2, y2) = self._cframe.scrollregion() + y2 = event.height - 6 + self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2) + self._redraw() + + def _init_canvas(self, parent): + self._cframe = CanvasFrame( + parent, + background="white", + # width=525, height=250, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + # Initially, there's no tree or text + self._tree = None + self._textwidgets = [] + self._textline = None + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + actionmenu = Menu(menubar, tearoff=0) + actionmenu.add_command( + label="Next", underline=0, command=self.next, accelerator="n, Space" + ) + actionmenu.add_command( + label="Previous", underline=0, command=self.prev, accelerator="p, Backspace" + ) + menubar.add_cascade(label="Action", underline=0, menu=actionmenu) + + optionmenu = Menu(menubar, tearoff=0) + optionmenu.add_checkbutton( + label="Remove Duplicates", + underline=0, + variable=self._glue.remove_duplicates, + command=self._toggle_remove_duplicates, + accelerator="r", + ) + menubar.add_cascade(label="Options", underline=0, menu=optionmenu) + + viewmenu = 
Menu(menubar, tearoff=0) + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + ######################################### + ## Main draw procedure + ######################################### + + def _redraw(self): + canvas = self._canvas + + # Delete the old DRS, widgets, etc. + if self._drsWidget is not None: + self._drsWidget.clear() + + if self._drs: + self._drsWidget = DrsWidget(self._canvas, self._drs) + self._drsWidget.draw() + + if self._error: + self._drsWidget = DrsWidget(self._canvas, self._error) + self._drsWidget.draw() + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + self._autostep = 0 + if self._top is None: + return + self._top.destroy() + self._top = None + + def prev(self, *e): + selection = self._readingList.curselection() + readingListSize = self._readingList.size() + + # there are readings + if readingListSize > 0: + # if one reading is currently selected + if len(selection) == 1: + index = int(selection[0]) + + # if it's on (or before) the first item + if index <= 0: + self._select_previous_example() + else: + self._readingList_store_selection(index - 1) + + else: + # select its first reading + self._readingList_store_selection(readingListSize - 1) + + else: + self._select_previous_example() + + def _select_previous_example(self): + # if the current example is not the first example + if self._curExample > 0: + self._exampleList_store_selection(self._curExample - 1) + else: + # go to the last example + self._exampleList_store_selection(len(self._examples) - 1) + + def next(self, *e): + selection = self._readingList.curselection() + readingListSize = self._readingList.size() + + # if there are readings + if readingListSize > 0: + # if one reading is currently selected + if len(selection) == 1: + index = int(selection[0]) + + # if it's on (or past) the last item + if index >= (readingListSize - 1): + self._select_next_example() + else: + self._readingList_store_selection(index + 1) + + else: + # select its first reading + self._readingList_store_selection(0) + + else: + self._select_next_example() + + def _select_next_example(self): + # if the current example is not the last example + if self._curExample < len(self._examples) - 1: + self._exampleList_store_selection(self._curExample + 1) + else: + # go to the first example + self._exampleList_store_selection(0) + + def about(self, *e): + ABOUT = ( + "NLTK Discourse Representation Theory (DRT) Glue Semantics Demo\n" + + "Written by Daniel H. 
Garrette" + ) + TITLE = "About: NLTK DRT Glue Demo" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def postscript(self, *e): + self._autostep = 0 + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + self._bigfont.configure(size=-(abs(size + 2))) + self._redraw() + + def _toggle_remove_duplicates(self): + self._glue.remove_duplicates = not self._glue.remove_duplicates + + self._exampleList.selection_clear(0, "end") + self._readings = [] + self._populate_readingListbox() + self._readingCache = [None for ex in self._examples] + self._curExample = -1 + self._error = None + + self._drs = None + self._redraw() + + def _exampleList_select(self, event): + selection = self._exampleList.curselection() + if len(selection) != 1: + return + self._exampleList_store_selection(int(selection[0])) + + def _exampleList_store_selection(self, index): + self._curExample = index + example = self._examples[index] + + self._exampleList.selection_clear(0, "end") + if example: + cache = self._readingCache[index] + if cache: + if isinstance(cache, list): + self._readings = cache + self._error = None + else: + self._readings = [] + self._error = cache + else: + try: + self._readings = self._glue.parse_to_meaning(example) + self._error = None + self._readingCache[index] = self._readings + except Exception as e: + self._readings = [] + self._error = DrtVariableExpression(Variable("Error: " + str(e))) + self._readingCache[index] = self._error + + # add a star to the end of the example + self._exampleList.delete(index) + self._exampleList.insert(index, (" %s *" % example)) + self._exampleList.config( + height=min(len(self._examples), 25), width=40 + ) + + self._populate_readingListbox() + + self._exampleList.selection_set(index) + + self._drs = None + self._redraw() + + def _readingList_select(self, event): + selection = self._readingList.curselection() + if len(selection) != 1: + return + self._readingList_store_selection(int(selection[0])) + + def _readingList_store_selection(self, index): + reading = self._readings[index] + + self._readingList.selection_clear(0, "end") + if reading: + self._readingList.selection_set(index) + + self._drs = reading.simplify().normalize().resolve_anaphora() + + self._redraw() + + +class DrsWidget: + def __init__(self, canvas, drs, **attribs): + self._drs = drs + self._canvas = canvas + canvas.font = Font( + font=canvas.itemcget(canvas.create_text(0, 0, text=""), "font") + ) + canvas._BUFFER = 3 + self.bbox = (0, 0, 0, 0) + + def draw(self): + (right, bottom) = DrsDrawer(self._drs, canvas=self._canvas).draw() + self.bbox = (0, 0, right + 1, bottom + 1) + + def clear(self): + self._canvas.create_rectangle(self.bbox, fill="white", width="0") + + +def demo(): + examples = [ + "John walks", + "David sees Mary", + "David eats a sandwich", + "every man chases a dog", + # 'every man believes a dog yawns', + # 'John gives David a sandwich', + "John chases himself", + # 
'John persuades David to order a pizza', + # 'John tries to go', + # 'John tries to find a unicorn', + # 'John seems to vanish', + # 'a unicorn seems to approach', + # 'every big cat leaves', + # 'every gray cat leaves', + # 'every big gray cat leaves', + # 'a former senator leaves', + # 'John likes a cat', + # 'John likes every cat', + # 'he walks', + # 'John walks and he leaves' + ] + DrtGlueDemo(examples).mainloop() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/evaluate.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..bbff44f70b34ce0ac4de038b83a95a325d44abaf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/evaluate.py @@ -0,0 +1,829 @@ +# Natural Language Toolkit: Models for first-order languages with lambda +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein , +# URL: +# For license information, see LICENSE.TXT + +# TODO: +# - fix tracing +# - fix iterator-based approach to existentials + +""" +This module provides data structures for representing first-order +models. +""" + +import inspect +import re +import sys +import textwrap +from pprint import pformat + +from nltk.decorators import decorator # this used in code that is commented out +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + EqualityExpression, + ExistsExpression, + Expression, + IffExpression, + ImpExpression, + IndividualVariableExpression, + IotaExpression, + LambdaExpression, + NegatedExpression, + OrExpression, + Variable, + is_indvar, +) + + +class Error(Exception): + pass + + +class Undefined(Error): + pass + + +def trace(f, *args, **kw): + argspec = inspect.getfullargspec(f) + d = dict(zip(argspec[0], args)) + if d.pop("trace", None): + print() + for item in d.items(): + print("%s => %s" % item) + return f(*args, **kw) + + +def is_rel(s): + """ + Check whether a set represents a relation (of any arity). + + :param s: a set containing tuples of str elements + :type s: set + :rtype: bool + """ + # we have the empty relation, i.e. set() + if len(s) == 0: + return True + # all the elements are tuples of the same length + elif all(isinstance(el, tuple) for el in s) and len(max(s)) == len(min(s)): + return True + else: + raise ValueError("Set %r contains sequences of different lengths" % s) + + +def set2rel(s): + """ + Convert a set containing individuals (strings or numbers) into a set of + unary tuples. Any tuples of strings already in the set are passed through + unchanged. + + For example: + - set(['a', 'b']) => set([('a',), ('b',)]) + - set([3, 27]) => set([('3',), ('27',)]) + + :type s: set + :rtype: set of tuple of str + """ + new = set() + for elem in s: + if isinstance(elem, str): + new.add((elem,)) + elif isinstance(elem, int): + new.add(str(elem)) + else: + new.add(elem) + return new + + +def arity(rel): + """ + Check the arity of a relation. + :type rel: set of tuples + :rtype: int of tuple of str + """ + if len(rel) == 0: + return 0 + return len(list(rel)[0]) + + +class Valuation(dict): + """ + A dictionary which represents a model-theoretic Valuation of non-logical constants. + Keys are strings representing the constants to be interpreted, and values correspond + to individuals (represented as strings) and n-ary relations (represented as sets of tuples + of strings). 
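A short sketch of the helpers defined above: set2rel() lifts bare individuals to unary tuples so that every value stored in a Valuation is a relation, while arity() and is_rel() inspect such sets.

from nltk.sem.evaluate import arity, is_rel, set2rel

rel = set2rel({"a", "b"})
print(rel)                                    # {('a',), ('b',)} (a set, so unordered)
print(arity(rel))                             # 1
print(arity({("b1", "g1"), ("b2", "g1")}))    # 2
print(is_rel(set()))                          # True: the empty relation is allowed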
+ + An instance of ``Valuation`` will raise a KeyError exception (i.e., + just behave like a standard dictionary) if indexed with an expression that + is not in its list of symbols. + """ + + def __init__(self, xs): + """ + :param xs: a list of (symbol, value) pairs. + """ + super().__init__() + for (sym, val) in xs: + if isinstance(val, str) or isinstance(val, bool): + self[sym] = val + elif isinstance(val, set): + self[sym] = set2rel(val) + else: + msg = textwrap.fill( + "Error in initializing Valuation. " + "Unrecognized value for symbol '%s':\n%s" % (sym, val), + width=66, + ) + + raise ValueError(msg) + + def __getitem__(self, key): + if key in self: + return dict.__getitem__(self, key) + else: + raise Undefined("Unknown expression: '%s'" % key) + + def __str__(self): + return pformat(self) + + @property + def domain(self): + """Set-theoretic domain of the value-space of a Valuation.""" + dom = [] + for val in self.values(): + if isinstance(val, str): + dom.append(val) + elif not isinstance(val, bool): + dom.extend( + [elem for tuple_ in val for elem in tuple_ if elem is not None] + ) + return set(dom) + + @property + def symbols(self): + """The non-logical constants which the Valuation recognizes.""" + return sorted(self.keys()) + + @classmethod + def fromstring(cls, s): + return read_valuation(s) + + +########################################## +# REs used by the _read_valuation function +########################################## +_VAL_SPLIT_RE = re.compile(r"\s*=+>\s*") +_ELEMENT_SPLIT_RE = re.compile(r"\s*,\s*") +_TUPLES_RE = re.compile( + r"""\s* + (\([^)]+\)) # tuple-expression + \s*""", + re.VERBOSE, +) + + +def _read_valuation_line(s): + """ + Read a line in a valuation file. + + Lines are expected to be of the form:: + + noosa => n + girl => {g1, g2} + chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)} + + :param s: input line + :type s: str + :return: a pair (symbol, value) + :rtype: tuple + """ + pieces = _VAL_SPLIT_RE.split(s) + symbol = pieces[0] + value = pieces[1] + # check whether the value is meant to be a set + if value.startswith("{"): + value = value[1:-1] + tuple_strings = _TUPLES_RE.findall(value) + # are the set elements tuples? + if tuple_strings: + set_elements = [] + for ts in tuple_strings: + ts = ts[1:-1] + element = tuple(_ELEMENT_SPLIT_RE.split(ts)) + set_elements.append(element) + else: + set_elements = _ELEMENT_SPLIT_RE.split(value) + value = set(set_elements) + return symbol, value + + +def read_valuation(s, encoding=None): + """ + Convert a valuation string into a valuation. + + :param s: a valuation string + :type s: str + :param encoding: the encoding of the input string, if it is binary + :type encoding: str + :return: a ``nltk.sem`` valuation + :rtype: Valuation + """ + if encoding is not None: + s = s.decode(encoding) + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(_read_valuation_line(line)) + except ValueError as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return Valuation(statements) + + +class Assignment(dict): + r""" + A dictionary which represents an assignment of values to variables. + + An assignment can only assign values from its domain. 
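Valuation.fromstring() delegates to read_valuation(), which accepts the line format documented in _read_valuation_line() above. A minimal sketch:

from nltk.sem.evaluate import Valuation

val = Valuation.fromstring("""
noosa => n
girl => {g1, g2}
chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}
""")
print(val["girl"])   # {('g1',), ('g2',)}
print(val.domain)    # {'n', 'g1', 'g2', 'b1', 'b2', 'd1', 'd2'} (as an unordered set)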
+ + If an unknown expression *a* is passed to a model *M*\ 's + interpretation function *i*, *i* will first check whether *M*\ 's + valuation assigns an interpretation to *a* as a constant, and if + this fails, *i* will delegate the interpretation of *a* to + *g*. *g* only assigns values to individual variables (i.e., + members of the class ``IndividualVariableExpression`` in the ``logic`` + module. If a variable is not assigned a value by *g*, it will raise + an ``Undefined`` exception. + + A variable *Assignment* is a mapping from individual variables to + entities in the domain. Individual variables are usually indicated + with the letters ``'x'``, ``'y'``, ``'w'`` and ``'z'``, optionally + followed by an integer (e.g., ``'x0'``, ``'y332'``). Assignments are + created using the ``Assignment`` constructor, which also takes the + domain as a parameter. + + >>> from nltk.sem.evaluate import Assignment + >>> dom = set(['u1', 'u2', 'u3', 'u4']) + >>> g3 = Assignment(dom, [('x', 'u1'), ('y', 'u2')]) + >>> g3 == {'x': 'u1', 'y': 'u2'} + True + + There is also a ``print`` format for assignments which uses a notation + closer to that in logic textbooks: + + >>> print(g3) + g[u1/x][u2/y] + + It is also possible to update an assignment using the ``add`` method: + + >>> dom = set(['u1', 'u2', 'u3', 'u4']) + >>> g4 = Assignment(dom) + >>> g4.add('x', 'u1') + {'x': 'u1'} + + With no arguments, ``purge()`` is equivalent to ``clear()`` on a dictionary: + + >>> g4.purge() + >>> g4 + {} + + :param domain: the domain of discourse + :type domain: set + :param assign: a list of (varname, value) associations + :type assign: list + """ + + def __init__(self, domain, assign=None): + super().__init__() + self.domain = domain + if assign: + for (var, val) in assign: + assert val in self.domain, "'{}' is not in the domain: {}".format( + val, + self.domain, + ) + assert is_indvar(var), ( + "Wrong format for an Individual Variable: '%s'" % var + ) + self[var] = val + self.variant = None + self._addvariant() + + def __getitem__(self, key): + if key in self: + return dict.__getitem__(self, key) + else: + raise Undefined("Not recognized as a variable: '%s'" % key) + + def copy(self): + new = Assignment(self.domain) + new.update(self) + return new + + def purge(self, var=None): + """ + Remove one or all keys (i.e. logic variables) from an + assignment, and update ``self.variant``. + + :param var: a Variable acting as a key for the assignment. + """ + if var: + del self[var] + else: + self.clear() + self._addvariant() + return None + + def __str__(self): + """ + Pretty printing for assignments. {'x', 'u'} appears as 'g[u/x]' + """ + gstring = "g" + # Deterministic output for unit testing. + variant = sorted(self.variant) + for (val, var) in variant: + gstring += f"[{val}/{var}]" + return gstring + + def _addvariant(self): + """ + Create a more pretty-printable version of the assignment. + """ + list_ = [] + for item in self.items(): + pair = (item[1], item[0]) + list_.append(pair) + self.variant = list_ + return None + + def add(self, var, val): + """ + Add a new variable-value pair to the assignment, and update + ``self.variant``. + + """ + assert val in self.domain, f"{val} is not in the domain {self.domain}" + assert is_indvar(var), "Wrong format for an Individual Variable: '%s'" % var + self[var] = val + self._addvariant() + return self + + +class Model: + """ + A first order model is a domain *D* of discourse and a valuation *V*. 
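As a concrete anchor for these definitions, here is a minimal propositional model in the style of propdemo() further below: the domain is empty, the valuation maps proposition letters to truth values, and evaluate() returns a bool or the string 'Undefined'.

from nltk.sem.evaluate import Assignment, Model, Valuation

val = Valuation([("P", True), ("Q", True), ("R", False)])
dom = set()                      # no individuals needed for propositional logic
m = Model(dom, val)
g = Assignment(dom)
print(m.evaluate("(P & Q)", g))  # True
print(m.evaluate("(P & R)", g))  # False
print(m.evaluate("(P & S)", g))  # Undefined: S has no value in the valuation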
+ + A domain *D* is a set, and a valuation *V* is a map that associates + expressions with values in the model. + The domain of *V* should be a subset of *D*. + + Construct a new ``Model``. + + :type domain: set + :param domain: A set of entities representing the domain of discourse of the model. + :type valuation: Valuation + :param valuation: the valuation of the model. + :param prop: If this is set, then we are building a propositional\ + model and don't require the domain of *V* to be subset of *D*. + """ + + def __init__(self, domain, valuation): + assert isinstance(domain, set) + self.domain = domain + self.valuation = valuation + if not domain.issuperset(valuation.domain): + raise Error( + "The valuation domain, %s, must be a subset of the model's domain, %s" + % (valuation.domain, domain) + ) + + def __repr__(self): + return f"({self.domain!r}, {self.valuation!r})" + + def __str__(self): + return f"Domain = {self.domain},\nValuation = \n{self.valuation}" + + def evaluate(self, expr, g, trace=None): + """ + Read input expressions, and provide a handler for ``satisfy`` + that blocks further propagation of the ``Undefined`` error. + :param expr: An ``Expression`` of ``logic``. + :type g: Assignment + :param g: an assignment to individual variables. + :rtype: bool or 'Undefined' + """ + try: + parsed = Expression.fromstring(expr) + value = self.satisfy(parsed, g, trace=trace) + if trace: + print() + print(f"'{expr}' evaluates to {value} under M, {g}") + return value + except Undefined: + if trace: + print() + print(f"'{expr}' is undefined under M, {g}") + return "Undefined" + + def satisfy(self, parsed, g, trace=None): + """ + Recursive interpretation function for a formula of first-order logic. + + Raises an ``Undefined`` error when ``parsed`` is an atomic string + but is not a symbol or an individual variable. + + :return: Returns a truth value or ``Undefined`` if ``parsed`` is\ + complex, and calls the interpretation function ``i`` if ``parsed``\ + is atomic. + + :param parsed: An expression of ``logic``. + :type g: Assignment + :param g: an assignment to individual variables. 
+ """ + + if isinstance(parsed, ApplicationExpression): + function, arguments = parsed.uncurry() + if isinstance(function, AbstractVariableExpression): + # It's a predicate expression ("P(x,y)"), so used uncurried arguments + funval = self.satisfy(function, g) + argvals = tuple(self.satisfy(arg, g) for arg in arguments) + return argvals in funval + else: + # It must be a lambda expression, so use curried form + funval = self.satisfy(parsed.function, g) + argval = self.satisfy(parsed.argument, g) + return funval[argval] + elif isinstance(parsed, NegatedExpression): + return not self.satisfy(parsed.term, g) + elif isinstance(parsed, AndExpression): + return self.satisfy(parsed.first, g) and self.satisfy(parsed.second, g) + elif isinstance(parsed, OrExpression): + return self.satisfy(parsed.first, g) or self.satisfy(parsed.second, g) + elif isinstance(parsed, ImpExpression): + return (not self.satisfy(parsed.first, g)) or self.satisfy(parsed.second, g) + elif isinstance(parsed, IffExpression): + return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g) + elif isinstance(parsed, EqualityExpression): + return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g) + elif isinstance(parsed, AllExpression): + new_g = g.copy() + for u in self.domain: + new_g.add(parsed.variable.name, u) + if not self.satisfy(parsed.term, new_g): + return False + return True + elif isinstance(parsed, ExistsExpression): + new_g = g.copy() + for u in self.domain: + new_g.add(parsed.variable.name, u) + if self.satisfy(parsed.term, new_g): + return True + return False + elif isinstance(parsed, IotaExpression): + new_g = g.copy() + for u in self.domain: + new_g.add(parsed.variable.name, u) + if self.satisfy(parsed.term, new_g): + return True + return False + elif isinstance(parsed, LambdaExpression): + cf = {} + var = parsed.variable.name + for u in self.domain: + val = self.satisfy(parsed.term, g.add(var, u)) + # NB the dict would be a lot smaller if we do this: + # if val: cf[u] = val + # But then need to deal with cases where f(a) should yield + # a function rather than just False. + cf[u] = val + return cf + else: + return self.i(parsed, g, trace) + + # @decorator(trace_eval) + def i(self, parsed, g, trace=False): + """ + An interpretation function. + + Assuming that ``parsed`` is atomic: + + - if ``parsed`` is a non-logical constant, calls the valuation *V* + - else if ``parsed`` is an individual variable, calls assignment *g* + - else returns ``Undefined``. + + :param parsed: an ``Expression`` of ``logic``. + :type g: Assignment + :param g: an assignment to individual variables. + :return: a semantic value + """ + # If parsed is a propositional letter 'p', 'q', etc, it could be in valuation.symbols + # and also be an IndividualVariableExpression. We want to catch this first case. + # So there is a procedural consequence to the ordering of clauses here: + if parsed.variable.name in self.valuation.symbols: + return self.valuation[parsed.variable.name] + elif isinstance(parsed, IndividualVariableExpression): + return g[parsed.variable.name] + + else: + raise Undefined("Can't find a value for %s" % parsed) + + def satisfiers(self, parsed, varex, g, trace=None, nesting=0): + """ + Generate the entities from the model's domain that satisfy an open formula. + + :param parsed: an open formula + :type parsed: Expression + :param varex: the relevant free individual variable in ``parsed``. 
+ :type varex: VariableExpression or str + :param g: a variable assignment + :type g: Assignment + :return: a set of the entities that satisfy ``parsed``. + """ + + spacer = " " + indent = spacer + (spacer * nesting) + candidates = [] + + if isinstance(varex, str): + var = Variable(varex) + else: + var = varex + + if var in parsed.free(): + if trace: + print() + print( + (spacer * nesting) + + f"Open formula is '{parsed}' with assignment {g}" + ) + for u in self.domain: + new_g = g.copy() + new_g.add(var.name, u) + if trace and trace > 1: + lowtrace = trace - 1 + else: + lowtrace = 0 + value = self.satisfy(parsed, new_g, lowtrace) + + if trace: + print(indent + "(trying assignment %s)" % new_g) + + # parsed == False under g[u/var]? + if value == False: + if trace: + print(indent + f"value of '{parsed}' under {new_g} is False") + + # so g[u/var] is a satisfying assignment + else: + candidates.append(u) + if trace: + print(indent + f"value of '{parsed}' under {new_g} is {value}") + + result = {c for c in candidates} + # var isn't free in parsed + else: + raise Undefined(f"{var.name} is not free in {parsed}") + + return result + + +# ////////////////////////////////////////////////////////////////////// +# Demo.. +# ////////////////////////////////////////////////////////////////////// +# number of spacer chars +mult = 30 + +# Demo 1: Propositional Logic +################# +def propdemo(trace=None): + """Example of a propositional model.""" + + global val1, dom1, m1, g1 + val1 = Valuation([("P", True), ("Q", True), ("R", False)]) + dom1 = set() + m1 = Model(dom1, val1) + g1 = Assignment(dom1) + + print() + print("*" * mult) + print("Propositional Formulas Demo") + print("*" * mult) + print("(Propositional constants treated as nullary predicates)") + print() + print("Model m1:\n", m1) + print("*" * mult) + sentences = [ + "(P & Q)", + "(P & R)", + "- P", + "- R", + "- - P", + "- (P & R)", + "(P | R)", + "(R | P)", + "(R | R)", + "(- P | R)", + "(P | - P)", + "(P -> Q)", + "(P -> R)", + "(R -> P)", + "(P <-> P)", + "(R <-> R)", + "(P <-> R)", + ] + + for sent in sentences: + if trace: + print() + m1.evaluate(sent, g1, trace) + else: + print(f"The value of '{sent}' is: {m1.evaluate(sent, g1)}") + + +# Demo 2: FOL Model +############# + + +def folmodel(quiet=False, trace=None): + """Example of a first-order model.""" + + global val2, v2, dom2, m2, g2 + + v2 = [ + ("adam", "b1"), + ("betty", "g1"), + ("fido", "d1"), + ("girl", {"g1", "g2"}), + ("boy", {"b1", "b2"}), + ("dog", {"d1"}), + ("love", {("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")}), + ] + val2 = Valuation(v2) + dom2 = val2.domain + m2 = Model(dom2, val2) + g2 = Assignment(dom2, [("x", "b1"), ("y", "g2")]) + + if not quiet: + print() + print("*" * mult) + print("Models Demo") + print("*" * mult) + print("Model m2:\n", "-" * 14, "\n", m2) + print("Variable assignment = ", g2) + + exprs = ["adam", "boy", "love", "walks", "x", "y", "z"] + parsed_exprs = [Expression.fromstring(e) for e in exprs] + + print() + for parsed in parsed_exprs: + try: + print( + "The interpretation of '%s' in m2 is %s" + % (parsed, m2.i(parsed, g2)) + ) + except Undefined: + print("The interpretation of '%s' in m2 is Undefined" % parsed) + + applications = [ + ("boy", ("adam")), + ("walks", ("adam",)), + ("love", ("adam", "y")), + ("love", ("y", "adam")), + ] + + for (fun, args) in applications: + try: + funval = m2.i(Expression.fromstring(fun), g2) + argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args) + print(f"{fun}({args}) evaluates to 
{argsval in funval}") + except Undefined: + print(f"{fun}({args}) evaluates to Undefined") + + +# Demo 3: FOL +######### + + +def foldemo(trace=None): + """ + Interpretation of closed expressions in a first-order model. + """ + folmodel(quiet=True) + + print() + print("*" * mult) + print("FOL Formulas Demo") + print("*" * mult) + + formulas = [ + "love (adam, betty)", + "(adam = mia)", + "\\x. (boy(x) | girl(x))", + "\\x. boy(x)(adam)", + "\\x y. love(x, y)", + "\\x y. love(x, y)(adam)(betty)", + "\\x y. love(x, y)(adam, betty)", + "\\x y. (boy(x) & love(x, y))", + "\\x. exists y. (boy(x) & love(x, y))", + "exists z1. boy(z1)", + "exists x. (boy(x) & -(x = adam))", + "exists x. (boy(x) & all y. love(y, x))", + "all x. (boy(x) | girl(x))", + "all x. (girl(x) -> exists y. boy(y) & love(x, y))", # Every girl loves exists boy. + "exists x. (boy(x) & all y. (girl(y) -> love(y, x)))", # There is exists boy that every girl loves. + "exists x. (boy(x) & all y. (girl(y) -> love(x, y)))", # exists boy loves every girl. + "all x. (dog(x) -> - girl(x))", + "exists x. exists y. (love(x, y) & love(x, y))", + ] + + for fmla in formulas: + g2.purge() + if trace: + m2.evaluate(fmla, g2, trace) + else: + print(f"The value of '{fmla}' is: {m2.evaluate(fmla, g2)}") + + +# Demo 3: Satisfaction +############# + + +def satdemo(trace=None): + """Satisfiers of an open formula in a first order model.""" + + print() + print("*" * mult) + print("Satisfiers Demo") + print("*" * mult) + + folmodel(quiet=True) + + formulas = [ + "boy(x)", + "(x = x)", + "(boy(x) | girl(x))", + "(boy(x) & girl(x))", + "love(adam, x)", + "love(x, adam)", + "-(x = adam)", + "exists z22. love(x, z22)", + "exists y. love(y, x)", + "all y. (girl(y) -> love(x, y))", + "all y. (girl(y) -> love(y, x))", + "all y. (girl(y) -> (boy(x) & love(y, x)))", + "(boy(x) & all y. (girl(y) -> love(x, y)))", + "(boy(x) & all y. (girl(y) -> love(y, x)))", + "(boy(x) & exists y. (girl(y) & love(y, x)))", + "(girl(x) -> dog(x))", + "all y. (dog(y) -> (x = y))", + "exists y. love(y, x)", + "exists y. (love(adam, y) & love(y, x))", + ] + + if trace: + print(m2) + + for fmla in formulas: + print(fmla) + Expression.fromstring(fmla) + + parsed = [Expression.fromstring(fmla) for fmla in formulas] + + for p in parsed: + g2.purge() + print( + "The satisfiers of '{}' are: {}".format(p, m2.satisfiers(p, "x", g2, trace)) + ) + + +def demo(num=0, trace=None): + """ + Run exists demos. 
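The first-order counterpart, condensed from folmodel() and satdemo() above: build a Valuation over individuals, evaluate a closed formula, and ask for the satisfiers of an open formula in the variable x.

from nltk.sem.evaluate import Assignment, Model, Valuation
from nltk.sem.logic import Expression

val = Valuation([
    ("adam", "b1"), ("betty", "g1"),
    ("girl", {"g1", "g2"}), ("boy", {"b1", "b2"}),
    ("love", {("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")}),
])
dom = val.domain
m = Model(dom, val)
g = Assignment(dom)
print(m.evaluate("all x. (boy(x) | girl(x))", g))                    # True
print(m.satisfiers(Expression.fromstring("love(adam, x)"), "x", g))  # {'g1'}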
+ + - num = 1: propositional logic demo + - num = 2: first order model demo (only if trace is set) + - num = 3: first order sentences demo + - num = 4: satisfaction of open formulas demo + - any other value: run all the demos + + :param trace: trace = 1, or trace = 2 for more verbose tracing + """ + demos = {1: propdemo, 2: folmodel, 3: foldemo, 4: satdemo} + + try: + demos[num](trace=trace) + except KeyError: + for num in demos: + demos[num](trace=trace) + + +if __name__ == "__main__": + demo(2, trace=0) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/glue.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/glue.py new file mode 100644 index 0000000000000000000000000000000000000000..1098c83bec71cee14b6c06e93ba3f15366c0ada2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/glue.py @@ -0,0 +1,835 @@ +# Natural Language Toolkit: Glue Semantics +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import os +from itertools import chain + +import nltk +from nltk.internals import Counter +from nltk.sem import drt, linearlogic +from nltk.sem.logic import ( + AbstractVariableExpression, + Expression, + LambdaExpression, + Variable, + VariableExpression, +) +from nltk.tag import BigramTagger, RegexpTagger, TrigramTagger, UnigramTagger + +SPEC_SEMTYPES = { + "a": "ex_quant", + "an": "ex_quant", + "every": "univ_quant", + "the": "def_art", + "no": "no_quant", + "default": "ex_quant", +} + +OPTIONAL_RELATIONSHIPS = ["nmod", "vmod", "punct"] + + +class GlueFormula: + def __init__(self, meaning, glue, indices=None): + if not indices: + indices = set() + + if isinstance(meaning, str): + self.meaning = Expression.fromstring(meaning) + elif isinstance(meaning, Expression): + self.meaning = meaning + else: + raise RuntimeError( + "Meaning term neither string or expression: %s, %s" + % (meaning, meaning.__class__) + ) + + if isinstance(glue, str): + self.glue = linearlogic.LinearLogicParser().parse(glue) + elif isinstance(glue, linearlogic.Expression): + self.glue = glue + else: + raise RuntimeError( + "Glue term neither string or expression: %s, %s" + % (glue, glue.__class__) + ) + + self.indices = indices + + def applyto(self, arg): + """self = (\\x.(walk x), (subj -o f)) + arg = (john , subj) + returns ((walk john), f) + """ + if self.indices & arg.indices: # if the sets are NOT disjoint + raise linearlogic.LinearLogicApplicationException( + f"'{self}' applied to '{arg}'. Indices are not disjoint." 
+ ) + else: # if the sets ARE disjoint + return_indices = self.indices | arg.indices + + try: + return_glue = linearlogic.ApplicationExpression( + self.glue, arg.glue, arg.indices + ) + except linearlogic.LinearLogicApplicationException as e: + raise linearlogic.LinearLogicApplicationException( + f"'{self.simplify()}' applied to '{arg.simplify()}'" + ) from e + + arg_meaning_abstracted = arg.meaning + if return_indices: + for dep in self.glue.simplify().antecedent.dependencies[ + ::-1 + ]: # if self.glue is (A -o B), dep is in A.dependencies + arg_meaning_abstracted = self.make_LambdaExpression( + Variable("v%s" % dep), arg_meaning_abstracted + ) + return_meaning = self.meaning.applyto(arg_meaning_abstracted) + + return self.__class__(return_meaning, return_glue, return_indices) + + def make_VariableExpression(self, name): + return VariableExpression(name) + + def make_LambdaExpression(self, variable, term): + return LambdaExpression(variable, term) + + def lambda_abstract(self, other): + assert isinstance(other, GlueFormula) + assert isinstance(other.meaning, AbstractVariableExpression) + return self.__class__( + self.make_LambdaExpression(other.meaning.variable, self.meaning), + linearlogic.ImpExpression(other.glue, self.glue), + ) + + def compile(self, counter=None): + """From Iddo Lev's PhD Dissertation p108-109""" + if not counter: + counter = Counter() + (compiled_glue, new_forms) = self.glue.simplify().compile_pos( + counter, self.__class__ + ) + return new_forms + [ + self.__class__(self.meaning, compiled_glue, {counter.get()}) + ] + + def simplify(self): + return self.__class__( + self.meaning.simplify(), self.glue.simplify(), self.indices + ) + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.meaning == other.meaning + and self.glue == other.glue + ) + + def __ne__(self, other): + return not self == other + + # sorting for use in doctests which must be deterministic + def __lt__(self, other): + return str(self) < str(other) + + def __str__(self): + assert isinstance(self.indices, set) + accum = f"{self.meaning} : {self.glue}" + if self.indices: + accum += ( + " : {" + ", ".join(str(index) for index in sorted(self.indices)) + "}" + ) + return accum + + def __repr__(self): + return "%s" % self + + +class GlueDict(dict): + def __init__(self, filename, encoding=None): + self.filename = filename + self.file_encoding = encoding + self.read_file() + + def read_file(self, empty_first=True): + if empty_first: + self.clear() + + try: + contents = nltk.data.load( + self.filename, format="text", encoding=self.file_encoding + ) + # TODO: the above can't handle zip files, but this should anyway be fixed in nltk.data.load() + except LookupError as e: + try: + contents = nltk.data.load( + "file:" + self.filename, format="text", encoding=self.file_encoding + ) + except LookupError: + raise e + lines = contents.splitlines() + + for line in lines: # example: 'n : (\\x.( x), (v-or))' + # lambdacalc -^ linear logic -^ + line = line.strip() # remove trailing newline + if not len(line): + continue # skip empty lines + if line[0] == "#": + continue # skip commented out lines + + parts = line.split( + " : ", 2 + ) # ['verb', '(\\x.( x), ( subj -o f ))', '[subj]'] + + glue_formulas = [] + paren_count = 0 + tuple_start = 0 + tuple_comma = 0 + + relationships = None + + if len(parts) > 1: + for (i, c) in enumerate(parts[1]): + if c == "(": + if paren_count == 0: # if it's the first '(' of a tuple + tuple_start = i + 1 # then save the index + paren_count += 1 + elif c == 
")": + paren_count -= 1 + if paren_count == 0: # if it's the last ')' of a tuple + meaning_term = parts[1][ + tuple_start:tuple_comma + ] # '\\x.( x)' + glue_term = parts[1][tuple_comma + 1 : i] # '(v-r)' + glue_formulas.append( + [meaning_term, glue_term] + ) # add the GlueFormula to the list + elif c == ",": + if ( + paren_count == 1 + ): # if it's a comma separating the parts of the tuple + tuple_comma = i # then save the index + elif c == "#": # skip comments at the ends of lines + if ( + paren_count != 0 + ): # if the line hasn't parsed correctly so far + raise RuntimeError( + "Formula syntax is incorrect for entry " + line + ) + break # break to the next line + + if len(parts) > 2: # if there is a relationship entry at the end + rel_start = parts[2].index("[") + 1 + rel_end = parts[2].index("]") + if rel_start == rel_end: + relationships = frozenset() + else: + relationships = frozenset( + r.strip() for r in parts[2][rel_start:rel_end].split(",") + ) + + try: + start_inheritance = parts[0].index("(") + end_inheritance = parts[0].index(")") + sem = parts[0][:start_inheritance].strip() + supertype = parts[0][start_inheritance + 1 : end_inheritance] + except: + sem = parts[0].strip() + supertype = None + + if sem not in self: + self[sem] = {} + + if ( + relationships is None + ): # if not specified for a specific relationship set + # add all relationship entries for parents + if supertype: + for rels in self[supertype]: + if rels not in self[sem]: + self[sem][rels] = [] + glue = self[supertype][rels] + self[sem][rels].extend(glue) + self[sem][rels].extend( + glue_formulas + ) # add the glue formulas to every rel entry + else: + if None not in self[sem]: + self[sem][None] = [] + self[sem][None].extend( + glue_formulas + ) # add the glue formulas to every rel entry + else: + if relationships not in self[sem]: + self[sem][relationships] = [] + if supertype: + self[sem][relationships].extend(self[supertype][relationships]) + self[sem][relationships].extend( + glue_formulas + ) # add the glue entry to the dictionary + + def __str__(self): + accum = "" + for pos in self: + str_pos = "%s" % pos + for relset in self[pos]: + i = 1 + for gf in self[pos][relset]: + if i == 1: + accum += str_pos + ": " + else: + accum += " " * (len(str_pos) + 2) + accum += "%s" % gf + if relset and i == len(self[pos][relset]): + accum += " : %s" % relset + accum += "\n" + i += 1 + return accum + + def to_glueformula_list(self, depgraph, node=None, counter=None, verbose=False): + if node is None: + # TODO: should it be depgraph.root? Is this code tested? 
+ top = depgraph.nodes[0] + depList = list(chain.from_iterable(top["deps"].values())) + root = depgraph.nodes[depList[0]] + + return self.to_glueformula_list(depgraph, root, Counter(), verbose) + + glueformulas = self.lookup(node, depgraph, counter) + for dep_idx in chain.from_iterable(node["deps"].values()): + dep = depgraph.nodes[dep_idx] + glueformulas.extend( + self.to_glueformula_list(depgraph, dep, counter, verbose) + ) + return glueformulas + + def lookup(self, node, depgraph, counter): + semtype_names = self.get_semtypes(node) + + semtype = None + for name in semtype_names: + if name in self: + semtype = self[name] + break + if semtype is None: + # raise KeyError, "There is no GlueDict entry for sem type '%s' (for '%s')" % (sem, word) + return [] + + self.add_missing_dependencies(node, depgraph) + + lookup = self._lookup_semtype_option(semtype, node, depgraph) + + if not len(lookup): + raise KeyError( + "There is no GlueDict entry for sem type of '%s' " + "with tag '%s', and rel '%s'" % (node["word"], node["tag"], node["rel"]) + ) + + return self.get_glueformulas_from_semtype_entry( + lookup, node["word"], node, depgraph, counter + ) + + def add_missing_dependencies(self, node, depgraph): + rel = node["rel"].lower() + + if rel == "main": + headnode = depgraph.nodes[node["head"]] + subj = self.lookup_unique("subj", headnode, depgraph) + relation = subj["rel"] + node["deps"].setdefault(relation, []) + node["deps"][relation].append(subj["address"]) + # node['deps'].append(subj['address']) + + def _lookup_semtype_option(self, semtype, node, depgraph): + relationships = frozenset( + depgraph.nodes[dep]["rel"].lower() + for dep in chain.from_iterable(node["deps"].values()) + if depgraph.nodes[dep]["rel"].lower() not in OPTIONAL_RELATIONSHIPS + ) + + try: + lookup = semtype[relationships] + except KeyError: + # An exact match is not found, so find the best match where + # 'best' is defined as the glue entry whose relationship set has the + # most relations of any possible relationship set that is a subset + # of the actual depgraph + best_match = frozenset() + for relset_option in set(semtype) - {None}: + if ( + len(relset_option) > len(best_match) + and relset_option < relationships + ): + best_match = relset_option + if not best_match: + if None in semtype: + best_match = None + else: + return None + lookup = semtype[best_match] + + return lookup + + def get_semtypes(self, node): + """ + Based on the node, return a list of plausible semtypes in order of + plausibility. 
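A sketch of the semtype selection order described above, written against a hand-built node dict. The field names follow the dependency-graph nodes used throughout this module; the concrete values are made up for illustration, and the branches simply mirror get_semtypes().

from nltk.sem.glue import SPEC_SEMTYPES

node = {"rel": "spec", "word": "every", "tag": "DT"}   # hypothetical depgraph node
if node["rel"].lower() == "spec":
    semtypes = [SPEC_SEMTYPES.get(node["word"].lower(), SPEC_SEMTYPES["default"])]
elif node["rel"].lower() in ("nmod", "vmod"):
    semtypes = [node["tag"], node["rel"].lower()]
else:
    semtypes = [node["tag"]]
print(semtypes)   # ['univ_quant']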
+ """ + rel = node["rel"].lower() + word = node["word"].lower() + + if rel == "spec": + if word in SPEC_SEMTYPES: + return [SPEC_SEMTYPES[word]] + else: + return [SPEC_SEMTYPES["default"]] + elif rel in ["nmod", "vmod"]: + return [node["tag"], rel] + else: + return [node["tag"]] + + def get_glueformulas_from_semtype_entry( + self, lookup, word, node, depgraph, counter + ): + glueformulas = [] + + glueFormulaFactory = self.get_GlueFormula_factory() + for meaning, glue in lookup: + gf = glueFormulaFactory(self.get_meaning_formula(meaning, word), glue) + if not len(glueformulas): + gf.word = word + else: + gf.word = f"{word}{len(glueformulas) + 1}" + + gf.glue = self.initialize_labels(gf.glue, node, depgraph, counter.get()) + + glueformulas.append(gf) + return glueformulas + + def get_meaning_formula(self, generic, word): + """ + :param generic: A meaning formula string containing the + parameter "" + :param word: The actual word to be replace "" + """ + word = word.replace(".", "") + return generic.replace("", word) + + def initialize_labels(self, expr, node, depgraph, unique_index): + if isinstance(expr, linearlogic.AtomicExpression): + name = self.find_label_name(expr.name.lower(), node, depgraph, unique_index) + if name[0].isupper(): + return linearlogic.VariableExpression(name) + else: + return linearlogic.ConstantExpression(name) + else: + return linearlogic.ImpExpression( + self.initialize_labels(expr.antecedent, node, depgraph, unique_index), + self.initialize_labels(expr.consequent, node, depgraph, unique_index), + ) + + def find_label_name(self, name, node, depgraph, unique_index): + try: + dot = name.index(".") + + before_dot = name[:dot] + after_dot = name[dot + 1 :] + if before_dot == "super": + return self.find_label_name( + after_dot, depgraph.nodes[node["head"]], depgraph, unique_index + ) + else: + return self.find_label_name( + after_dot, + self.lookup_unique(before_dot, node, depgraph), + depgraph, + unique_index, + ) + except ValueError: + lbl = self.get_label(node) + if name == "f": + return lbl + elif name == "v": + return "%sv" % lbl + elif name == "r": + return "%sr" % lbl + elif name == "super": + return self.get_label(depgraph.nodes[node["head"]]) + elif name == "var": + return f"{lbl.upper()}{unique_index}" + elif name == "a": + return self.get_label(self.lookup_unique("conja", node, depgraph)) + elif name == "b": + return self.get_label(self.lookup_unique("conjb", node, depgraph)) + else: + return self.get_label(self.lookup_unique(name, node, depgraph)) + + def get_label(self, node): + """ + Pick an alphabetic character as identifier for an entity in the model. + + :param value: where to index into the list of characters + :type value: int + """ + value = node["address"] + + letter = [ + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "a", + "b", + "c", + "d", + "e", + ][value - 1] + num = int(value) // 26 + if num > 0: + return letter + str(num) + else: + return letter + + def lookup_unique(self, rel, node, depgraph): + """ + Lookup 'key'. There should be exactly one item in the associated relation. 
+ """ + deps = [ + depgraph.nodes[dep] + for dep in chain.from_iterable(node["deps"].values()) + if depgraph.nodes[dep]["rel"].lower() == rel.lower() + ] + + if len(deps) == 0: + raise KeyError( + "'{}' doesn't contain a feature '{}'".format(node["word"], rel) + ) + elif len(deps) > 1: + raise KeyError( + "'{}' should only have one feature '{}'".format(node["word"], rel) + ) + else: + return deps[0] + + def get_GlueFormula_factory(self): + return GlueFormula + + +class Glue: + def __init__( + self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False + ): + self.verbose = verbose + self.remove_duplicates = remove_duplicates + self.depparser = depparser + + from nltk import Prover9 + + self.prover = Prover9() + + if semtype_file: + self.semtype_file = semtype_file + else: + self.semtype_file = os.path.join( + "grammars", "sample_grammars", "glue.semtype" + ) + + def train_depparser(self, depgraphs=None): + if depgraphs: + self.depparser.train(depgraphs) + else: + self.depparser.train_from_file( + nltk.data.find( + os.path.join("grammars", "sample_grammars", "glue_train.conll") + ) + ) + + def parse_to_meaning(self, sentence): + readings = [] + for agenda in self.parse_to_compiled(sentence): + readings.extend(self.get_readings(agenda)) + return readings + + def get_readings(self, agenda): + readings = [] + agenda_length = len(agenda) + atomics = dict() + nonatomics = dict() + while agenda: # is not empty + cur = agenda.pop() + glue_simp = cur.glue.simplify() + if isinstance( + glue_simp, linearlogic.ImpExpression + ): # if cur.glue is non-atomic + for key in atomics: + try: + if isinstance(cur.glue, linearlogic.ApplicationExpression): + bindings = cur.glue.bindings + else: + bindings = linearlogic.BindingDict() + glue_simp.antecedent.unify(key, bindings) + for atomic in atomics[key]: + if not ( + cur.indices & atomic.indices + ): # if the sets of indices are disjoint + try: + agenda.append(cur.applyto(atomic)) + except linearlogic.LinearLogicApplicationException: + pass + except linearlogic.UnificationException: + pass + try: + nonatomics[glue_simp.antecedent].append(cur) + except KeyError: + nonatomics[glue_simp.antecedent] = [cur] + + else: # else cur.glue is atomic + for key in nonatomics: + for nonatomic in nonatomics[key]: + try: + if isinstance( + nonatomic.glue, linearlogic.ApplicationExpression + ): + bindings = nonatomic.glue.bindings + else: + bindings = linearlogic.BindingDict() + glue_simp.unify(key, bindings) + if not ( + cur.indices & nonatomic.indices + ): # if the sets of indices are disjoint + try: + agenda.append(nonatomic.applyto(cur)) + except linearlogic.LinearLogicApplicationException: + pass + except linearlogic.UnificationException: + pass + try: + atomics[glue_simp].append(cur) + except KeyError: + atomics[glue_simp] = [cur] + + for entry in atomics: + for gf in atomics[entry]: + if len(gf.indices) == agenda_length: + self._add_to_reading_list(gf, readings) + for entry in nonatomics: + for gf in nonatomics[entry]: + if len(gf.indices) == agenda_length: + self._add_to_reading_list(gf, readings) + return readings + + def _add_to_reading_list(self, glueformula, reading_list): + add_reading = True + if self.remove_duplicates: + for reading in reading_list: + try: + if reading.equiv(glueformula.meaning, self.prover): + add_reading = False + break + except Exception as e: + # if there is an exception, the syntax of the formula + # may not be understandable by the prover, so don't + # throw out the reading. 
+ print("Error when checking logical equality of statements", e) + + if add_reading: + reading_list.append(glueformula.meaning) + + def parse_to_compiled(self, sentence): + gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)] + return [self.gfl_to_compiled(gfl) for gfl in gfls] + + def dep_parse(self, sentence): + """ + Return a dependency graph for the sentence. + + :param sentence: the sentence to be parsed + :type sentence: list(str) + :rtype: DependencyGraph + """ + + # Lazy-initialize the depparser + if self.depparser is None: + from nltk.parse import MaltParser + + self.depparser = MaltParser(tagger=self.get_pos_tagger()) + if not self.depparser._trained: + self.train_depparser() + return self.depparser.parse(sentence, verbose=self.verbose) + + def depgraph_to_glue(self, depgraph): + return self.get_glue_dict().to_glueformula_list(depgraph) + + def get_glue_dict(self): + return GlueDict(self.semtype_file) + + def gfl_to_compiled(self, gfl): + index_counter = Counter() + return_list = [] + for gf in gfl: + return_list.extend(gf.compile(index_counter)) + + if self.verbose: + print("Compiled Glue Premises:") + for cgf in return_list: + print(cgf) + + return return_list + + def get_pos_tagger(self): + from nltk.corpus import brown + + regexp_tagger = RegexpTagger( + [ + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "AT"), # articles + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] + ) + brown_train = brown.tagged_sents(categories="news") + unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger) + bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger) + trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger) + + # Override particular words + main_tagger = RegexpTagger( + [(r"(A|a|An|an)$", "ex_quant"), (r"(Every|every|All|all)$", "univ_quant")], + backoff=trigram_tagger, + ) + + return main_tagger + + +class DrtGlueFormula(GlueFormula): + def __init__(self, meaning, glue, indices=None): + if not indices: + indices = set() + + if isinstance(meaning, str): + self.meaning = drt.DrtExpression.fromstring(meaning) + elif isinstance(meaning, drt.DrtExpression): + self.meaning = meaning + else: + raise RuntimeError( + "Meaning term neither string or expression: %s, %s" + % (meaning, meaning.__class__) + ) + + if isinstance(glue, str): + self.glue = linearlogic.LinearLogicParser().parse(glue) + elif isinstance(glue, linearlogic.Expression): + self.glue = glue + else: + raise RuntimeError( + "Glue term neither string or expression: %s, %s" + % (glue, glue.__class__) + ) + + self.indices = indices + + def make_VariableExpression(self, name): + return drt.DrtVariableExpression(name) + + def make_LambdaExpression(self, variable, term): + return drt.DrtLambdaExpression(variable, term) + + +class DrtGlueDict(GlueDict): + def get_GlueFormula_factory(self): + return DrtGlueFormula + + +class DrtGlue(Glue): + def __init__( + self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False + ): + if not semtype_file: + semtype_file = os.path.join( + "grammars", "sample_grammars", "drt_glue.semtype" + ) + Glue.__init__(self, semtype_file, remove_duplicates, depparser, verbose) + + def get_glue_dict(self): + return DrtGlueDict(self.semtype_file) + + +def demo(show_example=-1): + from nltk.parse 
import MaltParser + + examples = [ + "David sees Mary", + "David eats a sandwich", + "every man chases a dog", + "every man believes a dog sleeps", + "John gives David a sandwich", + "John chases himself", + ] + # 'John persuades David to order a pizza', + # 'John tries to go', + # 'John tries to find a unicorn', + # 'John seems to vanish', + # 'a unicorn seems to approach', + # 'every big cat leaves', + # 'every gray cat leaves', + # 'every big gray cat leaves', + # 'a former senator leaves', + + print("============== DEMO ==============") + + tagger = RegexpTagger( + [ + ("^(David|Mary|John)$", "NNP"), + ( + "^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$", + "VB", + ), + ("^(go|order|vanish|find|approach)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"), + ("^(big|gray|former)$", "JJ"), + ("^(him|himself)$", "PRP"), + ] + ) + + depparser = MaltParser(tagger=tagger) + glue = Glue(depparser=depparser, verbose=False) + + for (i, sentence) in enumerate(examples): + if i == show_example or show_example == -1: + print(f"[[[Example {i}]]] {sentence}") + for reading in glue.parse_to_meaning(sentence.split()): + print(reading.simplify()) + print("") + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/hole.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/hole.py new file mode 100644 index 0000000000000000000000000000000000000000..4570cb02a3bf183a73a1f9b5e78b8f0e1dac430f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/hole.py @@ -0,0 +1,395 @@ +# Natural Language Toolkit: Logic +# +# Author: Peter Wang +# Updated by: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +An implementation of the Hole Semantics model, following Blackburn and Bos, +Representation and Inference for Natural Language (CSLI, 2005). + +The semantic representations are built by the grammar hole.fcfg. +This module contains driver code to read in sentences and parse them +according to a hole semantics grammar. + +After parsing, the semantic representation is in the form of an underspecified +representation that is not easy to read. We use a "plugging" algorithm to +convert that representation into first-order logic formulas. +""" + +from functools import reduce + +from nltk.parse import load_parser +from nltk.sem.logic import ( + AllExpression, + AndExpression, + ApplicationExpression, + ExistsExpression, + IffExpression, + ImpExpression, + LambdaExpression, + NegatedExpression, + OrExpression, +) +from nltk.sem.skolemize import skolemize + +# Note that in this code there may be multiple types of trees being referred to: +# +# 1. parse trees +# 2. the underspecified representation +# 3. first-order logic formula trees +# 4. the search space when plugging (search tree) +# + + +class Constants: + ALL = "ALL" + EXISTS = "EXISTS" + NOT = "NOT" + AND = "AND" + OR = "OR" + IMP = "IMP" + IFF = "IFF" + PRED = "PRED" + LEQ = "LEQ" + HOLE = "HOLE" + LABEL = "LABEL" + + MAP = { + ALL: lambda v, e: AllExpression(v.variable, e), + EXISTS: lambda v, e: ExistsExpression(v.variable, e), + NOT: NegatedExpression, + AND: AndExpression, + OR: OrExpression, + IMP: ImpExpression, + IFF: IffExpression, + PRED: ApplicationExpression, + } + + +class HoleSemantics: + """ + This class holds the broken-down components of a hole semantics, i.e. 
it + extracts the holes, labels, logic formula fragments and constraints out of + a big conjunction of such as produced by the hole semantics grammar. It + then provides some operations on the semantics dealing with holes, labels + and finding legal ways to plug holes with labels. + """ + + def __init__(self, usr): + """ + Constructor. `usr' is a ``sem.Expression`` representing an + Underspecified Representation Structure (USR). A USR has the following + special predicates: + ALL(l,v,n), + EXISTS(l,v,n), + AND(l,n,n), + OR(l,n,n), + IMP(l,n,n), + IFF(l,n,n), + PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions, + LEQ(n,n), + HOLE(n), + LABEL(n) + where l is the label of the node described by the predicate, n is either + a label or a hole, and v is a variable. + """ + self.holes = set() + self.labels = set() + self.fragments = {} # mapping of label -> formula fragment + self.constraints = set() # set of Constraints + self._break_down(usr) + self.top_most_labels = self._find_top_most_labels() + self.top_hole = self._find_top_hole() + + def is_node(self, x): + """ + Return true if x is a node (label or hole) in this semantic + representation. + """ + return x in (self.labels | self.holes) + + def _break_down(self, usr): + """ + Extract holes, labels, formula fragments and constraints from the hole + semantics underspecified representation (USR). + """ + if isinstance(usr, AndExpression): + self._break_down(usr.first) + self._break_down(usr.second) + elif isinstance(usr, ApplicationExpression): + func, args = usr.uncurry() + if func.variable.name == Constants.LEQ: + self.constraints.add(Constraint(args[0], args[1])) + elif func.variable.name == Constants.HOLE: + self.holes.add(args[0]) + elif func.variable.name == Constants.LABEL: + self.labels.add(args[0]) + else: + label = args[0] + assert label not in self.fragments + self.fragments[label] = (func, args[1:]) + else: + raise ValueError(usr.label()) + + def _find_top_nodes(self, node_list): + top_nodes = node_list.copy() + for f in self.fragments.values(): + # the label is the first argument of the predicate + args = f[1] + for arg in args: + if arg in node_list: + top_nodes.discard(arg) + return top_nodes + + def _find_top_most_labels(self): + """ + Return the set of labels which are not referenced directly as part of + another formula fragment. These will be the top-most labels for the + subtree that they are part of. + """ + return self._find_top_nodes(self.labels) + + def _find_top_hole(self): + """ + Return the hole that will be the top of the formula tree. + """ + top_holes = self._find_top_nodes(self.holes) + assert len(top_holes) == 1 # it must be unique + return top_holes.pop() + + def pluggings(self): + """ + Calculate and return all the legal pluggings (mappings of labels to + holes) of this semantics given the constraints. + """ + record = [] + self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record) + return record + + def _plug_nodes(self, queue, potential_labels, plug_acc, record): + """ + Plug the nodes in `queue' with the labels in `potential_labels'. + + Each element of `queue' is a tuple of the node to plug and the list of + ancestor holes from the root of the graph to that node. + + `potential_labels' is a set of the labels which are still available for + plugging. + + `plug_acc' is the incomplete mapping of holes to labels made on the + current branch of the search tree so far. + + `record' is a list of all the complete pluggings that we have found in + total so far. 
It is the only parameter that is destructively updated. + """ + if queue != []: + (node, ancestors) = queue[0] + if node in self.holes: + # The node is a hole, try to plug it. + self._plug_hole( + node, ancestors, queue[1:], potential_labels, plug_acc, record + ) + else: + assert node in self.labels + # The node is a label. Replace it in the queue by the holes and + # labels in the formula fragment named by that label. + args = self.fragments[node][1] + head = [(a, ancestors) for a in args if self.is_node(a)] + self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record) + else: + raise Exception("queue empty") + + def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record): + """ + Try all possible ways of plugging a single hole. + See _plug_nodes for the meanings of the parameters. + """ + # Add the current hole we're trying to plug into the list of ancestors. + assert hole not in ancestors0 + ancestors = [hole] + ancestors0 + + # Try each potential label in this hole in turn. + for l in potential_labels0: + # Is the label valid in this hole? + if self._violates_constraints(l, ancestors): + continue + + plug_acc = plug_acc0.copy() + plug_acc[hole] = l + potential_labels = potential_labels0.copy() + potential_labels.remove(l) + + if len(potential_labels) == 0: + # No more potential labels. That must mean all the holes have + # been filled so we have found a legal plugging so remember it. + # + # Note that the queue might not be empty because there might + # be labels on there that point to formula fragments with + # no holes in them. _sanity_check_plugging will make sure + # all holes are filled. + self._sanity_check_plugging(plug_acc, self.top_hole, []) + record.append(plug_acc) + else: + # Recursively try to fill in the rest of the holes in the + # queue. The label we just plugged into the hole could have + # holes of its own so at the end of the queue. Putting it on + # the end of the queue gives us a breadth-first search, so that + # all the holes at level i of the formula tree are filled + # before filling level i+1. + # A depth-first search would work as well since the trees must + # be finite but the bookkeeping would be harder. + self._plug_nodes( + queue + [(l, ancestors)], potential_labels, plug_acc, record + ) + + def _violates_constraints(self, label, ancestors): + """ + Return True if the `label' cannot be placed underneath the holes given + by the set `ancestors' because it would violate the constraints imposed + on it. + """ + for c in self.constraints: + if c.lhs == label: + if c.rhs not in ancestors: + return True + return False + + def _sanity_check_plugging(self, plugging, node, ancestors): + """ + Make sure that a given plugging is legal. We recursively go through + each node and make sure that no constraints are violated. + We also check that all holes have been filled. + """ + if node in self.holes: + ancestors = [node] + ancestors + label = plugging[node] + else: + label = node + assert label in self.labels + for c in self.constraints: + if c.lhs == label: + assert c.rhs in ancestors + args = self.fragments[label][1] + for arg in args: + if self.is_node(arg): + self._sanity_check_plugging(plugging, arg, [label] + ancestors) + + def formula_tree(self, plugging): + """ + Return the first-order logic formula tree for this underspecified + representation using the plugging given. 
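# --- Illustrative aside (not part of this patch): driving HoleSemantics by hand,
# mirroring the hole_readings() helper defined further down.  Assumes the sample
# grammar grammars/sample_grammars/hole.fcfg is installed with nltk_data.
from nltk.parse import load_parser
from nltk.sem.hole import HoleSemantics
from nltk.sem.logic import LambdaExpression
from nltk.sem.skolemize import skolemize

parser = load_parser("grammars/sample_grammars/hole.fcfg")
tree = next(parser.parse("every girl chases a dog".split()))
sem = tree.label()["SEM"].simplify()
while isinstance(sem, LambdaExpression):   # strip any outer lambdas
    sem = sem.term
hs = HoleSemantics(skolemize(sem))
for plugging in hs.pluggings():            # each plugging maps holes to labels
    print(hs.formula_tree(plugging))       # one first-order reading per plugging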
+ """ + return self._formula_tree(plugging, self.top_hole) + + def _formula_tree(self, plugging, node): + if node in plugging: + return self._formula_tree(plugging, plugging[node]) + elif node in self.fragments: + pred, args = self.fragments[node] + children = [self._formula_tree(plugging, arg) for arg in args] + return reduce(Constants.MAP[pred.variable.name], children) + else: + return node + + +class Constraint: + """ + This class represents a constraint of the form (L =< N), + where L is a label and N is a node (a label or a hole). + """ + + def __init__(self, lhs, rhs): + self.lhs = lhs + self.rhs = rhs + + def __eq__(self, other): + if self.__class__ == other.__class__: + return self.lhs == other.lhs and self.rhs == other.rhs + else: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash(repr(self)) + + def __repr__(self): + return f"({self.lhs} < {self.rhs})" + + +def hole_readings(sentence, grammar_filename=None, verbose=False): + if not grammar_filename: + grammar_filename = "grammars/sample_grammars/hole.fcfg" + + if verbose: + print("Reading grammar file", grammar_filename) + + parser = load_parser(grammar_filename) + + # Parse the sentence. + tokens = sentence.split() + trees = list(parser.parse(tokens)) + if verbose: + print("Got %d different parses" % len(trees)) + + all_readings = [] + for tree in trees: + # Get the semantic feature from the top of the parse tree. + sem = tree.label()["SEM"].simplify() + + # Print the raw semantic representation. + if verbose: + print("Raw: ", sem) + + # Skolemize away all quantifiers. All variables become unique. + while isinstance(sem, LambdaExpression): + sem = sem.term + skolemized = skolemize(sem) + + if verbose: + print("Skolemized:", skolemized) + + # Break the hole semantics representation down into its components + # i.e. holes, labels, formula fragments and constraints. + hole_sem = HoleSemantics(skolemized) + + # Maybe show the details of the semantic representation. + if verbose: + print("Holes: ", hole_sem.holes) + print("Labels: ", hole_sem.labels) + print("Constraints: ", hole_sem.constraints) + print("Top hole: ", hole_sem.top_hole) + print("Top labels: ", hole_sem.top_most_labels) + print("Fragments:") + for l, f in hole_sem.fragments.items(): + print(f"\t{l}: {f}") + + # Find all the possible ways to plug the formulas together. + pluggings = hole_sem.pluggings() + + # Build FOL formula trees using the pluggings. + readings = list(map(hole_sem.formula_tree, pluggings)) + + # Print out the formulas in a textual format. + if verbose: + for i, r in enumerate(readings): + print() + print("%d. 
%s" % (i, r)) + print() + + all_readings.extend(readings) + + return all_readings + + +if __name__ == "__main__": + for r in hole_readings("a dog barks"): + print(r) + print() + for r in hole_readings("every girl chases a dog"): + print(r) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/lfg.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/lfg.py new file mode 100644 index 0000000000000000000000000000000000000000..13473b0087940c9b42cc4c36d5f442bb0f78eafe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/lfg.py @@ -0,0 +1,261 @@ +# Natural Language Toolkit: Lexical Functional Grammar +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from itertools import chain + +from nltk.internals import Counter + + +class FStructure(dict): + def safeappend(self, key, item): + """ + Append 'item' to the list at 'key'. If no list exists for 'key', then + construct one. + """ + if key not in self: + self[key] = [] + self[key].append(item) + + def __setitem__(self, key, value): + dict.__setitem__(self, key.lower(), value) + + def __getitem__(self, key): + return dict.__getitem__(self, key.lower()) + + def __contains__(self, key): + return dict.__contains__(self, key.lower()) + + def to_glueformula_list(self, glue_dict): + depgraph = self.to_depgraph() + return glue_dict.to_glueformula_list(depgraph) + + def to_depgraph(self, rel=None): + from nltk.parse.dependencygraph import DependencyGraph + + depgraph = DependencyGraph() + nodes = depgraph.nodes + + self._to_depgraph(nodes, 0, "ROOT") + + # Add all the dependencies for all the nodes + for address, node in nodes.items(): + for n2 in (n for n in nodes.values() if n["rel"] != "TOP"): + if n2["head"] == address: + relation = n2["rel"] + node["deps"].setdefault(relation, []) + node["deps"][relation].append(n2["address"]) + + depgraph.root = nodes[1] + + return depgraph + + def _to_depgraph(self, nodes, head, rel): + index = len(nodes) + + nodes[index].update( + { + "address": index, + "word": self.pred[0], + "tag": self.pred[1], + "head": head, + "rel": rel, + } + ) + + for feature in sorted(self): + for item in sorted(self[feature]): + if isinstance(item, FStructure): + item._to_depgraph(nodes, index, feature) + elif isinstance(item, tuple): + new_index = len(nodes) + nodes[new_index].update( + { + "address": new_index, + "word": item[0], + "tag": item[1], + "head": index, + "rel": feature, + } + ) + elif isinstance(item, list): + for n in item: + n._to_depgraph(nodes, index, feature) + else: + raise Exception( + "feature %s is not an FStruct, a list, or a tuple" % feature + ) + + @staticmethod + def read_depgraph(depgraph): + return FStructure._read_depgraph(depgraph.root, depgraph) + + @staticmethod + def _read_depgraph(node, depgraph, label_counter=None, parent=None): + if not label_counter: + label_counter = Counter() + + if node["rel"].lower() in ["spec", "punct"]: + # the value of a 'spec' entry is a word, not an FStructure + return (node["word"], node["tag"]) + + else: + fstruct = FStructure() + fstruct.pred = None + fstruct.label = FStructure._make_label(label_counter.get()) + + fstruct.parent = parent + + word, tag = node["word"], node["tag"] + if tag[:2] == "VB": + if tag[2:3] == "D": + fstruct.safeappend("tense", ("PAST", "tense")) + fstruct.pred = (word, tag[:2]) + + if not fstruct.pred: + fstruct.pred = (word, tag) + + children = [ + depgraph.nodes[idx] + for idx in chain.from_iterable(node["deps"].values()) + ] + for child in 
children: + fstruct.safeappend( + child["rel"], + FStructure._read_depgraph(child, depgraph, label_counter, fstruct), + ) + + return fstruct + + @staticmethod + def _make_label(value): + """ + Pick an alphabetic character as identifier for an entity in the model. + + :param value: where to index into the list of characters + :type value: int + """ + letter = [ + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "a", + "b", + "c", + "d", + "e", + ][value - 1] + num = int(value) // 26 + if num > 0: + return letter + str(num) + else: + return letter + + def __repr__(self): + return self.__str__().replace("\n", "") + + def __str__(self): + return self.pretty_format() + + def pretty_format(self, indent=3): + try: + accum = "%s:[" % self.label + except NameError: + accum = "[" + try: + accum += "pred '%s'" % (self.pred[0]) + except NameError: + pass + + for feature in sorted(self): + for item in self[feature]: + if isinstance(item, FStructure): + next_indent = indent + len(feature) + 3 + len(self.label) + accum += "\n{}{} {}".format( + " " * (indent), + feature, + item.pretty_format(next_indent), + ) + elif isinstance(item, tuple): + accum += "\n{}{} '{}'".format(" " * (indent), feature, item[0]) + elif isinstance(item, list): + accum += "\n{}{} {{{}}}".format( + " " * (indent), + feature, + ("\n%s" % (" " * (indent + len(feature) + 2))).join(item), + ) + else: # ERROR + raise Exception( + "feature %s is not an FStruct, a list, or a tuple" % feature + ) + return accum + "]" + + +def demo_read_depgraph(): + from nltk.parse.dependencygraph import DependencyGraph + + dg1 = DependencyGraph( + """\ +Esso NNP 2 SUB +said VBD 0 ROOT +the DT 5 NMOD +Whiting NNP 5 NMOD +field NN 6 SUB +started VBD 2 VMOD +production NN 6 OBJ +Tuesday NNP 6 VMOD +""" + ) + dg2 = DependencyGraph( + """\ +John NNP 2 SUB +sees VBP 0 ROOT +Mary NNP 2 OBJ +""" + ) + dg3 = DependencyGraph( + """\ +a DT 2 SPEC +man NN 3 SUBJ +walks VB 0 ROOT +""" + ) + dg4 = DependencyGraph( + """\ +every DT 2 SPEC +girl NN 3 SUBJ +chases VB 0 ROOT +a DT 5 SPEC +dog NN 3 OBJ +""" + ) + + depgraphs = [dg1, dg2, dg3, dg4] + for dg in depgraphs: + print(FStructure.read_depgraph(dg)) + + +if __name__ == "__main__": + demo_read_depgraph() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/linearlogic.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/linearlogic.py new file mode 100644 index 0000000000000000000000000000000000000000..474f835e2f1bbe19fe2486e259bea2d08fa473b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/linearlogic.py @@ -0,0 +1,482 @@ +# Natural Language Toolkit: Linear Logic +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.internals import Counter +from nltk.sem.logic import APP, LogicParser + +_counter = Counter() + + +class Tokens: + # Punctuation + OPEN = "(" + CLOSE = ")" + + # Operations + IMP = "-o" + + PUNCT = [OPEN, CLOSE] + TOKENS = PUNCT + [IMP] + + +class LinearLogicParser(LogicParser): + """A linear logic expression parser.""" + + def __init__(self): + LogicParser.__init__(self) + + self.operator_precedence = {APP: 1, Tokens.IMP: 2, None: 3} + self.right_associated_operations += [Tokens.IMP] + + def get_all_symbols(self): + return Tokens.TOKENS + + def handle(self, tok, context): + if tok not in Tokens.TOKENS: + return self.handle_variable(tok, context) + elif tok == Tokens.OPEN: + return 
self.handle_open(tok, context) + + def get_BooleanExpression_factory(self, tok): + if tok == Tokens.IMP: + return ImpExpression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def attempt_ApplicationExpression(self, expression, context): + """Attempt to make an application expression. If the next tokens + are an argument in parens, then the argument expression is a + function being applied to the arguments. Otherwise, return the + argument expression.""" + if self.has_priority(APP, context): + if self.inRange(0) and self.token(0) == Tokens.OPEN: + self.token() # swallow then open paren + argument = self.process_next_expression(APP) + self.assertNextToken(Tokens.CLOSE) + expression = ApplicationExpression(expression, argument, None) + return expression + + def make_VariableExpression(self, name): + if name[0].isupper(): + return VariableExpression(name) + else: + return ConstantExpression(name) + + +class Expression: + + _linear_logic_parser = LinearLogicParser() + + @classmethod + def fromstring(cls, s): + return cls._linear_logic_parser.parse(s) + + def applyto(self, other, other_indices=None): + return ApplicationExpression(self, other, other_indices) + + def __call__(self, other): + return self.applyto(other) + + def __repr__(self): + return f"<{self.__class__.__name__} {self}>" + + +class AtomicExpression(Expression): + def __init__(self, name, dependencies=None): + """ + :param name: str for the constant name + :param dependencies: list of int for the indices on which this atom is dependent + """ + assert isinstance(name, str) + self.name = name + + if not dependencies: + dependencies = [] + self.dependencies = dependencies + + def simplify(self, bindings=None): + """ + If 'self' is bound by 'bindings', return the atomic to which it is bound. + Otherwise, return self. + + :param bindings: ``BindingDict`` A dictionary of bindings used to simplify + :return: ``AtomicExpression`` + """ + if bindings and self in bindings: + return bindings[self] + else: + return self + + def compile_pos(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + self.dependencies = [] + return (self, []) + + def compile_neg(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + self.dependencies = [] + return (self, []) + + def initialize_labels(self, fstruct): + self.name = fstruct.initialize_label(self.name.lower()) + + def __eq__(self, other): + return self.__class__ == other.__class__ and self.name == other.name + + def __ne__(self, other): + return not self == other + + def __str__(self): + accum = self.name + if self.dependencies: + accum += "%s" % self.dependencies + return accum + + def __hash__(self): + return hash(self.name) + + +class ConstantExpression(AtomicExpression): + def unify(self, other, bindings): + """ + If 'other' is a constant, then it must be equal to 'self'. If 'other' is a variable, + then it must not be bound to anything other than 'self'. 
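# --- Illustrative aside (not part of this patch): parsing linear-logic formulas
# with the parser above; the same inputs appear in this module's demo() below.
from nltk.sem.linearlogic import Expression

print(Expression.fromstring(r"(g -o f)"))                  # (g -o f)
print(Expression.fromstring(r"g -o h -o f"))               # -o is right-associative: (g -o (h -o f))
print(Expression.fromstring(r"(g -o f)(g)").simplify())    # application yields the consequent: f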
+ + :param other: ``Expression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and any new binding + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, Expression) + if isinstance(other, VariableExpression): + try: + return bindings + BindingDict([(other, self)]) + except VariableBindingException: + pass + elif self == other: + return bindings + raise UnificationException(self, other, bindings) + + +class VariableExpression(AtomicExpression): + def unify(self, other, bindings): + """ + 'self' must not be bound to anything other than 'other'. + + :param other: ``Expression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and the new binding + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, Expression) + try: + if self == other: + return bindings + else: + return bindings + BindingDict([(self, other)]) + except VariableBindingException as e: + raise UnificationException(self, other, bindings) from e + + +class ImpExpression(Expression): + def __init__(self, antecedent, consequent): + """ + :param antecedent: ``Expression`` for the antecedent + :param consequent: ``Expression`` for the consequent + """ + assert isinstance(antecedent, Expression) + assert isinstance(consequent, Expression) + self.antecedent = antecedent + self.consequent = consequent + + def simplify(self, bindings=None): + return self.__class__( + self.antecedent.simplify(bindings), self.consequent.simplify(bindings) + ) + + def unify(self, other, bindings): + """ + Both the antecedent and consequent of 'self' and 'other' must unify. 
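# --- Illustrative aside (not part of this patch): unification with the atomic
# classes defined above.  A variable unifies with a constant by adding a binding;
# two distinct constants do not unify.
from nltk.sem.linearlogic import (
    BindingDict,
    ConstantExpression,
    UnificationException,
    VariableExpression,
)

bindings = VariableExpression("G").unify(ConstantExpression("g"), BindingDict())
print(bindings)                                   # {G: g}
try:
    ConstantExpression("g").unify(ConstantExpression("f"), BindingDict())
except UnificationException as e:
    print(e)                                      # g and f cannot be unified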
+ + :param other: ``ImpExpression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and any new bindings + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, ImpExpression) + try: + return ( + bindings + + self.antecedent.unify(other.antecedent, bindings) + + self.consequent.unify(other.consequent, bindings) + ) + except VariableBindingException as e: + raise UnificationException(self, other, bindings) from e + + def compile_pos(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + (a, a_new) = self.antecedent.compile_neg(index_counter, glueFormulaFactory) + (c, c_new) = self.consequent.compile_pos(index_counter, glueFormulaFactory) + return (ImpExpression(a, c), a_new + c_new) + + def compile_neg(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,list of ``GlueFormula``) for the compiled linear logic and any newly created glue formulas + """ + (a, a_new) = self.antecedent.compile_pos(index_counter, glueFormulaFactory) + (c, c_new) = self.consequent.compile_neg(index_counter, glueFormulaFactory) + fresh_index = index_counter.get() + c.dependencies.append(fresh_index) + new_v = glueFormulaFactory("v%s" % fresh_index, a, {fresh_index}) + return (c, a_new + c_new + [new_v]) + + def initialize_labels(self, fstruct): + self.antecedent.initialize_labels(fstruct) + self.consequent.initialize_labels(fstruct) + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.antecedent == other.antecedent + and self.consequent == other.consequent + ) + + def __ne__(self, other): + return not self == other + + def __str__(self): + return "{}{} {} {}{}".format( + Tokens.OPEN, + self.antecedent, + Tokens.IMP, + self.consequent, + Tokens.CLOSE, + ) + + def __hash__(self): + return hash(f"{hash(self.antecedent)}{Tokens.IMP}{hash(self.consequent)}") + + +class ApplicationExpression(Expression): + def __init__(self, function, argument, argument_indices=None): + """ + :param function: ``Expression`` for the function + :param argument: ``Expression`` for the argument + :param argument_indices: set for the indices of the glue formula from which the argument came + :raise LinearLogicApplicationException: If 'function' cannot be applied to 'argument' given 'argument_indices'. + """ + function_simp = function.simplify() + argument_simp = argument.simplify() + + assert isinstance(function_simp, ImpExpression) + assert isinstance(argument_simp, Expression) + + bindings = BindingDict() + + try: + if isinstance(function, ApplicationExpression): + bindings += function.bindings + if isinstance(argument, ApplicationExpression): + bindings += argument.bindings + bindings += function_simp.antecedent.unify(argument_simp, bindings) + except UnificationException as e: + raise LinearLogicApplicationException( + f"Cannot apply {function_simp} to {argument_simp}. 
{e}" + ) from e + + # If you are running it on complied premises, more conditions apply + if argument_indices: + # A.dependencies of (A -o (B -o C)) must be a proper subset of argument_indices + if not set(function_simp.antecedent.dependencies) < argument_indices: + raise LinearLogicApplicationException( + "Dependencies unfulfilled when attempting to apply Linear Logic formula %s to %s" + % (function_simp, argument_simp) + ) + if set(function_simp.antecedent.dependencies) == argument_indices: + raise LinearLogicApplicationException( + "Dependencies not a proper subset of indices when attempting to apply Linear Logic formula %s to %s" + % (function_simp, argument_simp) + ) + + self.function = function + self.argument = argument + self.bindings = bindings + + def simplify(self, bindings=None): + """ + Since function is an implication, return its consequent. There should be + no need to check that the application is valid since the checking is done + by the constructor. + + :param bindings: ``BindingDict`` A dictionary of bindings used to simplify + :return: ``Expression`` + """ + if not bindings: + bindings = self.bindings + + return self.function.simplify(bindings).consequent + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.function == other.function + and self.argument == other.argument + ) + + def __ne__(self, other): + return not self == other + + def __str__(self): + return "%s" % self.function + Tokens.OPEN + "%s" % self.argument + Tokens.CLOSE + + def __hash__(self): + return hash(f"{hash(self.antecedent)}{Tokens.OPEN}{hash(self.consequent)}") + + +class BindingDict: + def __init__(self, bindings=None): + """ + :param bindings: + list [(``VariableExpression``, ``AtomicExpression``)] to initialize the dictionary + dict {``VariableExpression``: ``AtomicExpression``} to initialize the dictionary + """ + self.d = {} + + if isinstance(bindings, dict): + bindings = bindings.items() + + if bindings: + for (v, b) in bindings: + self[v] = b + + def __setitem__(self, variable, binding): + """ + A binding is consistent with the dict if its variable is not already bound, OR if its + variable is already bound to its argument. 
+ + :param variable: ``VariableExpression`` The variable bind + :param binding: ``Expression`` The expression to which 'variable' should be bound + :raise VariableBindingException: If the variable cannot be bound in this dictionary + """ + assert isinstance(variable, VariableExpression) + assert isinstance(binding, Expression) + + assert variable != binding + + existing = self.d.get(variable, None) + + if not existing or binding == existing: + self.d[variable] = binding + else: + raise VariableBindingException( + "Variable %s already bound to another value" % (variable) + ) + + def __getitem__(self, variable): + """ + Return the expression to which 'variable' is bound + """ + assert isinstance(variable, VariableExpression) + + intermediate = self.d[variable] + while intermediate: + try: + intermediate = self.d[intermediate] + except KeyError: + return intermediate + + def __contains__(self, item): + return item in self.d + + def __add__(self, other): + """ + :param other: ``BindingDict`` The dict with which to combine self + :return: ``BindingDict`` A new dict containing all the elements of both parameters + :raise VariableBindingException: If the parameter dictionaries are not consistent with each other + """ + try: + combined = BindingDict() + for v in self.d: + combined[v] = self.d[v] + for v in other.d: + combined[v] = other.d[v] + return combined + except VariableBindingException as e: + raise VariableBindingException( + "Attempting to add two contradicting" + " VariableBindingsLists: %s, %s" % (self, other) + ) from e + + def __ne__(self, other): + return not self == other + + def __eq__(self, other): + if not isinstance(other, BindingDict): + raise TypeError + return self.d == other.d + + def __str__(self): + return "{" + ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) + "}" + + def __repr__(self): + return "BindingDict: %s" % self + + +class VariableBindingException(Exception): + pass + + +class UnificationException(Exception): + def __init__(self, a, b, bindings): + Exception.__init__(self, f"Cannot unify {a} with {b} given {bindings}") + + +class LinearLogicApplicationException(Exception): + pass + + +def demo(): + lexpr = Expression.fromstring + + print(lexpr(r"f")) + print(lexpr(r"(g -o f)")) + print(lexpr(r"((g -o G) -o G)")) + print(lexpr(r"g -o h -o f")) + print(lexpr(r"(g -o f)(g)").simplify()) + print(lexpr(r"(H -o f)(g)").simplify()) + print(lexpr(r"((g -o G) -o G)((g -o f))").simplify()) + print(lexpr(r"(H -o H)((g -o f))").simplify()) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/logic.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/logic.py new file mode 100644 index 0000000000000000000000000000000000000000..aed3a118760b0a9111fc0445df870231f943e1e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/logic.py @@ -0,0 +1,2065 @@ +# Natural Language Toolkit: Logic +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +A version of first order predicate logic, built on +top of the typed lambda calculus. 
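# --- Illustrative aside (not part of this patch): the usual entry point for this
# module is Expression.fromstring (defined below), which parses a formula string;
# simplify() then performs beta reduction.
from nltk.sem.logic import Expression

read_expr = Expression.fromstring
e = read_expr(r"\x.walk(x)(john)")    # a lambda abstraction applied to an argument
print(e.simplify())                   # walk(john)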
+""" + +import operator +import re +from collections import defaultdict +from functools import reduce, total_ordering + +from nltk.internals import Counter +from nltk.util import Trie + +APP = "APP" + +_counter = Counter() + + +class Tokens: + LAMBDA = "\\" + LAMBDA_LIST = ["\\"] + + # Quantifiers + EXISTS = "exists" + EXISTS_LIST = ["some", "exists", "exist"] + ALL = "all" + ALL_LIST = ["all", "forall"] + IOTA = "iota" + IOTA_LIST = ["iota"] + + # Punctuation + DOT = "." + OPEN = "(" + CLOSE = ")" + COMMA = "," + + # Operations + NOT = "-" + NOT_LIST = ["not", "-", "!"] + AND = "&" + AND_LIST = ["and", "&", "^"] + OR = "|" + OR_LIST = ["or", "|"] + IMP = "->" + IMP_LIST = ["implies", "->", "=>"] + IFF = "<->" + IFF_LIST = ["iff", "<->", "<=>"] + EQ = "=" + EQ_LIST = ["=", "=="] + NEQ = "!=" + NEQ_LIST = ["!="] + + # Collections of tokens + BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST + QUANTS = EXISTS_LIST + ALL_LIST + IOTA_LIST + PUNCT = [DOT, OPEN, CLOSE, COMMA] + + TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST + + # Special + SYMBOLS = [x for x in TOKENS if re.match(r"^[-\\.(),!&^|>=<]*$", x)] + + +def boolean_ops(): + """ + Boolean operators + """ + names = ["negation", "conjunction", "disjunction", "implication", "equivalence"] + for pair in zip(names, [Tokens.NOT, Tokens.AND, Tokens.OR, Tokens.IMP, Tokens.IFF]): + print("%-15s\t%s" % pair) + + +def equality_preds(): + """ + Equality predicates + """ + names = ["equality", "inequality"] + for pair in zip(names, [Tokens.EQ, Tokens.NEQ]): + print("%-15s\t%s" % pair) + + +def binding_ops(): + """ + Binding operators + """ + names = ["existential", "universal", "lambda"] + for pair in zip(names, [Tokens.EXISTS, Tokens.ALL, Tokens.LAMBDA, Tokens.IOTA]): + print("%-15s\t%s" % pair) + + +class LogicParser: + """A lambda calculus expression parser.""" + + def __init__(self, type_check=False): + """ + :param type_check: should type checking be performed + to their types? + :type type_check: bool + """ + assert isinstance(type_check, bool) + + self._currentIndex = 0 + self._buffer = [] + self.type_check = type_check + + """A list of tuples of quote characters. The 4-tuple is comprised + of the start character, the end character, the escape character, and + a boolean indicating whether the quotes should be included in the + result. Quotes are used to signify that a token should be treated as + atomic, ignoring any special characters within the token. The escape + character allows the quote end character to be used within the quote. + If True, the boolean indicates that the final token should contain the + quote and escape characters. + This method exists to be overridden""" + self.quote_chars = [] + + self.operator_precedence = dict( + [(x, 1) for x in Tokens.LAMBDA_LIST] + + [(x, 2) for x in Tokens.NOT_LIST] + + [(APP, 3)] + + [(x, 4) for x in Tokens.EQ_LIST + Tokens.NEQ_LIST] + + [(x, 5) for x in Tokens.QUANTS] + + [(x, 6) for x in Tokens.AND_LIST] + + [(x, 7) for x in Tokens.OR_LIST] + + [(x, 8) for x in Tokens.IMP_LIST] + + [(x, 9) for x in Tokens.IFF_LIST] + + [(None, 10)] + ) + self.right_associated_operations = [APP] + + def parse(self, data, signature=None): + """ + Parse the expression. 
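# --- Illustrative aside (not part of this patch): the precedence table above
# makes '&' bind tighter than '|', and '->' tighter than '<->'.
from nltk.sem.logic import LogicParser

lp = LogicParser()
print(lp.parse("p & q | r"))      # groups as ((p & q) | r)
print(lp.parse("p -> q <-> r"))   # groups as ((p -> q) <-> r)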
+ + :param data: str for the input to be parsed + :param signature: ``dict`` that maps variable names to type + strings + :returns: a parsed Expression + """ + data = data.rstrip() + + self._currentIndex = 0 + self._buffer, mapping = self.process(data) + + try: + result = self.process_next_expression(None) + if self.inRange(0): + raise UnexpectedTokenException(self._currentIndex + 1, self.token(0)) + except LogicalExpressionException as e: + msg = "{}\n{}\n{}^".format(e, data, " " * mapping[e.index - 1]) + raise LogicalExpressionException(None, msg) from e + + if self.type_check: + result.typecheck(signature) + + return result + + def process(self, data): + """Split the data into tokens""" + out = [] + mapping = {} + tokenTrie = Trie(self.get_all_symbols()) + token = "" + data_idx = 0 + token_start_idx = data_idx + while data_idx < len(data): + cur_data_idx = data_idx + quoted_token, data_idx = self.process_quoted_token(data_idx, data) + if quoted_token: + if not token: + token_start_idx = cur_data_idx + token += quoted_token + continue + + st = tokenTrie + c = data[data_idx] + symbol = "" + while c in st: + symbol += c + st = st[c] + if len(data) - data_idx > len(symbol): + c = data[data_idx + len(symbol)] + else: + break + if Trie.LEAF in st: + # token is a complete symbol + if token: + mapping[len(out)] = token_start_idx + out.append(token) + token = "" + mapping[len(out)] = data_idx + out.append(symbol) + data_idx += len(symbol) + else: + if data[data_idx] in " \t\n": # any whitespace + if token: + mapping[len(out)] = token_start_idx + out.append(token) + token = "" + else: + if not token: + token_start_idx = data_idx + token += data[data_idx] + data_idx += 1 + if token: + mapping[len(out)] = token_start_idx + out.append(token) + mapping[len(out)] = len(data) + mapping[len(out) + 1] = len(data) + 1 + return out, mapping + + def process_quoted_token(self, data_idx, data): + token = "" + c = data[data_idx] + i = data_idx + for start, end, escape, incl_quotes in self.quote_chars: + if c == start: + if incl_quotes: + token += c + i += 1 + while data[i] != end: + if data[i] == escape: + if incl_quotes: + token += data[i] + i += 1 + if len(data) == i: # if there are no more chars + raise LogicalExpressionException( + None, + "End of input reached. " + "Escape character [%s] found at end." % escape, + ) + token += data[i] + else: + token += data[i] + i += 1 + if len(data) == i: + raise LogicalExpressionException( + None, "End of input reached. " "Expected: [%s]" % end + ) + if incl_quotes: + token += data[i] + i += 1 + if not token: + raise LogicalExpressionException(None, "Empty quoted token found") + break + return token, i + + def get_all_symbols(self): + """This method exists to be overridden""" + return Tokens.SYMBOLS + + def inRange(self, location): + """Return TRUE if the given location is within the buffer""" + return self._currentIndex + location < len(self._buffer) + + def token(self, location=None): + """Get the next waiting token. 
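# --- Illustrative aside (not part of this patch): what the process() tokenizer
# above produces for a typical formula string.
from nltk.sem.logic import LogicParser

tokens, _mapping = LogicParser().process("exists x.love(x, jones)")
print(tokens)   # ['exists', 'x', '.', 'love', '(', 'x', ',', 'jones', ')']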
If a location is given, then + return the token at currentIndex+location without advancing + currentIndex; setting it gives lookahead/lookback capability.""" + try: + if location is None: + tok = self._buffer[self._currentIndex] + self._currentIndex += 1 + else: + tok = self._buffer[self._currentIndex + location] + return tok + except IndexError as e: + raise ExpectedMoreTokensException(self._currentIndex + 1) from e + + def isvariable(self, tok): + return tok not in Tokens.TOKENS + + def process_next_expression(self, context): + """Parse the next complete expression from the stream and return it.""" + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException( + self._currentIndex + 1, message="Expression expected." + ) from e + + accum = self.handle(tok, context) + + if not accum: + raise UnexpectedTokenException( + self._currentIndex, tok, message="Expression expected." + ) + + return self.attempt_adjuncts(accum, context) + + def handle(self, tok, context): + """This method is intended to be overridden for logics that + use different operators or expressions""" + if self.isvariable(tok): + return self.handle_variable(tok, context) + + elif tok in Tokens.NOT_LIST: + return self.handle_negation(tok, context) + + elif tok in Tokens.LAMBDA_LIST: + return self.handle_lambda(tok, context) + + elif tok in Tokens.QUANTS: + return self.handle_quant(tok, context) + + elif tok == Tokens.OPEN: + return self.handle_open(tok, context) + + def attempt_adjuncts(self, expression, context): + cur_idx = None + while cur_idx != self._currentIndex: # while adjuncts are added + cur_idx = self._currentIndex + expression = self.attempt_EqualityExpression(expression, context) + expression = self.attempt_ApplicationExpression(expression, context) + expression = self.attempt_BooleanExpression(expression, context) + return expression + + def handle_negation(self, tok, context): + return self.make_NegatedExpression(self.process_next_expression(Tokens.NOT)) + + def make_NegatedExpression(self, expression): + return NegatedExpression(expression) + + def handle_variable(self, tok, context): + # It's either: 1) a predicate expression: sees(x,y) + # 2) an application expression: P(x) + # 3) a solo variable: john OR x + accum = self.make_VariableExpression(tok) + if self.inRange(0) and self.token(0) == Tokens.OPEN: + # The predicate has arguments + if not isinstance(accum, FunctionVariableExpression) and not isinstance( + accum, ConstantExpression + ): + raise LogicalExpressionException( + self._currentIndex, + "'%s' is an illegal predicate name. " + "Individual variables may not be used as " + "predicates." % tok, + ) + self.token() # swallow the Open Paren + + # curry the arguments + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + while self.inRange(0) and self.token(0) == Tokens.COMMA: + self.token() # swallow the comma + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + self.assertNextToken(Tokens.CLOSE) + return accum + + def get_next_token_variable(self, description): + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException(e.index, "Variable expected.") from e + if isinstance(self.make_VariableExpression(tok), ConstantExpression): + raise LogicalExpressionException( + self._currentIndex, + "'%s' is an illegal variable name. " + "Constants may not be %s." 
% (tok, description), + ) + return Variable(tok) + + def handle_lambda(self, tok, context): + # Expression is a lambda expression + if not self.inRange(0): + raise ExpectedMoreTokensException( + self._currentIndex + 2, + message="Variable and Expression expected following lambda operator.", + ) + vars = [self.get_next_token_variable("abstracted")] + while True: + if not self.inRange(0) or ( + self.token(0) == Tokens.DOT and not self.inRange(1) + ): + raise ExpectedMoreTokensException( + self._currentIndex + 2, message="Expression expected." + ) + if not self.isvariable(self.token(0)): + break + # Support expressions like: \x y.M == \x.\y.M + vars.append(self.get_next_token_variable("abstracted")) + if self.inRange(0) and self.token(0) == Tokens.DOT: + self.token() # swallow the dot + + accum = self.process_next_expression(tok) + while vars: + accum = self.make_LambdaExpression(vars.pop(), accum) + return accum + + def handle_quant(self, tok, context): + # Expression is a quantified expression: some x.M + factory = self.get_QuantifiedExpression_factory(tok) + + if not self.inRange(0): + raise ExpectedMoreTokensException( + self._currentIndex + 2, + message="Variable and Expression expected following quantifier '%s'." + % tok, + ) + vars = [self.get_next_token_variable("quantified")] + while True: + if not self.inRange(0) or ( + self.token(0) == Tokens.DOT and not self.inRange(1) + ): + raise ExpectedMoreTokensException( + self._currentIndex + 2, message="Expression expected." + ) + if not self.isvariable(self.token(0)): + break + # Support expressions like: some x y.M == some x.some y.M + vars.append(self.get_next_token_variable("quantified")) + if self.inRange(0) and self.token(0) == Tokens.DOT: + self.token() # swallow the dot + + accum = self.process_next_expression(tok) + while vars: + accum = self.make_QuanifiedExpression(factory, vars.pop(), accum) + return accum + + def get_QuantifiedExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different quantifiers""" + if tok in Tokens.EXISTS_LIST: + return ExistsExpression + elif tok in Tokens.ALL_LIST: + return AllExpression + elif tok in Tokens.IOTA_LIST: + return IotaExpression + else: + self.assertToken(tok, Tokens.QUANTS) + + def make_QuanifiedExpression(self, factory, variable, term): + return factory(variable, term) + + def handle_open(self, tok, context): + # Expression is in parens + accum = self.process_next_expression(None) + self.assertNextToken(Tokens.CLOSE) + return accum + + def attempt_EqualityExpression(self, expression, context): + """Attempt to make an equality expression. If the next token is an + equality operator, then an EqualityExpression will be returned. + Otherwise, the parameter will be returned.""" + if self.inRange(0): + tok = self.token(0) + if tok in Tokens.EQ_LIST + Tokens.NEQ_LIST and self.has_priority( + tok, context + ): + self.token() # swallow the "=" or "!=" + expression = self.make_EqualityExpression( + expression, self.process_next_expression(tok) + ) + if tok in Tokens.NEQ_LIST: + expression = self.make_NegatedExpression(expression) + return expression + + def make_EqualityExpression(self, first, second): + """This method serves as a hook for other logic parsers that + have different equality expression classes""" + return EqualityExpression(first, second) + + def attempt_BooleanExpression(self, expression, context): + """Attempt to make a boolean expression. If the next token is a boolean + operator, then a BooleanExpression will be returned. 
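# --- Illustrative aside (not part of this patch): the binder handlers above
# accept several variables at once, and '!=' is parsed as a negated equality.
from nltk.sem.logic import LogicParser

lp = LogicParser()
print(lp.parse(r"\x y.see(x,y)"))          # sugar for \x.\y.see(x,y)
print(lp.parse("exists x y.like(x,y)"))    # sugar for exists x.exists y.like(x,y)
print(lp.parse("john != mary"))            # -(john = mary)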
Otherwise, the + parameter will be returned.""" + while self.inRange(0): + tok = self.token(0) + factory = self.get_BooleanExpression_factory(tok) + if factory and self.has_priority(tok, context): + self.token() # swallow the operator + expression = self.make_BooleanExpression( + factory, expression, self.process_next_expression(tok) + ) + else: + break + return expression + + def get_BooleanExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different boolean operators""" + if tok in Tokens.AND_LIST: + return AndExpression + elif tok in Tokens.OR_LIST: + return OrExpression + elif tok in Tokens.IMP_LIST: + return ImpExpression + elif tok in Tokens.IFF_LIST: + return IffExpression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def attempt_ApplicationExpression(self, expression, context): + """Attempt to make an application expression. The next tokens are + a list of arguments in parens, then the argument expression is a + function being applied to the arguments. Otherwise, return the + argument expression.""" + if self.has_priority(APP, context): + if self.inRange(0) and self.token(0) == Tokens.OPEN: + if ( + not isinstance(expression, LambdaExpression) + and not isinstance(expression, ApplicationExpression) + and not isinstance(expression, FunctionVariableExpression) + and not isinstance(expression, ConstantExpression) + ): + raise LogicalExpressionException( + self._currentIndex, + ("The function '%s" % expression) + + "' is not a Lambda Expression, an " + "Application Expression, or a " + "functional predicate, so it may " + "not take arguments.", + ) + self.token() # swallow then open paren + # curry the arguments + accum = self.make_ApplicationExpression( + expression, self.process_next_expression(APP) + ) + while self.inRange(0) and self.token(0) == Tokens.COMMA: + self.token() # swallow the comma + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + self.assertNextToken(Tokens.CLOSE) + return accum + return expression + + def make_ApplicationExpression(self, function, argument): + return ApplicationExpression(function, argument) + + def make_VariableExpression(self, name): + return VariableExpression(Variable(name)) + + def make_LambdaExpression(self, variable, term): + return LambdaExpression(variable, term) + + def has_priority(self, operation, context): + return self.operator_precedence[operation] < self.operator_precedence[ + context + ] or ( + operation in self.right_associated_operations + and self.operator_precedence[operation] == self.operator_precedence[context] + ) + + def assertNextToken(self, expected): + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException( + e.index, message="Expected token '%s'." 
% expected + ) from e + + if isinstance(expected, list): + if tok not in expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + else: + if tok != expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + + def assertToken(self, tok, expected): + if isinstance(expected, list): + if tok not in expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + else: + if tok != expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + + def __repr__(self): + if self.inRange(0): + msg = "Next token: " + self.token(0) + else: + msg = "No more tokens" + return "<" + self.__class__.__name__ + ": " + msg + ">" + + +def read_logic(s, logic_parser=None, encoding=None): + """ + Convert a file of First Order Formulas into a list of {Expression}s. + + :param s: the contents of the file + :type s: str + :param logic_parser: The parser to be used to parse the logical expression + :type logic_parser: LogicParser + :param encoding: the encoding of the input string, if it is binary + :type encoding: str + :return: a list of parsed formulas. + :rtype: list(Expression) + """ + if encoding is not None: + s = s.decode(encoding) + if logic_parser is None: + logic_parser = LogicParser() + + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(logic_parser.parse(line)) + except LogicalExpressionException as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return statements + + +@total_ordering +class Variable: + def __init__(self, name): + """ + :param name: the name of the variable + """ + assert isinstance(name, str), "%s is not a string" % name + self.name = name + + def __eq__(self, other): + return isinstance(other, Variable) and self.name == other.name + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Variable): + raise TypeError + return self.name < other.name + + def substitute_bindings(self, bindings): + return bindings.get(self, self) + + def __hash__(self): + return hash(self.name) + + def __str__(self): + return self.name + + def __repr__(self): + return "Variable('%s')" % self.name + + +def unique_variable(pattern=None, ignore=None): + """ + Return a new, unique variable. + + :param pattern: ``Variable`` that is being replaced. The new variable must + be the same type. + :param term: a set of ``Variable`` objects that should not be returned from + this function. 
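# --- Illustrative aside (not part of this patch): read_logic() above turns a
# block of text into a list of Expressions, skipping blank lines and '#' comments.
from nltk.sem.logic import read_logic

formulas = read_logic("""
# a tiny knowledge base
walk(john)
all x.(walk(x) -> move(x))
""")
for f in formulas:
    print(f)   # walk(john)   /   all x.(walk(x) -> move(x))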
+ :rtype: Variable + """ + if pattern is not None: + if is_indvar(pattern.name): + prefix = "z" + elif is_funcvar(pattern.name): + prefix = "F" + elif is_eventvar(pattern.name): + prefix = "e0" + else: + assert False, "Cannot generate a unique constant" + else: + prefix = "z" + + v = Variable(f"{prefix}{_counter.get()}") + while ignore is not None and v in ignore: + v = Variable(f"{prefix}{_counter.get()}") + return v + + +def skolem_function(univ_scope=None): + """ + Return a skolem function over the variables in univ_scope + param univ_scope + """ + skolem = VariableExpression(Variable("F%s" % _counter.get())) + if univ_scope: + for v in list(univ_scope): + skolem = skolem(VariableExpression(v)) + return skolem + + +class Type: + def __repr__(self): + return "%s" % self + + def __hash__(self): + return hash("%s" % self) + + @classmethod + def fromstring(cls, s): + return read_type(s) + + +class ComplexType(Type): + def __init__(self, first, second): + assert isinstance(first, Type), "%s is not a Type" % first + assert isinstance(second, Type), "%s is not a Type" % second + self.first = first + self.second = second + + def __eq__(self, other): + return ( + isinstance(other, ComplexType) + and self.first == other.first + and self.second == other.second + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + if isinstance(other, ComplexType): + return self.first.matches(other.first) and self.second.matches(other.second) + else: + return self == ANY_TYPE + + def resolve(self, other): + if other == ANY_TYPE: + return self + elif isinstance(other, ComplexType): + f = self.first.resolve(other.first) + s = self.second.resolve(other.second) + if f and s: + return ComplexType(f, s) + else: + return None + elif self == ANY_TYPE: + return other + else: + return None + + def __str__(self): + if self == ANY_TYPE: + return "%s" % ANY_TYPE + else: + return f"<{self.first},{self.second}>" + + def str(self): + if self == ANY_TYPE: + return ANY_TYPE.str() + else: + return f"({self.first.str()} -> {self.second.str()})" + + +class BasicType(Type): + def __eq__(self, other): + return isinstance(other, BasicType) and ("%s" % self) == ("%s" % other) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + return other == ANY_TYPE or self == other + + def resolve(self, other): + if self.matches(other): + return self + else: + return None + + +class EntityType(BasicType): + def __str__(self): + return "e" + + def str(self): + return "IND" + + +class TruthValueType(BasicType): + def __str__(self): + return "t" + + def str(self): + return "BOOL" + + +class EventType(BasicType): + def __str__(self): + return "v" + + def str(self): + return "EVENT" + + +class AnyType(BasicType, ComplexType): + def __init__(self): + pass + + @property + def first(self): + return self + + @property + def second(self): + return self + + def __eq__(self, other): + return isinstance(other, AnyType) or other.__eq__(self) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + return True + + def resolve(self, other): + return other + + def __str__(self): + return "?" 
+ + def str(self): + return "ANY" + + +TRUTH_TYPE = TruthValueType() +ENTITY_TYPE = EntityType() +EVENT_TYPE = EventType() +ANY_TYPE = AnyType() + + +def read_type(type_string): + assert isinstance(type_string, str) + type_string = type_string.replace(" ", "") # remove spaces + + if type_string[0] == "<": + assert type_string[-1] == ">" + paren_count = 0 + for i, char in enumerate(type_string): + if char == "<": + paren_count += 1 + elif char == ">": + paren_count -= 1 + assert paren_count > 0 + elif char == ",": + if paren_count == 1: + break + return ComplexType( + read_type(type_string[1:i]), read_type(type_string[i + 1 : -1]) + ) + elif type_string[0] == "%s" % ENTITY_TYPE: + return ENTITY_TYPE + elif type_string[0] == "%s" % TRUTH_TYPE: + return TRUTH_TYPE + elif type_string[0] == "%s" % ANY_TYPE: + return ANY_TYPE + else: + raise LogicalExpressionException( + None, "Unexpected character: '%s'." % type_string[0] + ) + + +class TypeException(Exception): + def __init__(self, msg): + super().__init__(msg) + + +class InconsistentTypeHierarchyException(TypeException): + def __init__(self, variable, expression=None): + if expression: + msg = ( + "The variable '%s' was found in multiple places with different" + " types in '%s'." % (variable, expression) + ) + else: + msg = ( + "The variable '%s' was found in multiple places with different" + " types." % (variable) + ) + super().__init__(msg) + + +class TypeResolutionException(TypeException): + def __init__(self, expression, other_type): + super().__init__( + "The type of '%s', '%s', cannot be resolved with type '%s'" + % (expression, expression.type, other_type) + ) + + +class IllegalTypeException(TypeException): + def __init__(self, expression, other_type, allowed_type): + super().__init__( + "Cannot set type of %s '%s' to '%s'; must match type '%s'." + % (expression.__class__.__name__, expression, other_type, allowed_type) + ) + + +def typecheck(expressions, signature=None): + """ + Ensure correct typing across a collection of ``Expression`` objects. + :param expressions: a collection of expressions + :param signature: dict that maps variable names to types (or string + representations of types) + """ + # typecheck and create master signature + for expression in expressions: + signature = expression.typecheck(signature) + # apply master signature to all expressions + for expression in expressions[:-1]: + expression.typecheck(signature) + return signature + + +class SubstituteBindingsI: + """ + An interface for classes that can perform substitutions for + variables. + """ + + def substitute_bindings(self, bindings): + """ + :return: The object that is obtained by replacing + each variable bound by ``bindings`` with its values. + Aliases are already resolved. (maybe?) + :rtype: (any) + """ + raise NotImplementedError() + + def variables(self): + """ + :return: A list of all variables in this object. 
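# --- Illustrative aside (not part of this patch): building and matching types
# with read_type() and the type constants defined above.
from nltk.sem.logic import ANY_TYPE, ENTITY_TYPE, read_type

t = read_type("<e, <e, t>>")          # the type of a two-place predicate
print(t)                              # <e,<e,t>>
print(t.first == ENTITY_TYPE)         # True
print(t.matches(ANY_TYPE))            # True: ANY_TYPE matches anything
print(t.resolve(ANY_TYPE) == t)       # True: resolving against ANY_TYPE is a no-op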
+ """ + raise NotImplementedError() + + +class Expression(SubstituteBindingsI): + """This is the base abstract object for all logical expressions""" + + _logic_parser = LogicParser() + _type_checking_logic_parser = LogicParser(type_check=True) + + @classmethod + def fromstring(cls, s, type_check=False, signature=None): + if type_check: + return cls._type_checking_logic_parser.parse(s, signature) + else: + return cls._logic_parser.parse(s, signature) + + def __call__(self, other, *additional): + accum = self.applyto(other) + for a in additional: + accum = accum(a) + return accum + + def applyto(self, other): + assert isinstance(other, Expression), "%s is not an Expression" % other + return ApplicationExpression(self, other) + + def __neg__(self): + return NegatedExpression(self) + + def negate(self): + """If this is a negated expression, remove the negation. + Otherwise add a negation.""" + return -self + + def __and__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return AndExpression(self, other) + + def __or__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return OrExpression(self, other) + + def __gt__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return ImpExpression(self, other) + + def __lt__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return IffExpression(self, other) + + def __eq__(self, other): + return NotImplemented + + def __ne__(self, other): + return not self == other + + def equiv(self, other, prover=None): + """ + Check for logical equivalence. + Pass the expression (self <-> other) to the theorem prover. + If the prover says it is valid, then the self and other are equal. + + :param other: an ``Expression`` to check equality against + :param prover: a ``nltk.inference.api.Prover`` + """ + assert isinstance(other, Expression), "%s is not an Expression" % other + + if prover is None: + from nltk.inference import Prover9 + + prover = Prover9() + bicond = IffExpression(self.simplify(), other.simplify()) + return prover.prove(bicond) + + def __hash__(self): + return hash(repr(self)) + + def substitute_bindings(self, bindings): + expr = self + for var in expr.variables(): + if var in bindings: + val = bindings[var] + if isinstance(val, Variable): + val = self.make_VariableExpression(val) + elif not isinstance(val, Expression): + raise ValueError( + "Can not substitute a non-expression " + "value into an expression: %r" % (val,) + ) + # Substitute bindings in the target value. + val = val.substitute_bindings(bindings) + # Replace var w/ the target value. + expr = expr.replace(var, val) + return expr.simplify() + + def typecheck(self, signature=None): + """ + Infer and check types. Raise exceptions if necessary. + + :param signature: dict that maps variable names to types (or string + representations of types) + :return: the signature, plus any additional type mappings + """ + sig = defaultdict(list) + if signature: + for key in signature: + val = signature[key] + varEx = VariableExpression(Variable(key)) + if isinstance(val, Type): + varEx.type = val + else: + varEx.type = read_type(val) + sig[key].append(varEx) + + self._set_type(signature=sig) + + return {key: sig[key][0].type for key in sig} + + def findtype(self, variable): + """ + Find the type of the given variable as it is used in this expression. 
+ For example, finding the type of "P" in "P(x) & Q(x,y)" yields "" + + :param variable: Variable + """ + raise NotImplementedError() + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """ + Set the type of this expression to be the given type. Raise type + exceptions where applicable. + + :param other_type: Type + :param signature: dict(str -> list(AbstractVariableExpression)) + """ + raise NotImplementedError() + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """ + Replace every instance of 'variable' with 'expression' + :param variable: ``Variable`` The variable to replace + :param expression: ``Expression`` The expression with which to replace it + :param replace_bound: bool Should bound variables be replaced? + :param alpha_convert: bool Alpha convert automatically to avoid name clashes? + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + + return self.visit_structured( + lambda e: e.replace(variable, expression, replace_bound, alpha_convert), + self.__class__, + ) + + def normalize(self, newvars=None): + """Rename auto-generated unique variables""" + + def get_indiv_vars(e): + if isinstance(e, IndividualVariableExpression): + return {e} + elif isinstance(e, AbstractVariableExpression): + return set() + else: + return e.visit( + get_indiv_vars, lambda parts: reduce(operator.or_, parts, set()) + ) + + result = self + for i, e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)): + if isinstance(e, EventVariableExpression): + newVar = e.__class__(Variable("e0%s" % (i + 1))) + elif isinstance(e, IndividualVariableExpression): + newVar = e.__class__(Variable("z%s" % (i + 1))) + else: + newVar = e + result = result.replace(e.variable, newVar, True) + return result + + def visit(self, function, combinator): + """ + Recursively visit subexpressions. Apply 'function' to each + subexpression and pass the result of each function application + to the 'combinator' for aggregation: + + return combinator(map(function, self.subexpressions)) + + Bound variables are neither applied upon by the function nor given to + the combinator. + :param function: ``Function`` to call on each subexpression + :param combinator: ``Function,R>`` to combine the results of the + function calls + :return: result of combination ``R`` + """ + raise NotImplementedError() + + def visit_structured(self, function, combinator): + """ + Recursively visit subexpressions. Apply 'function' to each + subexpression and pass the result of each function application + to the 'combinator' for aggregation. The combinator must have + the same signature as the constructor. The function is not + applied to bound variables, but they are passed to the + combinator. + :param function: ``Function`` to call on each subexpression + :param combinator: ``Function`` with the same signature as the + constructor, to combine the results of the function calls + :return: result of combination + """ + return self.visit(function, lambda parts: combinator(*parts)) + + def __repr__(self): + return f"<{self.__class__.__name__} {self}>" + + def __str__(self): + return self.str() + + def variables(self): + """ + Return a set of all the variables for binding substitution. + The variables returned include all free (non-bound) individual + variables and any variable starting with '?' or '@'. 
+ :return: set of ``Variable`` objects + """ + return self.free() | { + p for p in self.predicates() | self.constants() if re.match("^[?@]", p.name) + } + + def free(self): + """ + Return a set of all the free (non-bound) variables. This includes + both individual and predicate variables, but not constants. + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.free(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def constants(self): + """ + Return a set of individual constants (non-predicates). + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.constants(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def predicates(self): + """ + Return a set of predicates (constants, not variables). + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.predicates(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def simplify(self): + """ + :return: beta-converted version of this expression + """ + return self.visit_structured(lambda e: e.simplify(), self.__class__) + + def make_VariableExpression(self, variable): + return VariableExpression(variable) + + +class ApplicationExpression(Expression): + r""" + This class is used to represent two related types of logical expressions. + + The first is a Predicate Expression, such as "P(x,y)". A predicate + expression is comprised of a ``FunctionVariableExpression`` or + ``ConstantExpression`` as the predicate and a list of Expressions as the + arguments. + + The second is a an application of one expression to another, such as + "(\x.dog(x))(fido)". + + The reason Predicate Expressions are treated as Application Expressions is + that the Variable Expression predicate of the expression may be replaced + with another Expression, such as a LambdaExpression, which would mean that + the Predicate should be thought of as being applied to the arguments. + + The logical expression reader will always curry arguments in a application expression. + So, "\x y.see(x,y)(john,mary)" will be represented internally as + "((\x y.(see(x))(y))(john))(mary)". This simplifies the internals since + there will always be exactly one argument in an application. + + The str() method will usually print the curried forms of application + expressions. The one exception is when the the application expression is + really a predicate expression (ie, underlying function is an + ``AbstractVariableExpression``). This means that the example from above + will be returned as "(\x y.see(x,y)(john))(mary)". 
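+
+    For illustration, a small usage sketch (doctest-style, assuming the
+    default ``Expression.fromstring`` reader):
+
+        >>> e = Expression.fromstring('see(john,mary)')
+        >>> e.is_atom()
+        True
+        >>> print(e.pred)
+        see
+        >>> e.args
+        [<ConstantExpression john>, <ConstantExpression mary>]
+        >>> Expression.fromstring(r'(\x y.see(x,y))(john)(mary)').simplify() == e
+        True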
+ """ + + def __init__(self, function, argument): + """ + :param function: ``Expression``, for the function expression + :param argument: ``Expression``, for the argument + """ + assert isinstance(function, Expression), "%s is not an Expression" % function + assert isinstance(argument, Expression), "%s is not an Expression" % argument + self.function = function + self.argument = argument + + def simplify(self): + function = self.function.simplify() + argument = self.argument.simplify() + if isinstance(function, LambdaExpression): + return function.term.replace(function.variable, argument).simplify() + else: + return self.__class__(function, argument) + + @property + def type(self): + if isinstance(self.function.type, ComplexType): + return self.function.type.second + else: + return ANY_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + self.argument._set_type(ANY_TYPE, signature) + try: + self.function._set_type( + ComplexType(self.argument.type, other_type), signature + ) + except TypeResolutionException as e: + raise TypeException( + "The function '%s' is of type '%s' and cannot be applied " + "to '%s' of type '%s'. Its argument must match type '%s'." + % ( + self.function, + self.function.type, + self.argument, + self.argument.type, + self.function.type.first, + ) + ) from e + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if self.is_atom(): + function, args = self.uncurry() + else: + # It's not a predicate expression ("P(x,y)"), so leave args curried + function = self.function + args = [self.argument] + + found = [arg.findtype(variable) for arg in [function] + args] + + unique = [] + for f in found: + if f != ANY_TYPE: + if unique: + for u in unique: + if f.matches(u): + break + else: + unique.append(f) + + if len(unique) == 1: + return list(unique)[0] + else: + return ANY_TYPE + + def constants(self): + """:see: Expression.constants()""" + if isinstance(self.function, AbstractVariableExpression): + function_constants = set() + else: + function_constants = self.function.constants() + return function_constants | self.argument.constants() + + def predicates(self): + """:see: Expression.predicates()""" + if isinstance(self.function, ConstantExpression): + function_preds = {self.function.variable} + else: + function_preds = self.function.predicates() + return function_preds | self.argument.predicates() + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.function), function(self.argument)]) + + def __eq__(self, other): + return ( + isinstance(other, ApplicationExpression) + and self.function == other.function + and self.argument == other.argument + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + # uncurry the arguments and find the base function + if self.is_atom(): + function, args = self.uncurry() + arg_str = ",".join("%s" % arg for arg in args) + else: + # Leave arguments curried + function = self.function + arg_str = "%s" % self.argument + + function_str = "%s" % function + parenthesize_function = False + if isinstance(function, LambdaExpression): + if isinstance(function.term, ApplicationExpression): + if not isinstance(function.term.function, AbstractVariableExpression): + parenthesize_function = True + elif not 
isinstance(function.term, BooleanExpression): + parenthesize_function = True + elif isinstance(function, ApplicationExpression): + parenthesize_function = True + + if parenthesize_function: + function_str = Tokens.OPEN + function_str + Tokens.CLOSE + + return function_str + Tokens.OPEN + arg_str + Tokens.CLOSE + + def uncurry(self): + """ + Uncurry this application expression + + return: A tuple (base-function, arg-list) + """ + function = self.function + args = [self.argument] + while isinstance(function, ApplicationExpression): + # (\x.\y.sees(x,y)(john))(mary) + args.insert(0, function.argument) + function = function.function + return (function, args) + + @property + def pred(self): + """ + Return uncurried base-function. + If this is an atom, then the result will be a variable expression. + Otherwise, it will be a lambda expression. + """ + return self.uncurry()[0] + + @property + def args(self): + """ + Return uncurried arg-list + """ + return self.uncurry()[1] + + def is_atom(self): + """ + Is this expression an atom (as opposed to a lambda expression applied + to a term)? + """ + return isinstance(self.pred, AbstractVariableExpression) + + +@total_ordering +class AbstractVariableExpression(Expression): + """This class represents a variable to be used as a predicate or entity""" + + def __init__(self, variable): + """ + :param variable: ``Variable``, for the variable + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + self.variable = variable + + def simplify(self): + return self + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """:see: Expression.replace()""" + assert isinstance(variable, Variable), "%s is not an Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + if self.variable == variable: + return expression + else: + return self + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + resolution = other_type + for varEx in signature[self.variable.name]: + resolution = varEx.type.resolve(resolution) + if not resolution: + raise InconsistentTypeHierarchyException(self) + + signature[self.variable.name].append(self) + for varEx in signature[self.variable.name]: + varEx.type = resolution + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if self.variable == variable: + return self.type + else: + return ANY_TYPE + + def predicates(self): + """:see: Expression.predicates()""" + return set() + + def __eq__(self, other): + """Allow equality between instances of ``AbstractVariableExpression`` + subtypes.""" + return ( + isinstance(other, AbstractVariableExpression) + and self.variable == other.variable + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, AbstractVariableExpression): + raise TypeError + return self.variable < other.variable + + __hash__ = Expression.__hash__ + + def __str__(self): + return "%s" % self.variable + + +class IndividualVariableExpression(AbstractVariableExpression): + """This class represents variables that take the form of a single lowercase + character (other than 'e') followed by zero or more digits.""" + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, 
Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(ENTITY_TYPE): + raise IllegalTypeException(self, other_type, ENTITY_TYPE) + + signature[self.variable.name].append(self) + + def _get_type(self): + return ENTITY_TYPE + + type = property(_get_type, _set_type) + + def free(self): + """:see: Expression.free()""" + return {self.variable} + + def constants(self): + """:see: Expression.constants()""" + return set() + + +class FunctionVariableExpression(AbstractVariableExpression): + """This class represents variables that take the form of a single uppercase + character followed by zero or more digits.""" + + type = ANY_TYPE + + def free(self): + """:see: Expression.free()""" + return {self.variable} + + def constants(self): + """:see: Expression.constants()""" + return set() + + +class EventVariableExpression(IndividualVariableExpression): + """This class represents variables that take the form of a single lowercase + 'e' character followed by zero or more digits.""" + + type = EVENT_TYPE + + +class ConstantExpression(AbstractVariableExpression): + """This class represents variables that do not take the form of a single + character followed by zero or more digits.""" + + type = ENTITY_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if other_type == ANY_TYPE: + # entity type by default, for individuals + resolution = ENTITY_TYPE + else: + resolution = other_type + if self.type != ENTITY_TYPE: + resolution = resolution.resolve(self.type) + + for varEx in signature[self.variable.name]: + resolution = varEx.type.resolve(resolution) + if not resolution: + raise InconsistentTypeHierarchyException(self) + + signature[self.variable.name].append(self) + for varEx in signature[self.variable.name]: + varEx.type = resolution + + def free(self): + """:see: Expression.free()""" + return set() + + def constants(self): + """:see: Expression.constants()""" + return {self.variable} + + +def VariableExpression(variable): + """ + This is a factory method that instantiates and returns a subtype of + ``AbstractVariableExpression`` appropriate for the given variable. + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if is_indvar(variable.name): + return IndividualVariableExpression(variable) + elif is_funcvar(variable.name): + return FunctionVariableExpression(variable) + elif is_eventvar(variable.name): + return EventVariableExpression(variable) + else: + return ConstantExpression(variable) + + +class VariableBinderExpression(Expression): + """This an abstract class for any Expression that binds a variable in an + Expression. 
This includes LambdaExpressions and Quantified Expressions""" + + def __init__(self, variable, term): + """ + :param variable: ``Variable``, for the variable + :param term: ``Expression``, for the term + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(term, Expression), "%s is not an Expression" % term + self.variable = variable + self.term = term + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """:see: Expression.replace()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + # if the bound variable is the thing being replaced + if self.variable == variable: + if replace_bound: + assert isinstance(expression, AbstractVariableExpression), ( + "%s is not a AbstractVariableExpression" % expression + ) + return self.__class__( + expression.variable, + self.term.replace(variable, expression, True, alpha_convert), + ) + else: + return self + else: + # if the bound variable appears in the expression, then it must + # be alpha converted to avoid a conflict + if alpha_convert and self.variable in expression.free(): + self = self.alpha_convert(unique_variable(pattern=self.variable)) + + # replace in the term + return self.__class__( + self.variable, + self.term.replace(variable, expression, replace_bound, alpha_convert), + ) + + def alpha_convert(self, newvar): + """Rename all occurrences of the variable introduced by this variable + binder in the expression to ``newvar``. + :param newvar: ``Variable``, for the new variable + """ + assert isinstance(newvar, Variable), "%s is not a Variable" % newvar + return self.__class__( + newvar, self.term.replace(self.variable, VariableExpression(newvar), True) + ) + + def free(self): + """:see: Expression.free()""" + return self.term.free() - {self.variable} + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if variable == self.variable: + return ANY_TYPE + else: + return self.term.findtype(variable) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.term)]) + + def visit_structured(self, function, combinator): + """:see: Expression.visit_structured()""" + return combinator(self.variable, function(self.term)) + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. If we are comparing + \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(self, other.__class__) or isinstance(other, self.__class__): + if self.variable == other.variable: + return self.term == other.term + else: + # Comparing \x.M and \y.N. Relabel y in N with x and continue. 
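+                # For example, \x.P(x) == \y.P(y) holds: P(y) is relabelled
+                # to P(x), which then compares equal to the left-hand term.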
+ varex = VariableExpression(self.variable) + return self.term == other.term.replace(other.variable, varex) + else: + return False + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + +class LambdaExpression(VariableBinderExpression): + @property + def type(self): + return ComplexType(self.term.findtype(self.variable), self.term.type) + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + self.term._set_type(other_type.second, signature) + if not self.type.resolve(other_type): + raise TypeResolutionException(self, other_type) + + def __str__(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + return ( + Tokens.LAMBDA + + " ".join("%s" % v for v in variables) + + Tokens.DOT + + "%s" % term + ) + + +class QuantifiedExpression(VariableBinderExpression): + @property + def type(self): + return TRUTH_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.term._set_type(TRUTH_TYPE, signature) + + def __str__(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + return ( + self.getQuantifier() + + " " + + " ".join("%s" % v for v in variables) + + Tokens.DOT + + "%s" % term + ) + + +class ExistsExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.EXISTS + + +class AllExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.ALL + + +class IotaExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.IOTA + + +class NegatedExpression(Expression): + def __init__(self, term): + assert isinstance(term, Expression), "%s is not an Expression" % term + self.term = term + + @property + def type(self): + return TRUTH_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.term._set_type(TRUTH_TYPE, signature) + + def findtype(self, variable): + assert isinstance(variable, Variable), "%s is not a Variable" % variable + return self.term.findtype(variable) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.term)]) + + def negate(self): + """:see: Expression.negate()""" + return self.term + + def __eq__(self, other): + return isinstance(other, NegatedExpression) and self.term == other.term + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + return Tokens.NOT + "%s" % self.term + + +class BinaryExpression(Expression): + def __init__(self, first, second): + assert isinstance(first, Expression), "%s is not an Expression" % first + assert isinstance(second, Expression), "%s is not an Expression" % second + self.first = first + self.second = second + + @property + def type(self): + return TRUTH_TYPE + + def findtype(self, variable): + """:see 
Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + f = self.first.findtype(variable) + s = self.second.findtype(variable) + if f == s or s == ANY_TYPE: + return f + elif f == ANY_TYPE: + return s + else: + return ANY_TYPE + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.first), function(self.second)]) + + def __eq__(self, other): + return ( + (isinstance(self, other.__class__) or isinstance(other, self.__class__)) + and self.first == other.first + and self.second == other.second + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + first = self._str_subex(self.first) + second = self._str_subex(self.second) + return Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE + + def _str_subex(self, subex): + return "%s" % subex + + +class BooleanExpression(BinaryExpression): + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.first._set_type(TRUTH_TYPE, signature) + self.second._set_type(TRUTH_TYPE, signature) + + +class AndExpression(BooleanExpression): + """This class represents conjunctions""" + + def getOp(self): + return Tokens.AND + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, AndExpression): + return s[1:-1] + return s + + +class OrExpression(BooleanExpression): + """This class represents disjunctions""" + + def getOp(self): + return Tokens.OR + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, OrExpression): + return s[1:-1] + return s + + +class ImpExpression(BooleanExpression): + """This class represents implications""" + + def getOp(self): + return Tokens.IMP + + +class IffExpression(BooleanExpression): + """This class represents biconditionals""" + + def getOp(self): + return Tokens.IFF + + +class EqualityExpression(BinaryExpression): + """This class represents equality expressions like "(x = y)".""" + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.first._set_type(ENTITY_TYPE, signature) + self.second._set_type(ENTITY_TYPE, signature) + + def getOp(self): + return Tokens.EQ + + +### Utilities + + +class LogicalExpressionException(Exception): + def __init__(self, index, message): + self.index = index + Exception.__init__(self, message) + + +class UnexpectedTokenException(LogicalExpressionException): + def __init__(self, index, unexpected=None, expected=None, message=None): + if unexpected and expected: + msg = "Unexpected token: '%s'. " "Expected token '%s'." % ( + unexpected, + expected, + ) + elif unexpected: + msg = "Unexpected token: '%s'." % unexpected + if message: + msg += " " + message + else: + msg = "Expected token '%s'." % expected + LogicalExpressionException.__init__(self, index, msg) + + +class ExpectedMoreTokensException(LogicalExpressionException): + def __init__(self, index, message=None): + if not message: + message = "More tokens expected." + LogicalExpressionException.__init__( + self, index, "End of input found. 
" + message + ) + + +def is_indvar(expr): + """ + An individual variable must be a single lowercase character other than 'e', + followed by zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^[a-df-z]\d*$", expr) is not None + + +def is_funcvar(expr): + """ + A function variable must be a single uppercase character followed by + zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^[A-Z]\d*$", expr) is not None + + +def is_eventvar(expr): + """ + An event variable must be a single lowercase 'e' character followed by + zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^e\d*$", expr) is not None + + +def demo(): + lexpr = Expression.fromstring + print("=" * 20 + "Test reader" + "=" * 20) + print(lexpr(r"john")) + print(lexpr(r"man(x)")) + print(lexpr(r"-man(x)")) + print(lexpr(r"(man(x) & tall(x) & walks(x))")) + print(lexpr(r"exists x.(man(x) & tall(x) & walks(x))")) + print(lexpr(r"\x.man(x)")) + print(lexpr(r"\x.man(x)(john)")) + print(lexpr(r"\x y.sees(x,y)")) + print(lexpr(r"\x y.sees(x,y)(a,b)")) + print(lexpr(r"(\x.exists y.walks(x,y))(x)")) + print(lexpr(r"exists x.x = y")) + print(lexpr(r"exists x.(x = y)")) + print(lexpr("P(x) & x=y & P(y)")) + print(lexpr(r"\P Q.exists x.(P(x) & Q(x))")) + print(lexpr(r"man(x) <-> tall(x)")) + + print("=" * 20 + "Test simplify" + "=" * 20) + print(lexpr(r"\x.\y.sees(x,y)(john)(mary)").simplify()) + print(lexpr(r"\x.\y.sees(x,y)(john, mary)").simplify()) + print(lexpr(r"all x.(man(x) & (\x.exists y.walks(x,y))(x))").simplify()) + print(lexpr(r"(\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x))(\x.bark(x))").simplify()) + + print("=" * 20 + "Test alpha conversion and binder expression equality" + "=" * 20) + e1 = lexpr("exists x.P(x)") + print(e1) + e2 = e1.alpha_convert(Variable("z")) + print(e2) + print(e1 == e2) + + +def demo_errors(): + print("=" * 20 + "Test reader errors" + "=" * 20) + demoException("(P(x) & Q(x)") + demoException("((P(x) &) & Q(x))") + demoException("P(x) -> ") + demoException("P(x") + demoException("P(x,") + demoException("P(x,)") + demoException("exists") + demoException("exists x.") + demoException("\\") + demoException("\\ x y.") + demoException("P(x)Q(x)") + demoException("(P(x)Q(x)") + demoException("exists x -> y") + + +def demoException(s): + try: + Expression.fromstring(s) + except LogicalExpressionException as e: + print(f"{e.__class__.__name__}: {e}") + + +def printtype(ex): + print(f"{ex.str()} : {ex.type}") + + +if __name__ == "__main__": + demo() +# demo_errors() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/relextract.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/relextract.py new file mode 100644 index 0000000000000000000000000000000000000000..fcf755a3c4ab91678ae2965b96e79235a7c59120 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/relextract.py @@ -0,0 +1,539 @@ +# Natural Language Toolkit: Relation Extraction +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +Code for extracting relational triples from the ieer and conll2002 corpora. + +Relations are stored internally as dictionaries ('reldicts'). 
+ +The two serialization outputs are "rtuple" and "clause". + +- An rtuple is a tuple of the form ``(subj, filler, obj)``, + where ``subj`` and ``obj`` are pairs of Named Entity mentions, and ``filler`` is the string of words + occurring between ``sub`` and ``obj`` (with no intervening NEs). Strings are printed via ``repr()`` to + circumvent locale variations in rendering utf-8 encoded strings. +- A clause is an atom of the form ``relsym(subjsym, objsym)``, + where the relation, subject and object have been canonicalized to single strings. +""" + +# todo: get a more general solution to canonicalized symbols for clauses -- maybe use xmlcharrefs? + +import html +import re +from collections import defaultdict + +# Dictionary that associates corpora with NE classes +NE_CLASSES = { + "ieer": [ + "LOCATION", + "ORGANIZATION", + "PERSON", + "DURATION", + "DATE", + "CARDINAL", + "PERCENT", + "MONEY", + "MEASURE", + ], + "conll2002": ["LOC", "PER", "ORG"], + "ace": [ + "LOCATION", + "ORGANIZATION", + "PERSON", + "DURATION", + "DATE", + "CARDINAL", + "PERCENT", + "MONEY", + "MEASURE", + "FACILITY", + "GPE", + ], +} + +# Allow abbreviated class labels +short2long = dict(LOC="LOCATION", ORG="ORGANIZATION", PER="PERSON") +long2short = dict(LOCATION="LOC", ORGANIZATION="ORG", PERSON="PER") + + +def _expand(type): + """ + Expand an NE class name. + :type type: str + :rtype: str + """ + try: + return short2long[type] + except KeyError: + return type + + +def class_abbrev(type): + """ + Abbreviate an NE class name. + :type type: str + :rtype: str + """ + try: + return long2short[type] + except KeyError: + return type + + +def _join(lst, sep=" ", untag=False): + """ + Join a list into a string, turning tags tuples into tag strings or just words. + :param untag: if ``True``, omit the tag from tagged input strings. + :type lst: list + :rtype: str + """ + try: + return sep.join(lst) + except TypeError: + if untag: + return sep.join(tup[0] for tup in lst) + from nltk.tag import tuple2str + + return sep.join(tuple2str(tup) for tup in lst) + + +def descape_entity(m, defs=html.entities.entitydefs): + """ + Translate one entity to its ISO Latin value. + Inspired by example from effbot.org + + + """ + try: + return defs[m.group(1)] + + except KeyError: + return m.group(0) # use as is + + +def list2sym(lst): + """ + Convert a list of strings into a canonical symbol. + :type lst: list + :return: a Unicode string without whitespace + :rtype: unicode + """ + sym = _join(lst, "_", untag=True) + sym = sym.lower() + ENT = re.compile(r"&(\w+?);") + sym = ENT.sub(descape_entity, sym) + sym = sym.replace(".", "") + return sym + + +def tree2semi_rel(tree): + """ + Group a chunk structure into a list of 'semi-relations' of the form (list(str), ``Tree``). + + In order to facilitate the construction of (``Tree``, string, ``Tree``) triples, this + identifies pairs whose first member is a list (possibly empty) of terminal + strings, and whose second member is a ``Tree`` of the form (NE_label, terminals). 
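+
+    For example, with a small hand-built chunk tree (the entity names here are
+    purely illustrative):
+
+        >>> from nltk.tree import Tree
+        >>> t = Tree('S', [Tree('PERSON', [('Ada', 'NNP')]),
+        ...                ('works', 'VBZ'), ('at', 'IN'),
+        ...                Tree('ORGANIZATION', [('Acme', 'NNP')])])
+        >>> [(words, ne.label()) for (words, ne) in tree2semi_rel(t)]
+        [([], 'PERSON'), ([('works', 'VBZ'), ('at', 'IN')], 'ORGANIZATION')]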
+ + :param tree: a chunk tree + :return: a list of pairs (list(str), ``Tree``) + :rtype: list of tuple + """ + + from nltk.tree import Tree + + semi_rels = [] + semi_rel = [[], None] + + for dtr in tree: + if not isinstance(dtr, Tree): + semi_rel[0].append(dtr) + else: + # dtr is a Tree + semi_rel[1] = dtr + semi_rels.append(semi_rel) + semi_rel = [[], None] + return semi_rels + + +def semi_rel2reldict(pairs, window=5, trace=False): + """ + Converts the pairs generated by ``tree2semi_rel`` into a 'reldict': a dictionary which + stores information about the subject and object NEs plus the filler between them. + Additionally, a left and right context of length =< window are captured (within + a given input sentence). + + :param pairs: a pair of list(str) and ``Tree``, as generated by + :param window: a threshold for the number of items to include in the left and right context + :type window: int + :return: 'relation' dictionaries whose keys are 'lcon', 'subjclass', 'subjtext', 'subjsym', 'filler', objclass', objtext', 'objsym' and 'rcon' + :rtype: list(defaultdict) + """ + result = [] + while len(pairs) > 2: + reldict = defaultdict(str) + reldict["lcon"] = _join(pairs[0][0][-window:]) + reldict["subjclass"] = pairs[0][1].label() + reldict["subjtext"] = _join(pairs[0][1].leaves()) + reldict["subjsym"] = list2sym(pairs[0][1].leaves()) + reldict["filler"] = _join(pairs[1][0]) + reldict["untagged_filler"] = _join(pairs[1][0], untag=True) + reldict["objclass"] = pairs[1][1].label() + reldict["objtext"] = _join(pairs[1][1].leaves()) + reldict["objsym"] = list2sym(pairs[1][1].leaves()) + reldict["rcon"] = _join(pairs[2][0][:window]) + if trace: + print( + "(%s(%s, %s)" + % ( + reldict["untagged_filler"], + reldict["subjclass"], + reldict["objclass"], + ) + ) + result.append(reldict) + pairs = pairs[1:] + return result + + +def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10): + """ + Filter the output of ``semi_rel2reldict`` according to specified NE classes and a filler pattern. + + The parameters ``subjclass`` and ``objclass`` can be used to restrict the + Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION', + 'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'). + + :param subjclass: the class of the subject Named Entity. + :type subjclass: str + :param objclass: the class of the object Named Entity. + :type objclass: str + :param doc: input document + :type doc: ieer document or a list of chunk trees + :param corpus: name of the corpus to take as input; possible values are + 'ieer' and 'conll2002' + :type corpus: str + :param pattern: a regular expression for filtering the fillers of + retrieved triples. 
+ :type pattern: SRE_Pattern + :param window: filters out fillers which exceed this threshold + :type window: int + :return: see ``mk_reldicts`` + :rtype: list(defaultdict) + """ + + if subjclass and subjclass not in NE_CLASSES[corpus]: + if _expand(subjclass) in NE_CLASSES[corpus]: + subjclass = _expand(subjclass) + else: + raise ValueError( + "your value for the subject type has not been recognized: %s" + % subjclass + ) + if objclass and objclass not in NE_CLASSES[corpus]: + if _expand(objclass) in NE_CLASSES[corpus]: + objclass = _expand(objclass) + else: + raise ValueError( + "your value for the object type has not been recognized: %s" % objclass + ) + + if corpus == "ace" or corpus == "conll2002": + pairs = tree2semi_rel(doc) + elif corpus == "ieer": + pairs = tree2semi_rel(doc.text) + tree2semi_rel(doc.headline) + else: + raise ValueError("corpus type not recognized") + + reldicts = semi_rel2reldict(pairs) + + relfilter = lambda x: ( + x["subjclass"] == subjclass + and len(x["filler"].split()) <= window + and pattern.match(x["filler"]) + and x["objclass"] == objclass + ) + + return list(filter(relfilter, reldicts)) + + +def rtuple(reldict, lcon=False, rcon=False): + """ + Pretty print the reldict as an rtuple. + :param reldict: a relation dictionary + :type reldict: defaultdict + """ + items = [ + class_abbrev(reldict["subjclass"]), + reldict["subjtext"], + reldict["filler"], + class_abbrev(reldict["objclass"]), + reldict["objtext"], + ] + format = "[%s: %r] %r [%s: %r]" + if lcon: + items = [reldict["lcon"]] + items + format = "...%r)" + format + if rcon: + items.append(reldict["rcon"]) + format = format + "(%r..." + printargs = tuple(items) + return format % printargs + + +def clause(reldict, relsym): + """ + Print the relation in clausal form. + :param reldict: a relation dictionary + :type reldict: defaultdict + :param relsym: a label for the relation + :type relsym: str + """ + items = (relsym, reldict["subjsym"], reldict["objsym"]) + return "%s(%r, %r)" % items + + +####################################################### +# Demos of relation extraction with regular expressions +####################################################### + +############################################ +# Example of in(ORG, LOC) +############################################ +def in_demo(trace=0, sql=True): + """ + Select pairs of organizations and locations whose mentions occur with an + intervening occurrence of the preposition "in". + + If the sql parameter is set to True, then the entity pairs are loaded into + an in-memory database, and subsequently pulled out using an SQL "SELECT" + query. 
+ """ + from nltk.corpus import ieer + + if sql: + try: + import sqlite3 + + connection = sqlite3.connect(":memory:") + cur = connection.cursor() + cur.execute( + """create table Locations + (OrgName text, LocationName text, DocID text)""" + ) + except ImportError: + import warnings + + warnings.warn("Cannot import sqlite; sql flag will be ignored.") + + IN = re.compile(r".*\bin\b(?!\b.+ing)") + + print() + print("IEER: in(ORG, LOC) -- just the clauses:") + print("=" * 45) + + for file in ieer.fileids(): + for doc in ieer.parsed_docs(file): + if trace: + print(doc.docno) + print("=" * 15) + for rel in extract_rels("ORG", "LOC", doc, corpus="ieer", pattern=IN): + print(clause(rel, relsym="IN")) + if sql: + try: + rtuple = (rel["subjtext"], rel["objtext"], doc.docno) + cur.execute( + """insert into Locations + values (?, ?, ?)""", + rtuple, + ) + connection.commit() + except NameError: + pass + + if sql: + try: + cur.execute( + """select OrgName from Locations + where LocationName = 'Atlanta'""" + ) + print() + print("Extract data from SQL table: ORGs in Atlanta") + print("-" * 15) + for row in cur: + print(row) + except NameError: + pass + + +############################################ +# Example of has_role(PER, LOC) +############################################ + + +def roles_demo(trace=0): + from nltk.corpus import ieer + + roles = r""" + (.*( # assorted roles + analyst| + chair(wo)?man| + commissioner| + counsel| + director| + economist| + editor| + executive| + foreman| + governor| + head| + lawyer| + leader| + librarian).*)| + manager| + partner| + president| + producer| + professor| + researcher| + spokes(wo)?man| + writer| + ,\sof\sthe?\s* # "X, of (the) Y" + """ + ROLES = re.compile(roles, re.VERBOSE) + + print() + print("IEER: has_role(PER, ORG) -- raw rtuples:") + print("=" * 45) + + for file in ieer.fileids(): + for doc in ieer.parsed_docs(file): + lcon = rcon = False + if trace: + print(doc.docno) + print("=" * 15) + lcon = rcon = True + for rel in extract_rels("PER", "ORG", doc, corpus="ieer", pattern=ROLES): + print(rtuple(rel, lcon=lcon, rcon=rcon)) + + +############################################## +### Show what's in the IEER Headlines +############################################## + + +def ieer_headlines(): + + from nltk.corpus import ieer + from nltk.tree import Tree + + print("IEER: First 20 Headlines") + print("=" * 45) + + trees = [ + (doc.docno, doc.headline) + for file in ieer.fileids() + for doc in ieer.parsed_docs(file) + ] + for tree in trees[:20]: + print() + print("%s:\n%s" % tree) + + +############################################# +## Dutch CONLL2002: take_on_role(PER, ORG +############################################# + + +def conllned(trace=1): + """ + Find the copula+'van' relation ('of') in the Dutch tagged training corpus + from CoNLL 2002. 
+ """ + + from nltk.corpus import conll2002 + + vnv = """ + ( + is/V| # 3rd sing present and + was/V| # past forms of the verb zijn ('be') + werd/V| # and also present + wordt/V # past of worden ('become) + ) + .* # followed by anything + van/Prep # followed by van ('of') + """ + VAN = re.compile(vnv, re.VERBOSE) + + print() + print("Dutch CoNLL2002: van(PER, ORG) -- raw rtuples with context:") + print("=" * 45) + + for doc in conll2002.chunked_sents("ned.train"): + lcon = rcon = False + if trace: + lcon = rcon = True + for rel in extract_rels( + "PER", "ORG", doc, corpus="conll2002", pattern=VAN, window=10 + ): + print(rtuple(rel, lcon=lcon, rcon=rcon)) + + +############################################# +## Spanish CONLL2002: (PER, ORG) +############################################# + + +def conllesp(): + from nltk.corpus import conll2002 + + de = """ + .* + ( + de/SP| + del/SP + ) + """ + DE = re.compile(de, re.VERBOSE) + + print() + print("Spanish CoNLL2002: de(ORG, LOC) -- just the first 10 clauses:") + print("=" * 45) + rels = [ + rel + for doc in conll2002.chunked_sents("esp.train") + for rel in extract_rels("ORG", "LOC", doc, corpus="conll2002", pattern=DE) + ] + for r in rels[:10]: + print(clause(r, relsym="DE")) + print() + + +def ne_chunked(): + print() + print("1500 Sentences from Penn Treebank, as processed by NLTK NE Chunker") + print("=" * 45) + ROLE = re.compile( + r".*(chairman|president|trader|scientist|economist|analyst|partner).*" + ) + rels = [] + for i, sent in enumerate(nltk.corpus.treebank.tagged_sents()[:1500]): + sent = nltk.ne_chunk(sent) + rels = extract_rels("PER", "ORG", sent, corpus="ace", pattern=ROLE, window=7) + for rel in rels: + print(f"{i:<5}{rtuple(rel)}") + + +if __name__ == "__main__": + import nltk + from nltk.sem import relextract + + in_demo(trace=0) + roles_demo(trace=0) + conllned() + conllesp() + ieer_headlines() + ne_chunked() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/skolemize.py b/llmeval-env/lib/python3.10/site-packages/nltk/sem/skolemize.py new file mode 100644 index 0000000000000000000000000000000000000000..6f98437cee85ecf4a023a71a3f4518e25893ef8d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/skolemize.py @@ -0,0 +1,148 @@ +# Natural Language Toolkit: Semantic Interpretation +# +# Author: Ewan Klein +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.sem.logic import ( + AllExpression, + AndExpression, + ApplicationExpression, + EqualityExpression, + ExistsExpression, + IffExpression, + ImpExpression, + NegatedExpression, + OrExpression, + VariableExpression, + skolem_function, + unique_variable, +) + + +def skolemize(expression, univ_scope=None, used_variables=None): + """ + Skolemize the expression and convert to conjunctive normal form (CNF) + """ + if univ_scope is None: + univ_scope = set() + if used_variables is None: + used_variables = set() + + if isinstance(expression, AllExpression): + term = skolemize( + expression.term, + univ_scope | {expression.variable}, + used_variables | {expression.variable}, + ) + return term.replace( + expression.variable, + VariableExpression(unique_variable(ignore=used_variables)), + ) + elif isinstance(expression, AndExpression): + return skolemize(expression.first, univ_scope, used_variables) & skolemize( + expression.second, univ_scope, used_variables + ) + elif isinstance(expression, OrExpression): + return to_cnf( + skolemize(expression.first, univ_scope, used_variables), + 
skolemize(expression.second, univ_scope, used_variables), + ) + elif isinstance(expression, ImpExpression): + return to_cnf( + skolemize(-expression.first, univ_scope, used_variables), + skolemize(expression.second, univ_scope, used_variables), + ) + elif isinstance(expression, IffExpression): + return to_cnf( + skolemize(-expression.first, univ_scope, used_variables), + skolemize(expression.second, univ_scope, used_variables), + ) & to_cnf( + skolemize(expression.first, univ_scope, used_variables), + skolemize(-expression.second, univ_scope, used_variables), + ) + elif isinstance(expression, EqualityExpression): + return expression + elif isinstance(expression, NegatedExpression): + negated = expression.term + if isinstance(negated, AllExpression): + term = skolemize( + -negated.term, univ_scope, used_variables | {negated.variable} + ) + if univ_scope: + return term.replace(negated.variable, skolem_function(univ_scope)) + else: + skolem_constant = VariableExpression( + unique_variable(ignore=used_variables) + ) + return term.replace(negated.variable, skolem_constant) + elif isinstance(negated, AndExpression): + return to_cnf( + skolemize(-negated.first, univ_scope, used_variables), + skolemize(-negated.second, univ_scope, used_variables), + ) + elif isinstance(negated, OrExpression): + return skolemize(-negated.first, univ_scope, used_variables) & skolemize( + -negated.second, univ_scope, used_variables + ) + elif isinstance(negated, ImpExpression): + return skolemize(negated.first, univ_scope, used_variables) & skolemize( + -negated.second, univ_scope, used_variables + ) + elif isinstance(negated, IffExpression): + return to_cnf( + skolemize(-negated.first, univ_scope, used_variables), + skolemize(-negated.second, univ_scope, used_variables), + ) & to_cnf( + skolemize(negated.first, univ_scope, used_variables), + skolemize(negated.second, univ_scope, used_variables), + ) + elif isinstance(negated, EqualityExpression): + return expression + elif isinstance(negated, NegatedExpression): + return skolemize(negated.term, univ_scope, used_variables) + elif isinstance(negated, ExistsExpression): + term = skolemize( + -negated.term, + univ_scope | {negated.variable}, + used_variables | {negated.variable}, + ) + return term.replace( + negated.variable, + VariableExpression(unique_variable(ignore=used_variables)), + ) + elif isinstance(negated, ApplicationExpression): + return expression + else: + raise Exception("'%s' cannot be skolemized" % expression) + elif isinstance(expression, ExistsExpression): + term = skolemize( + expression.term, univ_scope, used_variables | {expression.variable} + ) + if univ_scope: + return term.replace(expression.variable, skolem_function(univ_scope)) + else: + skolem_constant = VariableExpression(unique_variable(ignore=used_variables)) + return term.replace(expression.variable, skolem_constant) + elif isinstance(expression, ApplicationExpression): + return expression + else: + raise Exception("'%s' cannot be skolemized" % expression) + + +def to_cnf(first, second): + """ + Convert this split disjunction to conjunctive normal form (CNF) + """ + if isinstance(first, AndExpression): + r_first = to_cnf(first.first, second) + r_second = to_cnf(first.second, second) + return r_first & r_second + elif isinstance(second, AndExpression): + r_first = to_cnf(first, second.first) + r_second = to_cnf(first, second.second) + return r_first & r_second + else: + return first | second diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sem/util.py 
b/llmeval-env/lib/python3.10/site-packages/nltk/sem/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8d119db424331b9b9873733a0acc6e9b3754a5cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sem/util.py @@ -0,0 +1,309 @@ +# Natural Language Toolkit: Semantic Interpretation +# +# Author: Ewan Klein +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +Utility functions for batch-processing sentences: parsing and +extraction of the semantic representation of the root node of the the +syntax tree, followed by evaluation of the semantic representation in +a first-order model. +""" + +import codecs + +from nltk.sem import evaluate + +############################################################## +## Utility functions for connecting parse output to semantics +############################################################## + + +def parse_sents(inputs, grammar, trace=0): + """ + Convert input sentences into syntactic trees. + + :param inputs: sentences to be parsed + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :rtype: list(nltk.tree.Tree) or dict(list(str)): list(Tree) + :return: a mapping from input sentences to a list of ``Tree`` instances. + """ + # put imports here to avoid circult dependencies + from nltk.grammar import FeatureGrammar + from nltk.parse import FeatureChartParser, load_parser + + if isinstance(grammar, FeatureGrammar): + cp = FeatureChartParser(grammar) + else: + cp = load_parser(grammar, trace=trace) + parses = [] + for sent in inputs: + tokens = sent.split() # use a tokenizer? + syntrees = list(cp.parse(tokens)) + parses.append(syntrees) + return parses + + +def root_semrep(syntree, semkey="SEM"): + """ + Find the semantic representation at the root of a tree. + + :param syntree: a parse ``Tree`` + :param semkey: the feature label to use for the root semantics in the tree + :return: the semantic representation at the root of a ``Tree`` + :rtype: sem.Expression + """ + from nltk.grammar import FeatStructNonterminal + + node = syntree.label() + assert isinstance(node, FeatStructNonterminal) + try: + return node[semkey] + except KeyError: + print(node, end=" ") + print("has no specification for the feature %s" % semkey) + raise + + +def interpret_sents(inputs, grammar, semkey="SEM", trace=0): + """ + Add the semantic representation to each syntactic parse tree + of each input sentence. + + :param inputs: a list of sentences + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :return: a mapping from sentences to lists of pairs (parse-tree, semantic-representations) + :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression))) + """ + return [ + [(syn, root_semrep(syn, semkey)) for syn in syntrees] + for syntrees in parse_sents(inputs, grammar, trace=trace) + ] + + +def evaluate_sents(inputs, grammar, model, assignment, trace=0): + """ + Add the truth-in-a-model value to each semantic representation + for each syntactic parse of each input sentences. 
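+
+    A typical call looks like the following sketch; it assumes the sample
+    grammar ``grammars/sample_grammars/sem2.fcfg`` from the NLTK data package
+    and the model ``m0`` and assignment ``g0`` built by ``demo_model0`` below::
+
+        demo_model0()
+        results = evaluate_sents(['John sees Mary'],
+                                 'grammars/sample_grammars/sem2.fcfg', m0, g0)
+        # results[0] lists (parse tree, semantic representation, model value)
+        # triples for the first input sentence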
+ + :param inputs: a list of sentences + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :return: a mapping from sentences to lists of triples (parse-tree, semantic-representations, evaluation-in-model) + :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression, bool or dict(str): bool))) + """ + return [ + [ + (syn, sem, model.evaluate("%s" % sem, assignment, trace=trace)) + for (syn, sem) in interpretations + ] + for interpretations in interpret_sents(inputs, grammar) + ] + + +def demo_model0(): + global m0, g0 + # Initialize a valuation of non-logical constants.""" + v = [ + ("john", "b1"), + ("mary", "g1"), + ("suzie", "g2"), + ("fido", "d1"), + ("tess", "d2"), + ("noosa", "n"), + ("girl", {"g1", "g2"}), + ("boy", {"b1", "b2"}), + ("dog", {"d1", "d2"}), + ("bark", {"d1", "d2"}), + ("walk", {"b1", "g2", "d1"}), + ("chase", {("b1", "g1"), ("b2", "g1"), ("g1", "d1"), ("g2", "d2")}), + ( + "see", + {("b1", "g1"), ("b2", "d2"), ("g1", "b1"), ("d2", "b1"), ("g2", "n")}, + ), + ("in", {("b1", "n"), ("b2", "n"), ("d2", "n")}), + ("with", {("b1", "g1"), ("g1", "b1"), ("d1", "b1"), ("b1", "d1")}), + ] + # Read in the data from ``v`` + val = evaluate.Valuation(v) + # Bind ``dom`` to the ``domain`` property of ``val`` + dom = val.domain + # Initialize a model with parameters ``dom`` and ``val``. + m0 = evaluate.Model(dom, val) + # Initialize a variable assignment with parameter ``dom`` + g0 = evaluate.Assignment(dom) + + +def read_sents(filename, encoding="utf8"): + with codecs.open(filename, "r", encoding) as fp: + sents = [l.rstrip() for l in fp] + + # get rid of blank lines + sents = [l for l in sents if len(l) > 0] + sents = [l for l in sents if not l[0] == "#"] + return sents + + +def demo_legacy_grammar(): + """ + Check that interpret_sents() is compatible with legacy grammars that use + a lowercase 'sem' feature. + + Define 'test.fcfg' to be the following + + """ + from nltk.grammar import FeatureGrammar + + g = FeatureGrammar.fromstring( + """ + % start S + S[sem=] -> 'hello' + """ + ) + print("Reading grammar: %s" % g) + print("*" * 20) + for reading in interpret_sents(["hello"], g, semkey="sem"): + syn, sem = reading[0] + print() + print("output: ", sem) + + +def demo(): + import sys + from optparse import OptionParser + + description = """ + Parse and evaluate some sentences. 
+ """ + + opts = OptionParser(description=description) + + opts.set_defaults( + evaluate=True, + beta=True, + syntrace=0, + semtrace=0, + demo="default", + grammar="", + sentences="", + ) + + opts.add_option( + "-d", + "--demo", + dest="demo", + help="choose demo D; omit this for the default demo, or specify 'chat80'", + metavar="D", + ) + opts.add_option( + "-g", "--gram", dest="grammar", help="read in grammar G", metavar="G" + ) + opts.add_option( + "-m", + "--model", + dest="model", + help="import model M (omit '.py' suffix)", + metavar="M", + ) + opts.add_option( + "-s", + "--sentences", + dest="sentences", + help="read in a file of test sentences S", + metavar="S", + ) + opts.add_option( + "-e", + "--no-eval", + action="store_false", + dest="evaluate", + help="just do a syntactic analysis", + ) + opts.add_option( + "-b", + "--no-beta-reduction", + action="store_false", + dest="beta", + help="don't carry out beta-reduction", + ) + opts.add_option( + "-t", + "--syntrace", + action="count", + dest="syntrace", + help="set syntactic tracing on; requires '-e' option", + ) + opts.add_option( + "-T", + "--semtrace", + action="count", + dest="semtrace", + help="set semantic tracing on", + ) + + (options, args) = opts.parse_args() + + SPACER = "-" * 30 + + demo_model0() + + sents = [ + "Fido sees a boy with Mary", + "John sees Mary", + "every girl chases a dog", + "every boy chases a girl", + "John walks with a girl in Noosa", + "who walks", + ] + + gramfile = "grammars/sample_grammars/sem2.fcfg" + + if options.sentences: + sentsfile = options.sentences + if options.grammar: + gramfile = options.grammar + if options.model: + exec("import %s as model" % options.model) + + if sents is None: + sents = read_sents(sentsfile) + + # Set model and assignment + model = m0 + g = g0 + + if options.evaluate: + evaluations = evaluate_sents(sents, gramfile, model, g, trace=options.semtrace) + else: + semreps = interpret_sents(sents, gramfile, trace=options.syntrace) + + for i, sent in enumerate(sents): + n = 1 + print("\nSentence: %s" % sent) + print(SPACER) + if options.evaluate: + + for (syntree, semrep, value) in evaluations[i]: + if isinstance(value, dict): + value = set(value.keys()) + print("%d: %s" % (n, semrep)) + print(value) + n += 1 + else: + + for (syntree, semrep) in semreps[i]: + print("%d: %s" % (n, semrep)) + n += 1 + + +if __name__ == "__main__": + demo() + demo_legacy_grammar() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..37c21108d41d8daafbcee02c34646291db597e88 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/sentiment/__init__.py @@ -0,0 +1,13 @@ +# Natural Language Toolkit: Sentiment Analysis +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Sentiment Analysis Package + +""" +from nltk.sentiment.sentiment_analyzer import SentimentAnalyzer +from nltk.sentiment.vader import SentimentIntensityAnalyzer diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5162796f751878d3521aaf66de56fac11b2a2dd8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__init__.py @@ -0,0 +1,132 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 
2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Contributors: matthewmc, clouds56 +# URL: +# For license information, see LICENSE.TXT + +r""" +NLTK Tokenizer Package + +Tokenizers divide strings into lists of substrings. For example, +tokenizers can be used to find the words and punctuation in a string: + + >>> from nltk.tokenize import word_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> word_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +This particular tokenizer requires the Punkt sentence tokenization +models to be installed. NLTK also provides a simpler, +regular-expression based tokenizer, which splits text on whitespace +and punctuation: + + >>> from nltk.tokenize import wordpunct_tokenize + >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +We can also operate at the level of sentences, using the sentence +tokenizer directly as follows: + + >>> from nltk.tokenize import sent_tokenize, word_tokenize + >>> sent_tokenize(s) + ['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.'] + >>> [word_tokenize(t) for t in sent_tokenize(s)] # doctest: +NORMALIZE_WHITESPACE + [['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'], + ['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']] + +Caution: when tokenizing a Unicode string, make sure you are not +using an encoded version of the string (it may be necessary to +decode it first, e.g. with ``s.decode("utf8")``. + +NLTK tokenizers can produce token-spans, represented as tuples of integers +having the same semantics as string slices, to support efficient comparison +of tokenizers. (These methods are implemented as generators.) + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> list(WhitespaceTokenizer().span_tokenize(s)) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44), + (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)] + +There are numerous ways to tokenize text. If you need more control over +tokenization, see the other methods provided in this package. + +For further information, please see Chapter 3 of the NLTK book. 
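(An illustrative addition, not part of the original docstring.) If you need a
custom pattern, the ``regexp_tokenize`` helper imported below can be called
directly:

    >>> from nltk.tokenize import regexp_tokenize
    >>> regexp_tokenize(s, pattern=r'\w+|\$[\d\.]+|\S+') # doctest: +NORMALIZE_WHITESPACE
    ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.',
    'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']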
+""" + +import re + +from nltk.data import load +from nltk.tokenize.casual import TweetTokenizer, casual_tokenize +from nltk.tokenize.destructive import NLTKWordTokenizer +from nltk.tokenize.legality_principle import LegalitySyllableTokenizer +from nltk.tokenize.mwe import MWETokenizer +from nltk.tokenize.punkt import PunktSentenceTokenizer +from nltk.tokenize.regexp import ( + BlanklineTokenizer, + RegexpTokenizer, + WhitespaceTokenizer, + WordPunctTokenizer, + blankline_tokenize, + regexp_tokenize, + wordpunct_tokenize, +) +from nltk.tokenize.repp import ReppTokenizer +from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize +from nltk.tokenize.simple import ( + LineTokenizer, + SpaceTokenizer, + TabTokenizer, + line_tokenize, +) +from nltk.tokenize.sonority_sequencing import SyllableTokenizer +from nltk.tokenize.stanford_segmenter import StanfordSegmenter +from nltk.tokenize.texttiling import TextTilingTokenizer +from nltk.tokenize.toktok import ToktokTokenizer +from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer +from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize + + +# Standard sentence tokenizer. +def sent_tokenize(text, language="english"): + """ + Return a sentence-tokenized copy of *text*, + using NLTK's recommended sentence tokenizer + (currently :class:`.PunktSentenceTokenizer` + for the specified language). + + :param text: text to split into sentences + :param language: the model name in the Punkt corpus + """ + tokenizer = load(f"tokenizers/punkt/{language}.pickle") + return tokenizer.tokenize(text) + + +# Standard word tokenizer. +_treebank_word_tokenizer = NLTKWordTokenizer() + + +def word_tokenize(text, language="english", preserve_line=False): + """ + Return a tokenized copy of *text*, + using NLTK's recommended word tokenizer + (currently an improved :class:`.TreebankWordTokenizer` + along with :class:`.PunktSentenceTokenizer` + for the specified language). + + :param text: text to split into words + :type text: str + :param language: the model name in the Punkt corpus + :type language: str + :param preserve_line: A flag to decide whether to sentence tokenize the text or not. 
+ :type preserve_line: bool + """ + sentences = [text] if preserve_line else sent_tokenize(text, language) + return [ + token for sent in sentences for token in _treebank_word_tokenizer.tokenize(sent) + ] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fa2819509234684c1f12625d381ed4eebade65f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2612ab4dd123df0d6285a7b518491c13ef74b49a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76a70003b1ceff321260d45550ec187c921b4ffa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/api.py new file mode 100644 index 0000000000000000000000000000000000000000..419ff646cfb89d5f3b63e645b53bedea09a1b479 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/api.py @@ -0,0 +1,83 @@ +# Natural Language Toolkit: Tokenizer Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Tokenizer Interface +""" + +from abc import ABC, abstractmethod +from typing import Iterator, List, Tuple + +from nltk.internals import overridden +from nltk.tokenize.util import string_span_tokenize + + +class TokenizerI(ABC): + """ + A processing interface for tokenizing a string. + Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both). + """ + + @abstractmethod + def tokenize(self, s: str) -> List[str]: + """ + Return a tokenized copy of *s*. + + :rtype: List[str] + """ + if overridden(self.tokenize_sents): + return self.tokenize_sents([s])[0] + + def span_tokenize(self, s: str) -> Iterator[Tuple[int, int]]: + """ + Identify the tokens using integer offsets ``(start_i, end_i)``, + where ``s[start_i:end_i]`` is the corresponding token. + + :rtype: Iterator[Tuple[int, int]] + """ + raise NotImplementedError() + + def tokenize_sents(self, strings: List[str]) -> List[List[str]]: + """ + Apply ``self.tokenize()`` to each element of ``strings``. I.e.: + + return [self.tokenize(s) for s in strings] + + :rtype: List[List[str]] + """ + return [self.tokenize(s) for s in strings] + + def span_tokenize_sents( + self, strings: List[str] + ) -> Iterator[List[Tuple[int, int]]]: + """ + Apply ``self.span_tokenize()`` to each element of ``strings``. 
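A minimal sketch (illustrative, not part of this interface) of a concrete
subclass: only ``tokenize()`` has to be implemented, and the batch methods
``tokenize_sents()`` and ``span_tokenize_sents()`` defined here are inherited.

    from typing import List

    from nltk.tokenize.api import TokenizerI


    class CommaTokenizer(TokenizerI):  # hypothetical example class
        """Split a string on commas and strip surrounding whitespace."""

        def tokenize(self, s: str) -> List[str]:
            return [part.strip() for part in s.split(",")]


    # CommaTokenizer().tokenize_sents(["a, b", "c,d"]) == [["a", "b"], ["c", "d"]]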
I.e.: + + return [self.span_tokenize(s) for s in strings] + + :yield: List[Tuple[int, int]] + """ + for s in strings: + yield list(self.span_tokenize(s)) + + +class StringTokenizer(TokenizerI): + """A tokenizer that divides a string into substrings by splitting + on the specified string (defined in subclasses). + """ + + @property + @abstractmethod + def _string(self): + raise NotImplementedError + + def tokenize(self, s): + return s.split(self._string) + + def span_tokenize(self, s): + yield from string_span_tokenize(s, self._string) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/casual.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/casual.py new file mode 100644 index 0000000000000000000000000000000000000000..d0545abe50530c20903f8aeaa29fbfc55094e70e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/casual.py @@ -0,0 +1,458 @@ +# +# Natural Language Toolkit: Twitter Tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Potts +# Ewan Klein (modifications) +# Pierpaolo Pantone <> (modifications) +# Tom Aarsen <> (modifications) +# URL: +# For license information, see LICENSE.TXT +# + + +""" +Twitter-aware tokenizer, designed to be flexible and easy to adapt to new +domains and tasks. The basic logic is this: + +1. The tuple REGEXPS defines a list of regular expression + strings. + +2. The REGEXPS strings are put, in order, into a compiled + regular expression object called WORD_RE, under the TweetTokenizer + class. + +3. The tokenization is done by WORD_RE.findall(s), where s is the + user-supplied string, inside the tokenize() method of the class + TweetTokenizer. + +4. When instantiating Tokenizer objects, there are several options: + * preserve_case. By default, it is set to True. If it is set to + False, then the tokenizer will downcase everything except for + emoticons. + * reduce_len. By default, it is set to False. It specifies whether + to replace repeated character sequences of length 3 or greater + with sequences of length 3. + * strip_handles. By default, it is set to False. It specifies + whether to remove Twitter handles of text used in the + `tokenize` method. + * match_phone_numbers. By default, it is set to True. It indicates + whether the `tokenize` method should look for phone numbers. +""" + + +###################################################################### + +import html +from typing import List + +import regex # https://github.com/nltk/nltk/issues/2409 + +from nltk.tokenize.api import TokenizerI + +###################################################################### +# The following strings are components in the regular expression +# that is used for tokenizing. It's important that phone_number +# appears first in the final regex (since it can contain whitespace). +# It also could matter that tags comes after emoticons, due to the +# possibility of having text like +# +# <:| and some text >:) +# +# Most importantly, the final element should always be last, since it +# does a last ditch whitespace-based tokenization of whatever is left. + +# ToDo: Update with https://en.wikipedia.org/wiki/List_of_emoticons ? + +# This particular element is used in a couple ways, so we define it +# with a name: +EMOTICONS = r""" + (?: + [<>]? + [:;=8] # eyes + [\-o\*\']? # optional nose + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + | + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + [\-o\*\']? # optional nose + [:;=8] # eyes + [<>]? 
+ | + {}\[\]]+ # Run of non-space, non-()<>{}[] + | # or + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + )+ + (?: # End with: + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + | # or + [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars + ) + | # OR, the following to match naked domains: + (?: + (?\s]+>""", + # ASCII Arrows + r"""[\-]+>|<[\-]+""", + # Twitter username: + r"""(?:@[\w_]+)""", + # Twitter hashtags: + r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""", + # email addresses + r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""", + # Zero-Width-Joiner and Skin tone modifier emojis + """.(?: + [\U0001F3FB-\U0001F3FF]?(?:\u200d.[\U0001F3FB-\U0001F3FF]?)+ + | + [\U0001F3FB-\U0001F3FF] + )""", + # flags + FLAGS, + # Remaining word types: + r""" + (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes. + | + (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals. + | + (?:[\w_]+) # Words without apostrophes or dashes. + | + (?:\.(?:\s*\.){1,}) # Ellipsis dots. + | + (?:\S) # Everything else that isn't whitespace. + """, +) + +# Take the main components and add a phone regex as the second parameter +REGEXPS_PHONE = (REGEXPS[0], PHONE_REGEX, *REGEXPS[1:]) + +###################################################################### +# TweetTokenizer.WORD_RE and TweetTokenizer.PHONE_WORD_RE represent +# the core tokenizing regexes. They are compiled lazily. + +# WORD_RE performs poorly on these patterns: +HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}") + +# The emoticon string gets its own regex so that we can preserve case for +# them as needed: +EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE) + +# These are for regularizing HTML entities to Unicode: +ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);") + +# For stripping away handles from a tweet: +HANDLES_RE = regex.compile( + r"(?>> from nltk.tokenize.casual import _replace_html_entities + >>> _replace_html_entities(b'Price: £100') + 'Price: \\xa3100' + >>> print(_replace_html_entities(b'Price: £100')) + Price: £100 + >>> + """ + + def _convert_entity(match): + entity_body = match.group(3) + if match.group(1): + try: + if match.group(2): + number = int(entity_body, 16) + else: + number = int(entity_body, 10) + # Numeric character references in the 80-9F range are typically + # interpreted by browsers as representing the characters mapped + # to bytes 80-9F in the Windows-1252 encoding. For more info + # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets + if 0x80 <= number <= 0x9F: + return bytes((number,)).decode("cp1252") + except ValueError: + number = None + else: + if entity_body in keep: + return match.group(0) + number = html.entities.name2codepoint.get(entity_body) + if number is not None: + try: + return chr(number) + except (ValueError, OverflowError): + pass + + return "" if remove_illegal else match.group(0) + + return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding)) + + +###################################################################### + + +class TweetTokenizer(TokenizerI): + r""" + Tokenizer for tweets. 
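(An illustrative extra example, not in the original docstring: with
``preserve_case=False`` everything except emoticons is lowercased.)

    >>> TweetTokenizer(preserve_case=False).tokenize("This is GREAT :-D")
    ['this', 'is', 'great', ':-D']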
+ + >>> from nltk.tokenize import TweetTokenizer + >>> tknzr = TweetTokenizer() + >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--" + >>> tknzr.tokenize(s0) # doctest: +NORMALIZE_WHITESPACE + ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', + '<--'] + + Examples using `strip_handles` and `reduce_len parameters`: + + >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) + >>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!' + >>> tknzr.tokenize(s1) + [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!'] + """ + + # Values used to lazily compile WORD_RE and PHONE_WORD_RE, + # which are the core tokenizing regexes. + _WORD_RE = None + _PHONE_WORD_RE = None + + ###################################################################### + + def __init__( + self, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, + ): + """ + Create a `TweetTokenizer` instance with settings for use in the `tokenize` method. + + :param preserve_case: Flag indicating whether to preserve the casing (capitalisation) + of text used in the `tokenize` method. Defaults to True. + :type preserve_case: bool + :param reduce_len: Flag indicating whether to replace repeated character sequences + of length 3 or greater with sequences of length 3. Defaults to False. + :type reduce_len: bool + :param strip_handles: Flag indicating whether to remove Twitter handles of text used + in the `tokenize` method. Defaults to False. + :type strip_handles: bool + :param match_phone_numbers: Flag indicating whether the `tokenize` method should look + for phone numbers. Defaults to True. + :type match_phone_numbers: bool + """ + self.preserve_case = preserve_case + self.reduce_len = reduce_len + self.strip_handles = strip_handles + self.match_phone_numbers = match_phone_numbers + + def tokenize(self, text: str) -> List[str]: + """Tokenize the input text. + + :param text: str + :rtype: list(str) + :return: a tokenized list of strings; joining this list returns\ + the original string if `preserve_case=False`. + """ + # Fix HTML character entities: + text = _replace_html_entities(text) + # Remove username handles + if self.strip_handles: + text = remove_handles(text) + # Normalize word lengthening + if self.reduce_len: + text = reduce_lengthening(text) + # Shorten problematic sequences of characters + safe_text = HANG_RE.sub(r"\1\1\1", text) + # Recognise phone numbers during tokenization + if self.match_phone_numbers: + words = self.PHONE_WORD_RE.findall(safe_text) + else: + words = self.WORD_RE.findall(safe_text) + # Possibly alter the case, but avoid changing emoticons like :D into :d: + if not self.preserve_case: + words = list( + map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words) + ) + return words + + @property + def WORD_RE(self) -> "regex.Pattern": + """Core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. + if not type(self)._WORD_RE: + type(self)._WORD_RE = regex.compile( + f"({'|'.join(REGEXPS)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._WORD_RE + + @property + def PHONE_WORD_RE(self) -> "regex.Pattern": + """Secondary core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. 
+ if not type(self)._PHONE_WORD_RE: + type(self)._PHONE_WORD_RE = regex.compile( + f"({'|'.join(REGEXPS_PHONE)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._PHONE_WORD_RE + + +###################################################################### +# Normalization Functions +###################################################################### + + +def reduce_lengthening(text): + """ + Replace repeated character sequences of length 3 or greater with sequences + of length 3. + """ + pattern = regex.compile(r"(.)\1{2,}") + return pattern.sub(r"\1\1\1", text) + + +def remove_handles(text): + """ + Remove Twitter username handles from text. + """ + # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly + return HANDLES_RE.sub(" ", text) + + +###################################################################### +# Tokenization Function +###################################################################### + + +def casual_tokenize( + text, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, +): + """ + Convenience function for wrapping the tokenizer. + """ + return TweetTokenizer( + preserve_case=preserve_case, + reduce_len=reduce_len, + strip_handles=strip_handles, + match_phone_numbers=match_phone_numbers, + ).tokenize(text) + + +############################################################################### diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py new file mode 100644 index 0000000000000000000000000000000000000000..547827cefe1af65209e1f44237b7ac160b167920 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py @@ -0,0 +1,147 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Legality Principle is a language agnostic principle maintaining that syllable +onsets and codas (the beginning and ends of syllables not including the vowel) +are only legal if they are found as word onsets or codas in the language. The English +word ''admit'' must then be syllabified as ''ad-mit'' since ''dm'' is not found +word-initially in the English language (Bartlett et al.). This principle was first proposed +in Daniel Kahn's 1976 dissertation, ''Syllable-based generalizations in English phonology''. + +Kahn further argues that there is a ''strong tendency to syllabify in such a way that +initial clusters are of maximal length, consistent with the general constraints on +word-initial consonant clusters.'' Consequently, in addition to being legal onsets, +the longest legal onset is preferable---''Onset Maximization''. + +The default implementation assumes an English vowel set, but the `vowels` attribute +can be set to IPA or any other alphabet's vowel set for the use-case. +Both a valid set of vowels as well as a text corpus of words in the language +are necessary to determine legal onsets and subsequently syllabify words. + +The legality principle with onset maximization is a universal syllabification algorithm, +but that does not mean it performs equally across languages. Bartlett et al. (2009) +is a good benchmark for English accuracy if utilizing IPA (pg. 311). + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. 
+- Theo Vennemann, ''On the Theory of Syllabic Phonology,'' 1972, p. 11. +- Daniel Kahn, ''Syllable-based generalizations in English phonology'', (PhD diss., MIT, 1976). +- Elisabeth Selkirk. 1984. On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Jeremy Goslin and Ulrich Frauenfelder. 2001. A comparison of theoretical and human syllabification. Language and Speech, 44:409–436. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +- Christopher Hench. 2017. Resonances in Middle High German: New Methodologies in Prosody. UC Berkeley. +""" + +from collections import Counter + +from nltk.tokenize.api import TokenizerI + + +class LegalitySyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Legality Principle and Onset Maximization. + + >>> from nltk.tokenize import LegalitySyllableTokenizer + >>> from nltk import word_tokenize + >>> from nltk.corpus import words + >>> text = "This is a wonderful sentence." + >>> text_words = word_tokenize(text) + >>> LP = LegalitySyllableTokenizer(words.words()) + >>> [LP.tokenize(word) for word in text_words] + [['This'], ['is'], ['a'], ['won', 'der', 'ful'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__( + self, tokenized_source_text, vowels="aeiouy", legal_frequency_threshold=0.001 + ): + """ + :param tokenized_source_text: List of valid tokens in the language + :type tokenized_source_text: list(str) + :param vowels: Valid vowels in language or IPA representation + :type vowels: str + :param legal_frequency_threshold: Lowest frequency of all onsets to be considered a legal onset + :type legal_frequency_threshold: float + """ + self.legal_frequency_threshold = legal_frequency_threshold + self.vowels = vowels + self.legal_onsets = self.find_legal_onsets(tokenized_source_text) + + def find_legal_onsets(self, words): + """ + Gathers all onsets and then return only those above the frequency threshold + + :param words: List of words in a language + :type words: list(str) + :return: Set of legal onsets + :rtype: set(str) + """ + onsets = [self.onset(word) for word in words] + legal_onsets = [ + k + for k, v in Counter(onsets).items() + if (v / len(onsets)) > self.legal_frequency_threshold + ] + return set(legal_onsets) + + def onset(self, word): + """ + Returns consonant cluster of word, i.e. all characters until the first vowel. + + :param word: Single word or token + :type word: str + :return: String of characters of onset + :rtype: str + """ + onset = "" + for c in word.lower(): + if c in self.vowels: + return onset + else: + onset += c + return onset + + def tokenize(self, token): + """ + Apply the Legality Principle in combination with + Onset Maximization to return a list of syllables. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. 
+ :rtype: list(str) + """ + syllables = [] + syllable, current_onset = "", "" + vowel, onset = False, False + for char in token[::-1]: + char_lower = char.lower() + if not vowel: + syllable += char + vowel = bool(char_lower in self.vowels) + else: + if char_lower + current_onset[::-1] in self.legal_onsets: + syllable += char + current_onset += char_lower + onset = True + elif char_lower in self.vowels and not onset: + syllable += char + current_onset += char_lower + else: + syllables.append(syllable) + syllable = char + current_onset = "" + vowel = bool(char_lower in self.vowels) + syllables.append(syllable) + syllables_ordered = [syllable[::-1] for syllable in syllables][::-1] + return syllables_ordered diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/mwe.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/mwe.py new file mode 100644 index 0000000000000000000000000000000000000000..c39244c7b1c7a9be96331548150c60ce9aaae8be --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/mwe.py @@ -0,0 +1,124 @@ +# Multi-Word Expression tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rob Malouf +# URL: +# For license information, see LICENSE.TXT + +""" +Multi-Word Expression Tokenizer + +A ``MWETokenizer`` takes a string which has already been divided into tokens and +retokenizes it, merging multi-word expressions into single tokens, using a lexicon +of MWEs: + + + >>> from nltk.tokenize import MWETokenizer + + >>> tokenizer = MWETokenizer([('a', 'little'), ('a', 'little', 'bit'), ('a', 'lot')]) + >>> tokenizer.add_mwe(('in', 'spite', 'of')) + + >>> tokenizer.tokenize('Testing testing testing one two three'.split()) + ['Testing', 'testing', 'testing', 'one', 'two', 'three'] + + >>> tokenizer.tokenize('This is a test in spite'.split()) + ['This', 'is', 'a', 'test', 'in', 'spite'] + + >>> tokenizer.tokenize('In a little or a little bit or a lot in spite of'.split()) + ['In', 'a_little', 'or', 'a_little_bit', 'or', 'a_lot', 'in_spite_of'] + +""" +from nltk.tokenize.api import TokenizerI +from nltk.util import Trie + + +class MWETokenizer(TokenizerI): + """A tokenizer that processes tokenized text and merges multi-word expressions + into single tokens. + """ + + def __init__(self, mwes=None, separator="_"): + """Initialize the multi-word tokenizer with a list of expressions and a + separator + + :type mwes: list(list(str)) + :param mwes: A sequence of multi-word expressions to be merged, where + each MWE is a sequence of strings. + :type separator: str + :param separator: String that should be inserted between words in a multi-word + expression token. (Default is '_') + + """ + if not mwes: + mwes = [] + self._mwes = Trie(mwes) + self._separator = separator + + def add_mwe(self, mwe): + """Add a multi-word expression to the lexicon (stored as a word trie) + + We use ``util.Trie`` to represent the trie. Its form is a dict of dicts. + The key True marks the end of a valid MWE. 
+ + :param mwe: The multi-word expression we're adding into the word trie + :type mwe: tuple(str) or list(str) + + :Example: + + >>> tokenizer = MWETokenizer() + >>> tokenizer.add_mwe(('a', 'b')) + >>> tokenizer.add_mwe(('a', 'b', 'c')) + >>> tokenizer.add_mwe(('a', 'x')) + >>> expected = {'a': {'x': {True: None}, 'b': {True: None, 'c': {True: None}}}} + >>> tokenizer._mwes == expected + True + + """ + self._mwes.insert(mwe) + + def tokenize(self, text): + """ + + :param text: A list containing tokenized text + :type text: list(str) + :return: A list of the tokenized text with multi-words merged together + :rtype: list(str) + + :Example: + + >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+') + >>> tokenizer.tokenize("An hors d'oeuvre tonight, sir?".split()) + ['An', "hors+d'oeuvre", 'tonight,', 'sir?'] + + """ + i = 0 + n = len(text) + result = [] + + while i < n: + if text[i] in self._mwes: + # possible MWE match + j = i + trie = self._mwes + last_match = -1 + while j < n and text[j] in trie: # and len(trie[text[j]]) > 0 : + trie = trie[text[j]] + j = j + 1 + if Trie.LEAF in trie: + last_match = j + else: + if last_match > -1: + j = last_match + + if Trie.LEAF in trie or last_match > -1: + # success! + result.append(self._separator.join(text[i:j])) + i = j + else: + # no match, so backtrack + result.append(text[i]) + i += 1 + else: + result.append(text[i]) + i += 1 + return result diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/nist.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/nist.py new file mode 100644 index 0000000000000000000000000000000000000000..b9e13dad28b81d91891a838d89bcdf5a0c1ad086 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/nist.py @@ -0,0 +1,179 @@ +# Natural Language Toolkit: Python port of the mteval-v14.pl tokenizer. +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Liling Tan (ported from ftp://jaguar.ncsl.nist.gov/mt/resources/mteval-v14.pl) +# Contributors: Ozan Caglayan, Wiktor Stribizew +# +# URL: +# For license information, see LICENSE.TXT + +""" +This is a NLTK port of the tokenizer used in the NIST BLEU evaluation script, +https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L926 +which was also ported into Python in +https://github.com/lium-lst/nmtpy/blob/master/nmtpy/metrics/mtevalbleu.py#L162 +""" + + +import io +import re + +from nltk.corpus import perluniprops +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import xml_unescape + + +class NISTTokenizer(TokenizerI): + """ + This NIST tokenizer is sentence-based instead of the original + paragraph-based tokenization from mteval-14.pl; The sentence-based + tokenization is consistent with the other tokenizers available in NLTK. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + >>> s = "Good muffins cost $3.88 in New York." + >>> expected_lower = [u'good', u'muffins', u'cost', u'$', u'3.88', u'in', u'new', u'york', u'.'] + >>> expected_cased = [u'Good', u'muffins', u'cost', u'$', u'3.88', u'in', u'New', u'York', u'.'] + >>> nist.tokenize(s, lowercase=False) == expected_cased + True + >>> nist.tokenize(s, lowercase=True) == expected_lower # Lowercased. + True + + The international_tokenize() is the preferred function when tokenizing + non-european text, e.g. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + + # Input strings. 
+ >>> albb = u'Alibaba Group Holding Limited (Chinese: 阿里巴巴集团控股 有限公司) us a Chinese e-commerce company...' + >>> amz = u'Amazon.com, Inc. (/ˈæməzɒn/) is an American electronic commerce...' + >>> rkt = u'Rakuten, Inc. (楽天株式会社 Rakuten Kabushiki-gaisha) is a Japanese electronic commerce and Internet company based in Tokyo.' + + # Expected tokens. + >>> expected_albb = [u'Alibaba', u'Group', u'Holding', u'Limited', u'(', u'Chinese', u':', u'\u963f\u91cc\u5df4\u5df4\u96c6\u56e2\u63a7\u80a1', u'\u6709\u9650\u516c\u53f8', u')'] + >>> expected_amz = [u'Amazon', u'.', u'com', u',', u'Inc', u'.', u'(', u'/', u'\u02c8\xe6', u'm'] + >>> expected_rkt = [u'Rakuten', u',', u'Inc', u'.', u'(', u'\u697d\u5929\u682a\u5f0f\u4f1a\u793e', u'Rakuten', u'Kabushiki', u'-', u'gaisha'] + + >>> nist.international_tokenize(albb)[:10] == expected_albb + True + >>> nist.international_tokenize(amz)[:10] == expected_amz + True + >>> nist.international_tokenize(rkt)[:10] == expected_rkt + True + + # Doctest for patching issue #1926 + >>> sent = u'this is a foo\u2604sentence.' + >>> expected_sent = [u'this', u'is', u'a', u'foo', u'\u2604', u'sentence', u'.'] + >>> nist.international_tokenize(sent) == expected_sent + True + """ + + # Strip "skipped" tags + STRIP_SKIP = re.compile(""), "" + # Strip end-of-line hyphenation and join lines + STRIP_EOL_HYPHEN = re.compile("\u2028"), " " + # Tokenize punctuation. + PUNCT = re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), " \\1 " + # Tokenize period and comma unless preceded by a digit. + PERIOD_COMMA_PRECEED = re.compile(r"([^0-9])([\.,])"), "\\1 \\2 " + # Tokenize period and comma unless followed by a digit. + PERIOD_COMMA_FOLLOW = re.compile(r"([\.,])([^0-9])"), " \\1 \\2" + # Tokenize dash when preceded by a digit + DASH_PRECEED_DIGIT = re.compile("([0-9])(-)"), "\\1 \\2 " + + LANG_DEPENDENT_REGEXES = [ + PUNCT, + PERIOD_COMMA_PRECEED, + PERIOD_COMMA_FOLLOW, + DASH_PRECEED_DIGIT, + ] + + # Perluniprops characters used in NIST tokenizer. + pup_number = str("".join(set(perluniprops.chars("Number")))) # i.e. \p{N} + pup_punct = str("".join(set(perluniprops.chars("Punctuation")))) # i.e. \p{P} + pup_symbol = str("".join(set(perluniprops.chars("Symbol")))) # i.e. \p{S} + + # Python regexes needs to escape some special symbols, see + # see https://stackoverflow.com/q/45670950/610569 + number_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_number) + punct_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_punct) + symbol_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_symbol) + + # Note: In the original perl implementation, \p{Z} and \p{Zl} were used to + # (i) strip trailing and heading spaces and + # (ii) de-deuplicate spaces. + # In Python, this would do: ' '.join(str.strip().split()) + # Thus, the next two lines were commented out. + # Line_Separator = str(''.join(perluniprops.chars('Line_Separator'))) # i.e. \p{Zl} + # Separator = str(''.join(perluniprops.chars('Separator'))) # i.e. \p{Z} + + # Pads non-ascii strings with space. + NONASCII = re.compile("([\x00-\x7f]+)"), r" \1 " + # Tokenize any punctuation unless followed AND preceded by a digit. + PUNCT_1 = ( + re.compile(f"([{number_regex}])([{punct_regex}])"), + "\\1 \\2 ", + ) + PUNCT_2 = ( + re.compile(f"([{punct_regex}])([{number_regex}])"), + " \\1 \\2", + ) + # Tokenize symbols + SYMBOLS = re.compile(f"([{symbol_regex}])"), " \\1 " + + INTERNATIONAL_REGEXES = [NONASCII, PUNCT_1, PUNCT_2, SYMBOLS] + + def lang_independent_sub(self, text): + """Performs the language independent string substituitions.""" + # It's a strange order of regexes. 
+ # It'll be better to unescape after STRIP_EOL_HYPHEN + # but let's keep it close to the original NIST implementation. + regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + text = xml_unescape(text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + return text + + def tokenize(self, text, lowercase=False, western_lang=True, return_str=False): + text = str(text) + # Language independent regex. + text = self.lang_independent_sub(text) + # Language dependent regex. + if western_lang: + # Pad string with whitespace. + text = " " + text + " " + if lowercase: + text = text.lower() + for regexp, substitution in self.LANG_DEPENDENT_REGEXES: + text = regexp.sub(substitution, text) + # Remove contiguous whitespaces. + text = " ".join(text.split()) + # Finally, strips heading and trailing spaces + # and converts output string into unicode. + text = str(text.strip()) + return text if return_str else text.split() + + def international_tokenize( + self, text, lowercase=False, split_non_ascii=True, return_str=False + ): + text = str(text) + # Different from the 'normal' tokenize(), STRIP_EOL_HYPHEN is applied + # first before unescaping. + regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + text = xml_unescape(text) + + if lowercase: + text = text.lower() + + for regexp, substitution in self.INTERNATIONAL_REGEXES: + text = regexp.sub(substitution, text) + + # Make sure that there's only one space only between words. + # Strip leading and trailing spaces. + text = " ".join(text.strip().split()) + return text if return_str else text.split() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/punkt.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/punkt.py new file mode 100644 index 0000000000000000000000000000000000000000..129bd49c270c301d97a44eec5e58d7e19f15cabe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/punkt.py @@ -0,0 +1,1767 @@ +# Natural Language Toolkit: Punkt sentence tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Algorithm: Kiss & Strunk (2006) +# Author: Willy (original Python port) +# Steven Bird (additions) +# Edward Loper (rewrite) +# Joel Nothman (almost rewrite) +# Arthur Darcet (fixes) +# Tom Aarsen <> (tackle ReDoS & performance issues) +# URL: +# For license information, see LICENSE.TXT + +r""" +Punkt Sentence Tokenizer + +This tokenizer divides a text into a list of sentences +by using an unsupervised algorithm to build a model for abbreviation +words, collocations, and words that start sentences. It must be +trained on a large collection of plaintext in the target language +before it can be used. + +The NLTK data package includes a pre-trained Punkt tokenizer for +English. + + >>> import nltk.data + >>> text = ''' + ... Punkt knows that the periods in Mr. Smith and Johann S. Bach + ... do not mark sentence boundaries. And sometimes sentences + ... can start with non-capitalized words. i is a good variable + ... name. + ... ''' + >>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') + >>> print('\n-----\n'.join(sent_detector.tokenize(text.strip()))) + Punkt knows that the periods in Mr. Smith and Johann S. Bach + do not mark sentence boundaries. + ----- + And sometimes sentences + can start with non-capitalized words. + ----- + i is a good variable + name. 
+ +(Note that whitespace from the original text, including newlines, is +retained in the output.) + +Punctuation following sentences is also included by default +(from NLTK 3.0 onwards). It can be excluded with the realign_boundaries +flag. + + >>> text = ''' + ... (How does it deal with this parenthesis?) "It should be part of the + ... previous sentence." "(And the same with this one.)" ('And this one!') + ... "('(And (this)) '?)" [(and this. )] + ... ''' + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip()))) + (How does it deal with this parenthesis?) + ----- + "It should be part of the + previous sentence." + ----- + "(And the same with this one.)" + ----- + ('And this one!') + ----- + "('(And (this)) '?)" + ----- + [(and this. )] + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip(), realign_boundaries=False))) + (How does it deal with this parenthesis? + ----- + ) "It should be part of the + previous sentence. + ----- + " "(And the same with this one. + ----- + )" ('And this one! + ----- + ') + "('(And (this)) '? + ----- + )" [(and this. + ----- + )] + +However, Punkt is designed to learn parameters (a list of abbreviations, etc.) +unsupervised from a corpus similar to the target domain. The pre-packaged models +may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn +parameters from the given text. + +:class:`.PunktTrainer` learns parameters such as a list of abbreviations +(without supervision) from portions of text. Using a ``PunktTrainer`` directly +allows for incremental training and modification of the hyper-parameters used +to decide what is considered an abbreviation, etc. + +The algorithm for this tokenizer is described in:: + + Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence + Boundary Detection. Computational Linguistics 32: 485-525. +""" + +# TODO: Make orthographic heuristic less susceptible to overtraining +# TODO: Frequent sentence starters optionally exclude always-capitalised words +# FIXME: Problem with ending string with e.g. '!!!' -> '!! !' + +import math +import re +import string +from collections import defaultdict +from typing import Any, Dict, Iterator, List, Match, Optional, Tuple, Union + +from nltk.probability import FreqDist +from nltk.tokenize.api import TokenizerI + +###################################################################### +# { Orthographic Context Constants +###################################################################### +# The following constants are used to describe the orthographic +# contexts in which a word can occur. BEG=beginning, MID=middle, +# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case. 
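# Illustrative note (not part of the original source): the flags below are bit
# masks that get OR-ed together per word type.  A type seen mid-sentence in
# upper case and at a sentence start in lower case accumulates
#
#     _ORTHO_MID_UC | _ORTHO_BEG_LC        # == (1 << 2) | (1 << 4) == 20
#
# and later heuristics test membership with bitwise AND, e.g.
# ``ortho_context[typ] & _ORTHO_LC``.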
+ +_ORTHO_BEG_UC = 1 << 1 +"""Orthographic context: beginning of a sentence with upper case.""" + +_ORTHO_MID_UC = 1 << 2 +"""Orthographic context: middle of a sentence with upper case.""" + +_ORTHO_UNK_UC = 1 << 3 +"""Orthographic context: unknown position in a sentence with upper case.""" + +_ORTHO_BEG_LC = 1 << 4 +"""Orthographic context: beginning of a sentence with lower case.""" + +_ORTHO_MID_LC = 1 << 5 +"""Orthographic context: middle of a sentence with lower case.""" + +_ORTHO_UNK_LC = 1 << 6 +"""Orthographic context: unknown position in a sentence with lower case.""" + +_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC +"""Orthographic context: occurs with upper case.""" + +_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC +"""Orthographic context: occurs with lower case.""" + +_ORTHO_MAP = { + ("initial", "upper"): _ORTHO_BEG_UC, + ("internal", "upper"): _ORTHO_MID_UC, + ("unknown", "upper"): _ORTHO_UNK_UC, + ("initial", "lower"): _ORTHO_BEG_LC, + ("internal", "lower"): _ORTHO_MID_LC, + ("unknown", "lower"): _ORTHO_UNK_LC, +} +"""A map from context position and first-letter case to the +appropriate orthographic context flag.""" + +# } (end orthographic context constants) +###################################################################### + +###################################################################### +# { Decision reasons for debugging +###################################################################### + +REASON_DEFAULT_DECISION = "default decision" +REASON_KNOWN_COLLOCATION = "known collocation (both words)" +REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = "abbreviation + orthographic heuristic" +REASON_ABBR_WITH_SENTENCE_STARTER = "abbreviation + frequent sentence starter" +REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = ( + "initial + special orthographic heuristic" +) + + +# } (end decision reasons for debugging) +###################################################################### + +###################################################################### +# { Language-dependent variables +###################################################################### + + +class PunktLanguageVars: + """ + Stores variables, mostly regular expressions, which may be + language-dependent for correct application of the algorithm. + An extension of this class may modify its properties to suit + a language other than English; an instance can then be passed + as an argument to PunktSentenceTokenizer and PunktTrainer + constructors. + """ + + __slots__ = ("_re_period_context", "_re_word_tokenizer") + + def __getstate__(self): + # All modifications to the class are performed by inheritance. + # Non-default parameters to be pickled must be defined in the inherited + # class. + return 1 + + def __setstate__(self, state): + return 1 + + sent_end_chars = (".", "?", "!") + """Characters which are candidates for sentence boundaries""" + + @property + def _re_sent_end_chars(self): + return "[%s]" % re.escape("".join(self.sent_end_chars)) + + internal_punctuation = ",:;" # might want to extend this.. 
+ """sentence internal punctuation, which indicates an abbreviation if + preceded by a period-final token.""" + + re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)', re.MULTILINE) + """Used to realign punctuation that should be included in a sentence + although it follows the period (or ?, !).""" + + _re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]" + """Excludes some characters from starting word tokens""" + + @property + def _re_non_word_chars(self): + return r"(?:[)\";}\]\*:@\'\({\[%s])" % re.escape( + "".join(set(self.sent_end_chars) - {"."}) + ) + + """Characters that cannot appear within words""" + + _re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)" + """Hyphen and ellipsis are multi-character punctuation""" + + _word_tokenize_fmt = r"""( + %(MultiChar)s + | + (?=%(WordStart)s)\S+? # Accept word characters until end is found + (?= # Sequences marking a word's end + \s| # White-space + $| # End-of-string + %(NonWord)s|%(MultiChar)s| # Punctuation + ,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word + ) + | + \S + )""" + """Format of a regular expression to split punctuation from words, + excluding period.""" + + def _word_tokenizer_re(self): + """Compiles and returns a regular expression for word tokenization""" + try: + return self._re_word_tokenizer + except AttributeError: + self._re_word_tokenizer = re.compile( + self._word_tokenize_fmt + % { + "NonWord": self._re_non_word_chars, + "MultiChar": self._re_multi_char_punct, + "WordStart": self._re_word_start, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_word_tokenizer + + def word_tokenize(self, s): + """Tokenize a string to split off punctuation other than periods""" + return self._word_tokenizer_re().findall(s) + + _period_context_fmt = r""" + %(SentEndChars)s # a potential sentence ending + (?=(?P + %(NonWord)s # either other punctuation + | + \s+(?P\S+) # or whitespace and some other token + ))""" + """Format of a regular expression to find contexts including possible + sentence boundaries. Matches token which the possible sentence boundary + ends, and matches the following token within a lookahead expression.""" + + def period_context_re(self): + """Compiles and returns a regular expression to find contexts + including possible sentence boundaries.""" + try: + return self._re_period_context + except: + self._re_period_context = re.compile( + self._period_context_fmt + % { + "NonWord": self._re_non_word_chars, + "SentEndChars": self._re_sent_end_chars, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_period_context + + +_re_non_punct = re.compile(r"[^\W\d]", re.UNICODE) +"""Matches token types that are not merely punctuation. (Types for +numeric tokens are changed to ##number## and hence contain alpha.)""" + + +# } +###################################################################### + + +# //////////////////////////////////////////////////////////// +# { Helper Functions +# //////////////////////////////////////////////////////////// + + +def _pair_iter(iterator): + """ + Yields pairs of tokens from the given iterator such that each input + token will appear as the first element in a yielded tuple. The last + pair will have None as its second element. 
+ """ + iterator = iter(iterator) + try: + prev = next(iterator) + except StopIteration: + return + for el in iterator: + yield (prev, el) + prev = el + yield (prev, None) + + +###################################################################### +# { Punkt Parameters +###################################################################### + + +class PunktParameters: + """Stores data used to perform sentence boundary detection with Punkt.""" + + def __init__(self): + self.abbrev_types = set() + """A set of word types for known abbreviations.""" + + self.collocations = set() + """A set of word type tuples for known common collocations + where the first word ends in a period. E.g., ('S.', 'Bach') + is a common collocation in a text that discusses 'Johann + S. Bach'. These count as negative evidence for sentence + boundaries.""" + + self.sent_starters = set() + """A set of word types for words that often appear at the + beginning of sentences.""" + + self.ortho_context = defaultdict(int) + """A dictionary mapping word types to the set of orthographic + contexts that word type appears in. Contexts are represented + by adding orthographic context flags: ...""" + + def clear_abbrevs(self): + self.abbrev_types = set() + + def clear_collocations(self): + self.collocations = set() + + def clear_sent_starters(self): + self.sent_starters = set() + + def clear_ortho_context(self): + self.ortho_context = defaultdict(int) + + def add_ortho_context(self, typ, flag): + self.ortho_context[typ] |= flag + + def _debug_ortho_context(self, typ): + context = self.ortho_context[typ] + if context & _ORTHO_BEG_UC: + yield "BEG-UC" + if context & _ORTHO_MID_UC: + yield "MID-UC" + if context & _ORTHO_UNK_UC: + yield "UNK-UC" + if context & _ORTHO_BEG_LC: + yield "BEG-LC" + if context & _ORTHO_MID_LC: + yield "MID-LC" + if context & _ORTHO_UNK_LC: + yield "UNK-LC" + + +###################################################################### +# { PunktToken +###################################################################### + + +class PunktToken: + """Stores a token of text with annotations produced during + sentence boundary detection.""" + + _properties = ["parastart", "linestart", "sentbreak", "abbr", "ellipsis"] + __slots__ = ["tok", "type", "period_final"] + _properties + + def __init__(self, tok, **params): + self.tok = tok + self.type = self._get_type(tok) + self.period_final = tok.endswith(".") + + for prop in self._properties: + setattr(self, prop, None) + for k in params: + setattr(self, k, params[k]) + + # //////////////////////////////////////////////////////////// + # { Regular expressions for properties + # //////////////////////////////////////////////////////////// + # Note: [A-Za-z] is approximated by [^\W\d] in the general case. + _RE_ELLIPSIS = re.compile(r"\.\.+$") + _RE_NUMERIC = re.compile(r"^-?[\.,]?\d[\d,\.-]*\.?$") + _RE_INITIAL = re.compile(r"[^\W\d]\.$", re.UNICODE) + _RE_ALPHA = re.compile(r"[^\W\d]+$", re.UNICODE) + + # //////////////////////////////////////////////////////////// + # { Derived properties + # //////////////////////////////////////////////////////////// + + def _get_type(self, tok): + """Returns a case-normalized representation of the token.""" + return self._RE_NUMERIC.sub("##number##", tok.lower()) + + @property + def type_no_period(self): + """ + The type with its final period removed if it has one. 
+ """ + if len(self.type) > 1 and self.type[-1] == ".": + return self.type[:-1] + return self.type + + @property + def type_no_sentperiod(self): + """ + The type with its final period removed if it is marked as a + sentence break. + """ + if self.sentbreak: + return self.type_no_period + return self.type + + @property + def first_upper(self): + """True if the token's first character is uppercase.""" + return self.tok[0].isupper() + + @property + def first_lower(self): + """True if the token's first character is lowercase.""" + return self.tok[0].islower() + + @property + def first_case(self): + if self.first_lower: + return "lower" + if self.first_upper: + return "upper" + return "none" + + @property + def is_ellipsis(self): + """True if the token text is that of an ellipsis.""" + return self._RE_ELLIPSIS.match(self.tok) + + @property + def is_number(self): + """True if the token text is that of a number.""" + return self.type.startswith("##number##") + + @property + def is_initial(self): + """True if the token text is that of an initial.""" + return self._RE_INITIAL.match(self.tok) + + @property + def is_alpha(self): + """True if the token text is all alphabetic.""" + return self._RE_ALPHA.match(self.tok) + + @property + def is_non_punct(self): + """True if the token is either a number or is alphabetic.""" + return _re_non_punct.search(self.type) + + # //////////////////////////////////////////////////////////// + # { String representation + # //////////////////////////////////////////////////////////// + + def __repr__(self): + """ + A string representation of the token that can reproduce it + with eval(), which lists all the token's non-default + annotations. + """ + typestr = " type=%s," % repr(self.type) if self.type != self.tok else "" + + propvals = ", ".join( + f"{p}={repr(getattr(self, p))}" + for p in self._properties + if getattr(self, p) + ) + + return "{}({},{} {})".format( + self.__class__.__name__, + repr(self.tok), + typestr, + propvals, + ) + + def __str__(self): + """ + A string representation akin to that used by Kiss and Strunk. + """ + res = self.tok + if self.abbr: + res += "" + if self.ellipsis: + res += "" + if self.sentbreak: + res += "" + return res + + +###################################################################### +# { Punkt base class +###################################################################### + + +class PunktBaseClass: + """ + Includes common components of PunktTrainer and PunktSentenceTokenizer. + """ + + def __init__(self, lang_vars=None, token_cls=PunktToken, params=None): + if lang_vars is None: + lang_vars = PunktLanguageVars() + if params is None: + params = PunktParameters() + self._params = params + self._lang_vars = lang_vars + self._Token = token_cls + """The collection of parameters that determines the behavior + of the punkt tokenizer.""" + + # //////////////////////////////////////////////////////////// + # { Word tokenization + # //////////////////////////////////////////////////////////// + + def _tokenize_words(self, plaintext): + """ + Divide the given text into tokens, using the punkt word + segmentation regular expression, and generate the resulting list + of tokens augmented as three-tuples with two boolean values for whether + the given token occurs at the start of a paragraph or a new line, + respectively. 
+ """ + parastart = False + for line in plaintext.split("\n"): + if line.strip(): + line_toks = iter(self._lang_vars.word_tokenize(line)) + + try: + tok = next(line_toks) + except StopIteration: + continue + + yield self._Token(tok, parastart=parastart, linestart=True) + parastart = False + + for tok in line_toks: + yield self._Token(tok) + else: + parastart = True + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_first_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Perform the first pass of annotation, which makes decisions + based purely based on the word type of each word: + + - '?', '!', and '.' are marked as sentence breaks. + - sequences of two or more periods are marked as ellipsis. + - any word ending in '.' that's a known abbreviation is + marked as an abbreviation. + - any other word ending in '.' is marked as a sentence break. + + Return these annotations as a tuple of three sets: + + - sentbreak_toks: The indices of all sentence breaks. + - abbrev_toks: The indices of all abbreviations. + - ellipsis_toks: The indices of all ellipsis marks. + """ + for aug_tok in tokens: + self._first_pass_annotation(aug_tok) + yield aug_tok + + def _first_pass_annotation(self, aug_tok: PunktToken) -> None: + """ + Performs type-based annotation on a single token. + """ + + tok = aug_tok.tok + + if tok in self._lang_vars.sent_end_chars: + aug_tok.sentbreak = True + elif aug_tok.is_ellipsis: + aug_tok.ellipsis = True + elif aug_tok.period_final and not tok.endswith(".."): + if ( + tok[:-1].lower() in self._params.abbrev_types + or tok[:-1].lower().split("-")[-1] in self._params.abbrev_types + ): + + aug_tok.abbr = True + else: + aug_tok.sentbreak = True + + return + + +###################################################################### +# { Punkt Trainer +###################################################################### + + +class PunktTrainer(PunktBaseClass): + """Learns parameters used in Punkt sentence boundary detection.""" + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + self._type_fdist = FreqDist() + """A frequency distribution giving the frequency of each + case-normalized token type in the training data.""" + + self._num_period_toks = 0 + """The number of words ending in period in the training data.""" + + self._collocation_fdist = FreqDist() + """A frequency distribution giving the frequency of all + bigrams in the training data where the first word ends in a + period. Bigrams are encoded as tuples of word types. + Especially common collocations are extracted from this + frequency distribution, and stored in + ``_params``.``collocations ``.""" + + self._sent_starter_fdist = FreqDist() + """A frequency distribution giving the frequency of all words + that occur at the training data at the beginning of a sentence + (after the first pass of annotation). Especially common + sentence starters are extracted from this frequency + distribution, and stored in ``_params.sent_starters``. 
+ """ + + self._sentbreak_count = 0 + """The total number of sentence breaks identified in training, used for + calculating the frequent sentence starter heuristic.""" + + self._finalized = True + """A flag as to whether the training has been finalized by finding + collocations and sentence starters, or whether finalize_training() + still needs to be called.""" + + if train_text: + self.train(train_text, verbose, finalize=True) + + def get_params(self): + """ + Calculates and returns parameters for sentence boundary detection as + derived from training.""" + if not self._finalized: + self.finalize_training() + return self._params + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + ABBREV = 0.3 + """cut-off value whether a 'token' is an abbreviation""" + + IGNORE_ABBREV_PENALTY = False + """allows the disabling of the abbreviation penalty heuristic, which + exponentially disadvantages words that are found at times without a + final period.""" + + ABBREV_BACKOFF = 5 + """upper cut-off for Mikheev's(2002) abbreviation detection algorithm""" + + COLLOCATION = 7.88 + """minimal log-likelihood value that two tokens need to be considered + as a collocation""" + + SENT_STARTER = 30 + """minimal log-likelihood value that a token requires to be considered + as a frequent sentence starter""" + + INCLUDE_ALL_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word ends in a period. It may be useful in corpora where there is a lot + of variation that makes abbreviations like Mr difficult to identify.""" + + INCLUDE_ABBREV_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word is an abbreviation. Such collocations override the orthographic + heuristic, but not the sentence starter heuristic. This is overridden by + INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials + and ordinals are considered.""" + """""" + + MIN_COLLOC_FREQ = 1 + """this sets a minimum bound on the number of times a bigram needs to + appear before it can be considered a collocation, in addition to log + likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True.""" + + # //////////////////////////////////////////////////////////// + # { Training.. + # //////////////////////////////////////////////////////////// + + def train(self, text, verbose=False, finalize=True): + """ + Collects training data from a given text. If finalize is True, it + will determine all the parameters for sentence boundary detection. If + not, this will be delayed until get_params() or finalize_training() is + called. If verbose is True, abbreviations found will be listed. + """ + # Break the text into tokens; record which token indices correspond to + # line starts and paragraph starts; and determine their types. + self._train_tokens(self._tokenize_words(text), verbose) + if finalize: + self.finalize_training(verbose) + + def train_tokens(self, tokens, verbose=False, finalize=True): + """ + Collects training data from a given list of tokens. + """ + self._train_tokens((self._Token(t) for t in tokens), verbose) + if finalize: + self.finalize_training(verbose) + + def _train_tokens(self, tokens, verbose): + self._finalized = False + + # Ensure tokens are a list + tokens = list(tokens) + + # Find the frequency of each case-normalized type. (Don't + # strip off final periods.) 
Also keep track of the number of + # tokens that end in periods. + for aug_tok in tokens: + self._type_fdist[aug_tok.type] += 1 + if aug_tok.period_final: + self._num_period_toks += 1 + + # Look for new abbreviations, and for types that no longer are + unique_types = self._unique_types(tokens) + for abbr, score, is_add in self._reclassify_abbrev_types(unique_types): + if score >= self.ABBREV: + if is_add: + self._params.abbrev_types.add(abbr) + if verbose: + print(f" Abbreviation: [{score:6.4f}] {abbr}") + else: + if not is_add: + self._params.abbrev_types.remove(abbr) + if verbose: + print(f" Removed abbreviation: [{score:6.4f}] {abbr}") + + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = list(self._annotate_first_pass(tokens)) + + # Check what contexts each word type can appear in, given the + # case of its first letter. + self._get_orthography_data(tokens) + + # We need total number of sentence breaks to find sentence starters + self._sentbreak_count += self._get_sentbreak_count(tokens) + + # The remaining heuristics relate to pairs of tokens where the first + # ends in a period. + for aug_tok1, aug_tok2 in _pair_iter(tokens): + if not aug_tok1.period_final or not aug_tok2: + continue + + # Is the first token a rare abbreviation? + if self._is_rare_abbrev_type(aug_tok1, aug_tok2): + self._params.abbrev_types.add(aug_tok1.type_no_period) + if verbose: + print(" Rare Abbrev: %s" % aug_tok1.type) + + # Does second token have a high likelihood of starting a sentence? + if self._is_potential_sent_starter(aug_tok2, aug_tok1): + self._sent_starter_fdist[aug_tok2.type] += 1 + + # Is this bigram a potential collocation? + if self._is_potential_collocation(aug_tok1, aug_tok2): + self._collocation_fdist[ + (aug_tok1.type_no_period, aug_tok2.type_no_sentperiod) + ] += 1 + + def _unique_types(self, tokens): + return {aug_tok.type for aug_tok in tokens} + + def finalize_training(self, verbose=False): + """ + Uses data that has been gathered in training to determine likely + collocations and sentence starters. + """ + self._params.clear_sent_starters() + for typ, log_likelihood in self._find_sent_starters(): + self._params.sent_starters.add(typ) + if verbose: + print(f" Sent Starter: [{log_likelihood:6.4f}] {typ!r}") + + self._params.clear_collocations() + for (typ1, typ2), log_likelihood in self._find_collocations(): + self._params.collocations.add((typ1, typ2)) + if verbose: + print(f" Collocation: [{log_likelihood:6.4f}] {typ1!r}+{typ2!r}") + + self._finalized = True + + # //////////////////////////////////////////////////////////// + # { Overhead reduction + # //////////////////////////////////////////////////////////// + + def freq_threshold( + self, ortho_thresh=2, type_thresh=2, colloc_thres=2, sentstart_thresh=2 + ): + """ + Allows memory use to be reduced after much training by removing data + about rare tokens that are unlikely to have a statistical effect with + further training. Entries occurring above the given thresholds will be + retained. 
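
As a usage sketch of the pruning described above (`text_chunks` is a hypothetical iterable of training strings; the method names are the ones defined in this class):

    trainer = PunktTrainer()
    for chunk in text_chunks:                # hypothetical training corpus chunks
        trainer.train(chunk, finalize=False)
        trainer.freq_threshold()             # discard rare entries to bound memory
    trainer.finalize_training(verbose=True)
    params = trainer.get_params()
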
+ """ + if ortho_thresh > 1: + old_oc = self._params.ortho_context + self._params.clear_ortho_context() + for tok in self._type_fdist: + count = self._type_fdist[tok] + if count >= ortho_thresh: + self._params.ortho_context[tok] = old_oc[tok] + + self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh) + self._collocation_fdist = self._freq_threshold( + self._collocation_fdist, colloc_thres + ) + self._sent_starter_fdist = self._freq_threshold( + self._sent_starter_fdist, sentstart_thresh + ) + + def _freq_threshold(self, fdist, threshold): + """ + Returns a FreqDist containing only data with counts below a given + threshold, as well as a mapping (None -> count_removed). + """ + # We assume that there is more data below the threshold than above it + # and so create a new FreqDist rather than working in place. + res = FreqDist() + num_removed = 0 + for tok in fdist: + count = fdist[tok] + if count < threshold: + num_removed += 1 + else: + res[tok] += count + res[None] += num_removed + return res + + # //////////////////////////////////////////////////////////// + # { Orthographic data + # //////////////////////////////////////////////////////////// + + def _get_orthography_data(self, tokens): + """ + Collect information about whether each token type occurs + with different case patterns (i) overall, (ii) at + sentence-initial positions, and (iii) at sentence-internal + positions. + """ + # 'initial' or 'internal' or 'unknown' + context = "internal" + tokens = list(tokens) + + for aug_tok in tokens: + # If we encounter a paragraph break, then it's a good sign + # that it's a sentence break. But err on the side of + # caution (by not positing a sentence break) if we just + # saw an abbreviation. + if aug_tok.parastart and context != "unknown": + context = "initial" + + # If we're at the beginning of a line, then we can't decide + # between 'internal' and 'initial'. + if aug_tok.linestart and context == "internal": + context = "unknown" + + # Find the case-normalized type of the token. If it's a + # sentence-final token, strip off the period. + typ = aug_tok.type_no_sentperiod + + # Update the orthographic context table. + flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0) + if flag: + self._params.add_ortho_context(typ, flag) + + # Decide whether the next word is at a sentence boundary. + if aug_tok.sentbreak: + if not (aug_tok.is_number or aug_tok.is_initial): + context = "initial" + else: + context = "unknown" + elif aug_tok.ellipsis or aug_tok.abbr: + context = "unknown" + else: + context = "internal" + + # //////////////////////////////////////////////////////////// + # { Abbreviations + # //////////////////////////////////////////////////////////// + + def _reclassify_abbrev_types(self, types): + """ + (Re)classifies each given token if + - it is period-final and not a known abbreviation; or + - it is not period-final and is otherwise a known abbreviation + by checking whether its previous classification still holds according + to the heuristics of section 3. + Yields triples (abbr, score, is_add) where abbr is the type in question, + score is its log-likelihood with penalties applied, and is_add specifies + whether the present type is a candidate for inclusion or exclusion as an + abbreviation, such that: + - (is_add and score >= 0.3) suggests a new abbreviation; and + - (not is_add and score < 0.3) suggests excluding an abbreviation. 
+ """ + # (While one could recalculate abbreviations from all .-final tokens at + # every iteration, in cases requiring efficiency, the number of tokens + # in the present training document will be much less.) + + for typ in types: + # Check some basic conditions, to rule out words that are + # clearly not abbrev_types. + if not _re_non_punct.search(typ) or typ == "##number##": + continue + + if typ.endswith("."): + if typ in self._params.abbrev_types: + continue + typ = typ[:-1] + is_add = True + else: + if typ not in self._params.abbrev_types: + continue + is_add = False + + # Count how many periods & nonperiods are in the + # candidate. + num_periods = typ.count(".") + 1 + num_nonperiods = len(typ) - num_periods + 1 + + # Let be the candidate without the period, and + # be the period. Find a log likelihood ratio that + # indicates whether occurs as a single unit (high + # value of log_likelihood), or as two independent units and + # (low value of log_likelihood). + count_with_period = self._type_fdist[typ + "."] + count_without_period = self._type_fdist[typ] + log_likelihood = self._dunning_log_likelihood( + count_with_period + count_without_period, + self._num_period_toks, + count_with_period, + self._type_fdist.N(), + ) + + # Apply three scaling factors to 'tweak' the basic log + # likelihood ratio: + # F_length: long word -> less likely to be an abbrev + # F_periods: more periods -> more likely to be an abbrev + # F_penalty: penalize occurrences w/o a period + f_length = math.exp(-num_nonperiods) + f_periods = num_periods + f_penalty = int(self.IGNORE_ABBREV_PENALTY) or math.pow( + num_nonperiods, -count_without_period + ) + score = log_likelihood * f_length * f_periods * f_penalty + + yield typ, score, is_add + + def find_abbrev_types(self): + """ + Recalculates abbreviations given type frequencies, despite no prior + determination of abbreviations. + This fails to include abbreviations otherwise found as "rare". + """ + self._params.clear_abbrevs() + tokens = (typ for typ in self._type_fdist if typ and typ.endswith(".")) + for abbr, score, _is_add in self._reclassify_abbrev_types(tokens): + if score >= self.ABBREV: + self._params.abbrev_types.add(abbr) + + # This function combines the work done by the original code's + # functions `count_orthography_context`, `get_orthography_count`, + # and `get_rare_abbreviations`. + def _is_rare_abbrev_type(self, cur_tok, next_tok): + """ + A word type is counted as a rare abbreviation if... + - it's not already marked as an abbreviation + - it occurs fewer than ABBREV_BACKOFF times + - either it is followed by a sentence-internal punctuation + mark, *or* it is followed by a lower-case word that + sometimes appears with upper case, but never occurs with + lower case at the beginning of sentences. + """ + if cur_tok.abbr or not cur_tok.sentbreak: + return False + + # Find the case-normalized type of the token. If it's + # a sentence-final token, strip off the period. + typ = cur_tok.type_no_sentperiod + + # Proceed only if the type hasn't been categorized as an + # abbreviation already, and is sufficiently rare... + count = self._type_fdist[typ] + self._type_fdist[typ[:-1]] + if typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF: + return False + + # Record this token as an abbreviation if the next + # token is a sentence-internal punctuation mark. + # [XX] :1 or check the whole thing?? + if next_tok.tok[:1] in self._lang_vars.internal_punctuation: + return True + + # Record this type as an abbreviation if the next + # token... 
(i) starts with a lower case letter, + # (ii) sometimes occurs with an uppercase letter, + # and (iii) never occus with an uppercase letter + # sentence-internally. + # [xx] should the check for (ii) be modified?? + if next_tok.first_lower: + typ2 = next_tok.type_no_sentperiod + typ2ortho_context = self._params.ortho_context[typ2] + if (typ2ortho_context & _ORTHO_BEG_UC) and not ( + typ2ortho_context & _ORTHO_MID_UC + ): + return True + + # //////////////////////////////////////////////////////////// + # { Log Likelihoods + # //////////////////////////////////////////////////////////// + + # helper for _reclassify_abbrev_types: + @staticmethod + def _dunning_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that calculates the modified Dunning log-likelihood + ratio scores for abbreviation candidates. The details of how + this works is available in the paper. + """ + p1 = count_b / N + p2 = 0.99 + + null_hypo = count_ab * math.log(p1) + (count_a - count_ab) * math.log(1.0 - p1) + alt_hypo = count_ab * math.log(p2) + (count_a - count_ab) * math.log(1.0 - p2) + + likelihood = null_hypo - alt_hypo + + return -2.0 * likelihood + + @staticmethod + def _col_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that will just compute log-likelihood estimate, in + the original paper it's described in algorithm 6 and 7. + + This *should* be the original Dunning log-likelihood values, + unlike the previous log_l function where it used modified + Dunning log-likelihood values + """ + p = count_b / N + p1 = count_ab / count_a + try: + p2 = (count_b - count_ab) / (N - count_a) + except ZeroDivisionError: + p2 = 1 + + try: + summand1 = count_ab * math.log(p) + (count_a - count_ab) * math.log(1.0 - p) + except ValueError: + summand1 = 0 + + try: + summand2 = (count_b - count_ab) * math.log(p) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p) + except ValueError: + summand2 = 0 + + if count_a == count_ab or p1 <= 0 or p1 >= 1: + summand3 = 0 + else: + summand3 = count_ab * math.log(p1) + (count_a - count_ab) * math.log( + 1.0 - p1 + ) + + if count_b == count_ab or p2 <= 0 or p2 >= 1: + summand4 = 0 + else: + summand4 = (count_b - count_ab) * math.log(p2) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p2) + + likelihood = summand1 + summand2 - summand3 - summand4 + + return -2.0 * likelihood + + # //////////////////////////////////////////////////////////// + # { Collocation Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_collocation(self, aug_tok1, aug_tok2): + """ + Returns True if the pair of tokens may form a collocation given + log-likelihood statistics. + """ + return ( + ( + self.INCLUDE_ALL_COLLOCS + or (self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) + or (aug_tok1.sentbreak and (aug_tok1.is_number or aug_tok1.is_initial)) + ) + and aug_tok1.is_non_punct + and aug_tok2.is_non_punct + ) + + def _find_collocations(self): + """ + Generates likely collocations and their log-likelihood. 
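
The acceptance test applied in the loop below combines several conditions; as a compact restatement (a sketch only, with the class constants passed in as defaults):

    def passes_collocation_filter(typ1_count, typ2_count, col_count,
                                  total_tokens, log_likelihood,
                                  COLLOCATION=7.88, MIN_COLLOC_FREQ=1):
        # Both word types must occur more than once, the bigram count must be
        # plausible given the unigram counts, the log-likelihood must clear the
        # COLLOCATION cut-off, and the pair must co-occur more often than the
        # unigram frequencies alone would predict.
        return (typ1_count > 1
                and typ2_count > 1
                and MIN_COLLOC_FREQ < col_count <= min(typ1_count, typ2_count)
                and log_likelihood >= COLLOCATION
                and total_tokens / typ1_count > typ2_count / col_count)
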
+ """ + for types in self._collocation_fdist: + try: + typ1, typ2 = types + except TypeError: + # types may be None after calling freq_threshold() + continue + if typ2 in self._params.sent_starters: + continue + + col_count = self._collocation_fdist[types] + typ1_count = self._type_fdist[typ1] + self._type_fdist[typ1 + "."] + typ2_count = self._type_fdist[typ2] + self._type_fdist[typ2 + "."] + if ( + typ1_count > 1 + and typ2_count > 1 + and self.MIN_COLLOC_FREQ < col_count <= min(typ1_count, typ2_count) + ): + + log_likelihood = self._col_log_likelihood( + typ1_count, typ2_count, col_count, self._type_fdist.N() + ) + # Filter out the not-so-collocative + if log_likelihood >= self.COLLOCATION and ( + self._type_fdist.N() / typ1_count > typ2_count / col_count + ): + yield (typ1, typ2), log_likelihood + + # //////////////////////////////////////////////////////////// + # { Sentence-Starter Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_sent_starter(self, cur_tok, prev_tok): + """ + Returns True given a token and the token that precedes it if it + seems clear that the token is beginning a sentence. + """ + # If a token (i) is preceded by a sentece break that is + # not a potential ordinal number or initial, and (ii) is + # alphabetic, then it is a a sentence-starter. + return ( + prev_tok.sentbreak + and not (prev_tok.is_number or prev_tok.is_initial) + and cur_tok.is_alpha + ) + + def _find_sent_starters(self): + """ + Uses collocation heuristics for each candidate token to + determine if it frequently starts sentences. + """ + for typ in self._sent_starter_fdist: + if not typ: + continue + + typ_at_break_count = self._sent_starter_fdist[typ] + typ_count = self._type_fdist[typ] + self._type_fdist[typ + "."] + if typ_count < typ_at_break_count: + # needed after freq_threshold + continue + + log_likelihood = self._col_log_likelihood( + self._sentbreak_count, + typ_count, + typ_at_break_count, + self._type_fdist.N(), + ) + + if ( + log_likelihood >= self.SENT_STARTER + and self._type_fdist.N() / self._sentbreak_count + > typ_count / typ_at_break_count + ): + yield typ, log_likelihood + + def _get_sentbreak_count(self, tokens): + """ + Returns the number of sentence breaks marked in a given set of + augmented tokens. + """ + return sum(1 for aug_tok in tokens if aug_tok.sentbreak) + + +###################################################################### +# { Punkt Sentence Tokenizer +###################################################################### + + +class PunktSentenceTokenizer(PunktBaseClass, TokenizerI): + """ + A sentence tokenizer which uses an unsupervised algorithm to build + a model for abbreviation words, collocations, and words that start + sentences; and then uses that model to find sentence boundaries. + This approach has been shown to work well for many European + languages. + """ + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + """ + train_text can either be the sole training text for this sentence + boundary detector, or can be a PunktParameters object. + """ + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + if train_text: + self._params = self.train(train_text, verbose) + + def train(self, train_text, verbose=False): + """ + Derives parameters from a given training text, or uses the parameters + given. Repeated calls to this method destroy previous parameters. For + incremental training, instantiate a separate PunktTrainer instance. 
+ """ + if not isinstance(train_text, str): + return train_text + return PunktTrainer( + train_text, lang_vars=self._lang_vars, token_cls=self._Token + ).get_params() + + # //////////////////////////////////////////////////////////// + # { Tokenization + # //////////////////////////////////////////////////////////// + + def tokenize(self, text: str, realign_boundaries: bool = True) -> List[str]: + """ + Given a text, returns a list of the sentences in that text. + """ + return list(self.sentences_from_text(text, realign_boundaries)) + + def debug_decisions(self, text: str) -> Iterator[Dict[str, Any]]: + """ + Classifies candidate periods as sentence breaks, yielding a dict for + each that may be used to understand why the decision was made. + + See format_debug_decision() to help make this output readable. + """ + + for match, decision_text in self._match_potential_end_contexts(text): + tokens = self._tokenize_words(decision_text) + tokens = list(self._annotate_first_pass(tokens)) + while tokens and not tokens[0].tok.endswith(self._lang_vars.sent_end_chars): + tokens.pop(0) + yield { + "period_index": match.end() - 1, + "text": decision_text, + "type1": tokens[0].type, + "type2": tokens[1].type, + "type1_in_abbrs": bool(tokens[0].abbr), + "type1_is_initial": bool(tokens[0].is_initial), + "type2_is_sent_starter": tokens[1].type_no_sentperiod + in self._params.sent_starters, + "type2_ortho_heuristic": self._ortho_heuristic(tokens[1]), + "type2_ortho_contexts": set( + self._params._debug_ortho_context(tokens[1].type_no_sentperiod) + ), + "collocation": ( + tokens[0].type_no_sentperiod, + tokens[1].type_no_sentperiod, + ) + in self._params.collocations, + "reason": self._second_pass_annotation(tokens[0], tokens[1]) + or REASON_DEFAULT_DECISION, + "break_decision": tokens[0].sentbreak, + } + + def span_tokenize( + self, text: str, realign_boundaries: bool = True + ) -> Iterator[Tuple[int, int]]: + """ + Given a text, generates (start, end) spans of sentences + in the text. + """ + slices = self._slices_from_text(text) + if realign_boundaries: + slices = self._realign_boundaries(text, slices) + for sentence in slices: + yield (sentence.start, sentence.stop) + + def sentences_from_text( + self, text: str, realign_boundaries: bool = True + ) -> List[str]: + """ + Given a text, generates the sentences in that text by only + testing candidate sentence breaks. If realign_boundaries is + True, includes in the sentence closing punctuation that + follows the period. + """ + return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)] + + def _get_last_whitespace_index(self, text: str) -> int: + """ + Given a text, find the index of the *last* occurrence of *any* + whitespace character, i.e. " ", "\n", "\t", "\r", etc. + If none is found, return 0. + """ + for i in range(len(text) - 1, -1, -1): + if text[i] in string.whitespace: + return i + return 0 + + def _match_potential_end_contexts(self, text: str) -> Iterator[Tuple[Match, str]]: + """ + Given a text, find the matches of potential sentence breaks, + alongside the contexts surrounding these sentence breaks. + + Since the fix for the ReDOS discovered in issue #2866, we no longer match + the word before a potential end of sentence token. Instead, we use a separate + regex for this. As a consequence, `finditer`'s desire to find non-overlapping + matches no longer aids us in finding the single longest match. + Where previously, we could use:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." 
+ >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +SKIP + [] + + Now we have to find the word before (i.e. 'acting') separately, and `finditer` + returns:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +NORMALIZE_WHITESPACE + [, + , + ] + + So, we need to find the word before the match from right to left, and then manually remove + the overlaps. That is what this method does:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._match_potential_end_contexts(text)) + [(, 'acting!!! I')] + + :param text: String of one or more sentences + :type text: str + :return: Generator of match-context tuples. + :rtype: Iterator[Tuple[Match, str]] + """ + previous_slice = slice(0, 0) + previous_match = None + for match in self._lang_vars.period_context_re().finditer(text): + + # Get the slice of the previous word + before_text = text[previous_slice.stop : match.start()] + index_after_last_space = self._get_last_whitespace_index(before_text) + if index_after_last_space: + # + 1 to exclude the space itself + index_after_last_space += previous_slice.stop + 1 + else: + index_after_last_space = previous_slice.start + prev_word_slice = slice(index_after_last_space, match.start()) + + # If the previous slice does not overlap with this slice, then + # we can yield the previous match and slice. If there is an overlap, + # then we do not yield the previous match and slice. + if previous_match and previous_slice.stop <= prev_word_slice.start: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + previous_match = match + previous_slice = prev_word_slice + + # Yield the last match and context, if it exists + if previous_match: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + + def _slices_from_text(self, text: str) -> Iterator[slice]: + last_break = 0 + for match, context in self._match_potential_end_contexts(text): + if self.text_contains_sentbreak(context): + yield slice(last_break, match.end()) + if match.group("next_tok"): + # next sentence starts after whitespace + last_break = match.start("next_tok") + else: + # next sentence starts at following punctuation + last_break = match.end() + # The last sentence should not contain trailing whitespace. + yield slice(last_break, len(text.rstrip())) + + def _realign_boundaries( + self, text: str, slices: Iterator[slice] + ) -> Iterator[slice]: + """ + Attempts to realign punctuation that falls after the period but + should otherwise be included in the same sentence. + + For example: "(Sent1.) Sent2." will otherwise be split as:: + + ["(Sent1.", ") Sent1."]. + + This method will produce:: + + ["(Sent1.)", "Sent2."]. + """ + realign = 0 + for sentence1, sentence2 in _pair_iter(slices): + sentence1 = slice(sentence1.start + realign, sentence1.stop) + if not sentence2: + if text[sentence1]: + yield sentence1 + continue + + m = self._lang_vars.re_boundary_realignment.match(text[sentence2]) + if m: + yield slice(sentence1.start, sentence2.start + len(m.group(0).rstrip())) + realign = m.end() + else: + realign = 0 + if text[sentence1]: + yield sentence1 + + def text_contains_sentbreak(self, text: str) -> bool: + """ + Returns True if the given text includes a sentence break. 
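
For orientation, `tokenize()` returns sentence strings while `span_tokenize()` yields `(start, end)` offsets such that `text[start:end]` reproduces each sentence. A small sketch with the default, untrained parameters (which still break on the sentence-ending characters); behaviour on real text depends on trained parameters:

    pst = PunktSentenceTokenizer()       # default, untrained parameters
    text = "Hello there. How are you?"
    pst.tokenize(text)                   # expected: ['Hello there.', 'How are you?']
    list(pst.span_tokenize(text))        # expected: [(0, 12), (13, 25)]
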
+ """ + found = False # used to ignore last token + for tok in self._annotate_tokens(self._tokenize_words(text)): + if found: + return True + if tok.sentbreak: + found = True + return False + + def sentences_from_text_legacy(self, text: str) -> Iterator[str]: + """ + Given a text, generates the sentences in that text. Annotates all + tokens, rather than just those with possible sentence breaks. Should + produce the same results as ``sentences_from_text``. + """ + tokens = self._annotate_tokens(self._tokenize_words(text)) + return self._build_sentence_list(text, tokens) + + def sentences_from_tokens( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Given a sequence of tokens, generates lists of tokens, each list + corresponding to a sentence. + """ + tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens)) + sentence = [] + for aug_tok in tokens: + sentence.append(aug_tok.tok) + if aug_tok.sentbreak: + yield sentence + sentence = [] + if sentence: + yield sentence + + def _annotate_tokens(self, tokens: Iterator[PunktToken]) -> Iterator[PunktToken]: + """ + Given a set of tokens augmented with markers for line-start and + paragraph-start, returns an iterator through those tokens with full + annotation including predicted sentence breaks. + """ + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = self._annotate_first_pass(tokens) + + # Make a second pass through the document, using token context + # information to change our preliminary decisions about where + # sentence breaks, abbreviations, and ellipsis occurs. + tokens = self._annotate_second_pass(tokens) + + ## [XX] TESTING + # tokens = list(tokens) + # self.dump(tokens) + + return tokens + + def _build_sentence_list( + self, text: str, tokens: Iterator[PunktToken] + ) -> Iterator[str]: + """ + Given the original text and the list of augmented word tokens, + construct and return a tokenized list of sentence strings. + """ + # Most of the work here is making sure that we put the right + # pieces of whitespace back in all the right places. + + # Our position in the source text, used to keep track of which + # whitespace to add: + pos = 0 + + # A regular expression that finds pieces of whitespace: + white_space_regexp = re.compile(r"\s*") + + sentence = "" + for aug_tok in tokens: + tok = aug_tok.tok + + # Find the whitespace before this token, and update pos. + white_space = white_space_regexp.match(text, pos).group() + pos += len(white_space) + + # Some of the rules used by the punkt word tokenizer + # strip whitespace out of the text, resulting in tokens + # that contain whitespace in the source text. If our + # token doesn't match, see if adding whitespace helps. + # If so, then use the version with whitespace. + if text[pos : pos + len(tok)] != tok: + pat = r"\s*".join(re.escape(c) for c in tok) + m = re.compile(pat).match(text, pos) + if m: + tok = m.group() + + # Move our position pointer to the end of the token. + assert text[pos : pos + len(tok)] == tok + pos += len(tok) + + # Add this token. If it's not at the beginning of the + # sentence, then include any whitespace that separated it + # from the previous token. + if sentence: + sentence += white_space + sentence += tok + + # If we're at a sentence break, then start a new sentence. + if aug_tok.sentbreak: + yield sentence + sentence = "" + + # If the last sentence is empty, discard it. 
+ if sentence: + yield sentence + + # [XX] TESTING + def dump(self, tokens: Iterator[PunktToken]) -> None: + print("writing to /tmp/punkt.new...") + with open("/tmp/punkt.new", "w") as outfile: + for aug_tok in tokens: + if aug_tok.parastart: + outfile.write("\n\n") + elif aug_tok.linestart: + outfile.write("\n") + else: + outfile.write(" ") + + outfile.write(str(aug_tok)) + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + PUNCTUATION = tuple(";:,.!?") + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_second_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Performs a token-based classification (section 4) over the given + tokens, making use of the orthographic heuristic (4.1.1), collocation + heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3). + """ + for token1, token2 in _pair_iter(tokens): + self._second_pass_annotation(token1, token2) + yield token1 + + def _second_pass_annotation( + self, aug_tok1: PunktToken, aug_tok2: Optional[PunktToken] + ) -> Optional[str]: + """ + Performs token-based classification over a pair of contiguous tokens + updating the first. + """ + # Is it the last token? We can't do anything then. + if not aug_tok2: + return + + if not aug_tok1.period_final: + # We only care about words ending in periods. + return + typ = aug_tok1.type_no_period + next_typ = aug_tok2.type_no_sentperiod + tok_is_initial = aug_tok1.is_initial + + # [4.1.2. Collocation Heuristic] If there's a + # collocation between the word before and after the + # period, then label tok as an abbreviation and NOT + # a sentence break. Note that collocations with + # frequent sentence starters as their second word are + # excluded in training. + if (typ, next_typ) in self._params.collocations: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_KNOWN_COLLOCATION + + # [4.2. Token-Based Reclassification of Abbreviations] If + # the token is an abbreviation or an ellipsis, then decide + # whether we should *also* classify it as a sentbreak. + if (aug_tok1.abbr or aug_tok1.ellipsis) and (not tok_is_initial): + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. + is_sent_starter = self._ortho_heuristic(aug_tok2) + if is_sent_starter == True: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC + + # [4.1.3. Frequent Sentence Starter Heruistic] If the + # next word is capitalized, and is a member of the + # frequent-sentence-starters list, then label tok as a + # sentence break. + if aug_tok2.first_upper and next_typ in self._params.sent_starters: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_SENTENCE_STARTER + + # [4.3. Token-Based Detection of Initials and Ordinals] + # Check if any initials or ordinals tokens that are marked + # as sentbreaks should be reclassified as abbreviations. + if tok_is_initial or typ == "##number##": + + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. 
+ is_sent_starter = self._ortho_heuristic(aug_tok2) + + if is_sent_starter == False: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + if tok_is_initial: + return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC + return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC + + # Special heuristic for initials: if orthogrpahic + # heuristic is unknown, and next word is always + # capitalized, then mark as abbrev (eg: J. Bach). + if ( + is_sent_starter == "unknown" + and tok_is_initial + and aug_tok2.first_upper + and not (self._params.ortho_context[next_typ] & _ORTHO_LC) + ): + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC + + return + + def _ortho_heuristic(self, aug_tok: PunktToken) -> Union[bool, str]: + """ + Decide whether the given token is the first token in a sentence. + """ + # Sentences don't start with punctuation marks: + if aug_tok.tok in self.PUNCTUATION: + return False + + ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod] + + # If the word is capitalized, occurs at least once with a + # lower case first letter, and never occurs with an upper case + # first letter sentence-internally, then it's a sentence starter. + if ( + aug_tok.first_upper + and (ortho_context & _ORTHO_LC) + and not (ortho_context & _ORTHO_MID_UC) + ): + return True + + # If the word is lower case, and either (a) we've seen it used + # with upper case, or (b) we've never seen it used + # sentence-initially with lower case, then it's not a sentence + # starter. + if aug_tok.first_lower and ( + (ortho_context & _ORTHO_UC) or not (ortho_context & _ORTHO_BEG_LC) + ): + return False + + # Otherwise, we're not sure. + return "unknown" + + +DEBUG_DECISION_FMT = """Text: {text!r} (at offset {period_index}) +Sentence break? {break_decision} ({reason}) +Collocation? {collocation} +{type1!r}: + known abbreviation: {type1_in_abbrs} + is initial: {type1_is_initial} +{type2!r}: + known sentence starter: {type2_is_sent_starter} + orthographic heuristic suggests is a sentence starter? {type2_ortho_heuristic} + orthographic contexts in training: {type2_ortho_contexts} +""" + + +def format_debug_decision(d): + return DEBUG_DECISION_FMT.format(**d) + + +def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer): + """Builds a punkt model and applies it to the same text""" + cleanup = ( + lambda s: re.compile(r"(?:\r|^\s+)", re.MULTILINE).sub("", s).replace("\n", " ") + ) + trainer = train_cls() + trainer.INCLUDE_ALL_COLLOCS = True + trainer.train(text) + sbd = tok_cls(trainer.get_params()) + for sentence in sbd.sentences_from_text(text): + print(cleanup(sentence)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/regexp.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..e3875b1447ba2843b7e6f186de24b4e67baf8844 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/regexp.py @@ -0,0 +1,220 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +r""" +Regular-Expression Tokenizers + +A ``RegexpTokenizer`` splits a string into substrings using a regular expression. 
+For example, the following tokenizer forms tokens out of alphabetic sequences, +money expressions, and any other non-whitespace sequences: + + >>> from nltk.tokenize import RegexpTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +A ``RegexpTokenizer`` can use its regexp to match delimiters instead: + + >>> tokenizer = RegexpTokenizer(r'\s+', gaps=True) + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + +Note that empty tokens are not returned when the delimiter appears at +the start or end of the string. + +The material between the tokens is discarded. For example, +the following tokenizer selects just the capitalized words: + + >>> capword_tokenizer = RegexpTokenizer(r'[A-Z]\w+') + >>> capword_tokenizer.tokenize(s) + ['Good', 'New', 'York', 'Please', 'Thanks'] + +This module contains several subclasses of ``RegexpTokenizer`` +that use pre-defined regular expressions. + + >>> from nltk.tokenize import BlanklineTokenizer + >>> # Uses '\s*\n\s*\n\s*': + >>> BlanklineTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', + 'Thanks.'] + +All of the regular expression tokenizers are also available as functions: + + >>> from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize + >>> regexp_tokenize(s, pattern=r'\w+|\$[\d\.]+|\S+') # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> blankline_tokenize(s) + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', 'Thanks.'] + +Caution: The function ``regexp_tokenize()`` takes the text as its +first argument, and the regular expression pattern as its second +argument. This differs from the conventions used by Python's +``re`` functions, where the pattern is always the first argument. +(This is for consistency with the other NLTK tokenizers.) +""" + +import re + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import regexp_span_tokenize + + +class RegexpTokenizer(TokenizerI): + r""" + A tokenizer that splits a string using a regular expression, which + matches either the tokens or the separators between tokens. + + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + + :type pattern: str + :param pattern: The pattern used to build this tokenizer. + (This pattern must not contain capturing parentheses; + Use non-capturing parentheses, e.g. (?:...), instead) + :type gaps: bool + :param gaps: True if this tokenizer's pattern should be used + to find separators between tokens; False if this + tokenizer's pattern should be used to find the tokens + themselves. + :type discard_empty: bool + :param discard_empty: True if any empty tokens `''` + generated by the tokenizer should be discarded. Empty + tokens can only be generated if `_gaps == True`. 
+ :type flags: int + :param flags: The regexp flags used to compile this + tokenizer's pattern. By default, the following flags are + used: `re.UNICODE | re.MULTILINE | re.DOTALL`. + + """ + + def __init__( + self, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, + ): + # If they gave us a regexp object, extract the pattern. + pattern = getattr(pattern, "pattern", pattern) + + self._pattern = pattern + self._gaps = gaps + self._discard_empty = discard_empty + self._flags = flags + self._regexp = None + + def _check_regexp(self): + if self._regexp is None: + self._regexp = re.compile(self._pattern, self._flags) + + def tokenize(self, text): + self._check_regexp() + # If our regexp matches gaps, use re.split: + if self._gaps: + if self._discard_empty: + return [tok for tok in self._regexp.split(text) if tok] + else: + return self._regexp.split(text) + + # If our regexp matches tokens, use re.findall: + else: + return self._regexp.findall(text) + + def span_tokenize(self, text): + self._check_regexp() + + if self._gaps: + for left, right in regexp_span_tokenize(text, self._regexp): + if not (self._discard_empty and left == right): + yield left, right + else: + for m in re.finditer(self._regexp, text): + yield m.span() + + def __repr__(self): + return "{}(pattern={!r}, gaps={!r}, discard_empty={!r}, flags={!r})".format( + self.__class__.__name__, + self._pattern, + self._gaps, + self._discard_empty, + self._flags, + ) + + +class WhitespaceTokenizer(RegexpTokenizer): + r""" + Tokenize a string on whitespace (space, tab, newline). + In general, users should use the string ``split()`` method instead. + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WhitespaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s+", gaps=True) + + +class BlanklineTokenizer(RegexpTokenizer): + """ + Tokenize a string, treating any sequence of blank lines as a delimiter. + Blank lines are defined as lines containing no characters, except for + space or tab characters. + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s*\n\s*\n\s*", gaps=True) + + +class WordPunctTokenizer(RegexpTokenizer): + r""" + Tokenize a text into a sequence of alphabetic and + non-alphabetic characters, using the regexp ``\w+|[^\w\s]+``. + + >>> from nltk.tokenize import WordPunctTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WordPunctTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\w+|[^\w\s]+") + + +###################################################################### +# { Tokenization Functions +###################################################################### + + +def regexp_tokenize( + text, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, +): + """ + Return a tokenized copy of *text*. See :class:`.RegexpTokenizer` + for descriptions of the arguments. 
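
As a brief supplement to the module docstring above, `span_tokenize()` (defined on `RegexpTokenizer` but not shown in the examples) yields character offsets rather than strings; a short sketch:

    from nltk.tokenize import RegexpTokenizer, regexp_tokenize

    s = "Good muffins cost $3.88 in New York."
    regexp_tokenize(s, pattern=r"\w+|\$[\d\.]+|\S+")
    # ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.']

    tokenizer = RegexpTokenizer(r"\w+")
    list(tokenizer.span_tokenize("to be"))
    # [(0, 2), (3, 5)]  -- offsets such that "to be"[start:end] is each token
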
+ """ + tokenizer = RegexpTokenizer(pattern, gaps, discard_empty, flags) + return tokenizer.tokenize(text) + + +blankline_tokenize = BlanklineTokenizer().tokenize +wordpunct_tokenize = WordPunctTokenizer().tokenize diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/sexpr.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/sexpr.py new file mode 100644 index 0000000000000000000000000000000000000000..0776642fbd2759c3f37352a97b18d915198cc20c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/sexpr.py @@ -0,0 +1,140 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Yoav Goldberg +# Steven Bird (minor edits) +# URL: +# For license information, see LICENSE.TXT + +""" +S-Expression Tokenizer + +``SExprTokenizer`` is used to find parenthesized expressions in a +string. In particular, it divides a string into a sequence of +substrings that are either parenthesized expressions (including any +nested parenthesized expressions), or other whitespace-separated +tokens. + + >>> from nltk.tokenize import SExprTokenizer + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +By default, `SExprTokenizer` will raise a ``ValueError`` exception if +used to tokenize an expression with non-matching parentheses: + + >>> SExprTokenizer().tokenize('c) d) e (f (g') + Traceback (most recent call last): + ... + ValueError: Un-matched close paren at char 1 + +The ``strict`` argument can be set to False to allow for +non-matching parentheses. Any unmatched close parentheses will be +listed as their own s-expression; and the last partial sexpr with +unmatched open parentheses will be listed as its own sexpr: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + +The characters used for open and close parentheses may be customized +using the ``parens`` argument to the `SExprTokenizer` constructor: + + >>> SExprTokenizer(parens='{}').tokenize('{a b {c d}} e f {g}') + ['{a b {c d}}', 'e', 'f', '{g}'] + +The s-expression tokenizer is also available as a function: + + >>> from nltk.tokenize import sexpr_tokenize + >>> sexpr_tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +""" + +import re + +from nltk.tokenize.api import TokenizerI + + +class SExprTokenizer(TokenizerI): + """ + A tokenizer that divides strings into s-expressions. + An s-expresion can be either: + + - a parenthesized expression, including any nested parenthesized + expressions, or + - a sequence of non-whitespace non-parenthesis characters. + + For example, the string ``(a (b c)) d e (f)`` consists of four + s-expressions: ``(a (b c))``, ``d``, ``e``, and ``(f)``. + + By default, the characters ``(`` and ``)`` are treated as open and + close parentheses, but alternative strings may be specified. + + :param parens: A two-element sequence specifying the open and close parentheses + that should be used to find sexprs. This will typically be either a + two-character string, or a list of two strings. + :type parens: str or list + :param strict: If true, then raise an exception when tokenizing an ill-formed sexpr. 
+ """ + + def __init__(self, parens="()", strict=True): + if len(parens) != 2: + raise ValueError("parens must contain exactly two strings") + self._strict = strict + self._open_paren = parens[0] + self._close_paren = parens[1] + self._paren_regexp = re.compile( + f"{re.escape(parens[0])}|{re.escape(parens[1])}" + ) + + def tokenize(self, text): + """ + Return a list of s-expressions extracted from *text*. + For example: + + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + + All parentheses are assumed to mark s-expressions. + (No special processing is done to exclude parentheses that occur + inside strings, or following backslash characters.) + + If the given expression contains non-matching parentheses, + then the behavior of the tokenizer depends on the ``strict`` + parameter to the constructor. If ``strict`` is ``True``, then + raise a ``ValueError``. If ``strict`` is ``False``, then any + unmatched close parentheses will be listed as their own + s-expression; and the last partial s-expression with unmatched open + parentheses will be listed as its own s-expression: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + + :param text: the string to be tokenized + :type text: str or iter(str) + :rtype: iter(str) + """ + result = [] + pos = 0 + depth = 0 + for m in self._paren_regexp.finditer(text): + paren = m.group() + if depth == 0: + result += text[pos : m.start()].split() + pos = m.start() + if paren == self._open_paren: + depth += 1 + if paren == self._close_paren: + if self._strict and depth == 0: + raise ValueError("Un-matched close paren at char %d" % m.start()) + depth = max(0, depth - 1) + if depth == 0: + result.append(text[pos : m.end()]) + pos = m.end() + if self._strict and depth > 0: + raise ValueError("Un-matched open paren at char %d" % pos) + if pos < len(text): + result.append(text[pos:]) + return result + + +sexpr_tokenize = SExprTokenizer().tokenize diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py new file mode 100644 index 0000000000000000000000000000000000000000..24e43caae2dae6e3c76e66704fa9b856a6dc348c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py @@ -0,0 +1,194 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Sonority Sequencing Principle (SSP) is a language agnostic algorithm proposed +by Otto Jesperson in 1904. The sonorous quality of a phoneme is judged by the +openness of the lips. Syllable breaks occur before troughs in sonority. For more +on the SSP see Selkirk (1984). + +The default implementation uses the English alphabet, but the `sonority_hiearchy` +can be modified to IPA or any other alphabet for the use-case. The SSP is a +universal syllabification algorithm, but that does not mean it performs equally +across languages. Bartlett et al. (2009) is a good benchmark for English accuracy +if utilizing IPA (pg. 311). + +Importantly, if a custom hierarchy is supplied and vowels span across more than +one level, they should be given separately to the `vowels` class attribute. + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. +- Elisabeth Selkirk. 1984. 
On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +""" + +import re +import warnings +from string import punctuation + +from nltk.tokenize.api import TokenizerI +from nltk.util import ngrams + + +class SyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Sonority Sequencing Principle (SSP). + + >>> from nltk.tokenize import SyllableTokenizer + >>> from nltk import word_tokenize + >>> SSP = SyllableTokenizer() + >>> SSP.tokenize('justification') + ['jus', 'ti', 'fi', 'ca', 'tion'] + >>> text = "This is a foobar-like sentence." + >>> [SSP.tokenize(token) for token in word_tokenize(text)] + [['This'], ['is'], ['a'], ['foo', 'bar', '-', 'li', 'ke'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__(self, lang="en", sonority_hierarchy=False): + """ + :param lang: Language parameter, default is English, 'en' + :type lang: str + :param sonority_hierarchy: Sonority hierarchy according to the + Sonority Sequencing Principle. + :type sonority_hierarchy: list(str) + """ + # Sonority hierarchy should be provided in descending order. + # If vowels are spread across multiple levels, they should be + # passed assigned self.vowels var together, otherwise should be + # placed in first index of hierarchy. + if not sonority_hierarchy and lang == "en": + sonority_hierarchy = [ + "aeiouy", # vowels. + "lmnrw", # nasals. + "zvsf", # fricatives. + "bcdgtkpqxhj", # stops. + ] + + self.vowels = sonority_hierarchy[0] + self.phoneme_map = {} + for i, level in enumerate(sonority_hierarchy): + for c in level: + sonority_level = len(sonority_hierarchy) - i + self.phoneme_map[c] = sonority_level + self.phoneme_map[c.upper()] = sonority_level + + def assign_values(self, token): + """ + Assigns each phoneme its value from the sonority hierarchy. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return: List of tuples, first element is character/phoneme and + second is the soronity value. + :rtype: list(tuple(str, int)) + """ + syllables_values = [] + for c in token: + try: + syllables_values.append((c, self.phoneme_map[c])) + except KeyError: + if c not in "0123456789" and c not in punctuation: + warnings.warn( + "Character not defined in sonority_hierarchy," + " assigning as vowel: '{}'".format(c) + ) + syllables_values.append((c, max(self.phoneme_map.values()))) + if c not in self.vowels: + self.vowels += c + else: # If it's a punctuation or numbers, assign -1. + syllables_values.append((c, -1)) + return syllables_values + + def validate_syllables(self, syllable_list): + """ + Ensures each syllable has at least one vowel. + If the following syllable doesn't have vowel, add it to the current one. + + :param syllable_list: Single word or token broken up into syllables. 
+ :type syllable_list: list(str) + :return: Single word or token broken up into syllables + (with added syllables if necessary) + :rtype: list(str) + """ + valid_syllables = [] + front = "" + vowel_pattern = re.compile("|".join(self.vowels)) + for i, syllable in enumerate(syllable_list): + if syllable in punctuation: + valid_syllables.append(syllable) + continue + if not vowel_pattern.search(syllable): + if len(valid_syllables) == 0: + front += syllable + else: + valid_syllables = valid_syllables[:-1] + [ + valid_syllables[-1] + syllable + ] + else: + if len(valid_syllables) == 0: + valid_syllables.append(front + syllable) + else: + valid_syllables.append(syllable) + + return valid_syllables + + def tokenize(self, token): + """ + Apply the SSP to return a list of syllables. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. + :rtype: list(str) + """ + # assign values from hierarchy + syllables_values = self.assign_values(token) + + # if only one vowel return word + if sum(token.count(x) for x in self.vowels) <= 1: + return [token] + + syllable_list = [] + syllable = syllables_values[0][0] # start syllable with first phoneme + for trigram in ngrams(syllables_values, n=3): + phonemes, values = zip(*trigram) + # Sonority of previous, focal and following phoneme + prev_value, focal_value, next_value = values + # Focal phoneme. + focal_phoneme = phonemes[1] + + # These cases trigger syllable break. + if focal_value == -1: # If it's a punctuation, just break. + syllable_list.append(syllable) + syllable_list.append(focal_phoneme) + syllable = "" + elif prev_value >= focal_value == next_value: + syllable += focal_phoneme + syllable_list.append(syllable) + syllable = "" + + elif prev_value > focal_value < next_value: + syllable_list.append(syllable) + syllable = "" + syllable += focal_phoneme + + # no syllable break + else: + syllable += focal_phoneme + + syllable += syllables_values[-1][0] # append last phoneme + syllable_list.append(syllable) + + return self.validate_syllables(syllable_list) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/stanford.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..81a2d8584aee1d4c39042af6a150bd41c838ee14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/stanford.py @@ -0,0 +1,115 @@ +# Natural Language Toolkit: Interface to the Stanford Tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import _java_options, config_java, find_jar, java +from nltk.parse.corenlp import CoreNLPParser +from nltk.tokenize.api import TokenizerI + +_stanford_url = "https://nlp.stanford.edu/software/tokenizer.shtml" + + +class StanfordTokenizer(TokenizerI): + r""" + Interface to the Stanford Tokenizer + + >>> from nltk.tokenize.stanford import StanfordTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks." + >>> StanfordTokenizer().tokenize(s) # doctest: +SKIP + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> s = "The colour of the wall is blue." 
+ >>> StanfordTokenizer(options={"americanize": True}).tokenize(s) # doctest: +SKIP + ['The', 'color', 'of', 'the', 'wall', 'is', 'blue', '.'] + """ + + _JAR = "stanford-postagger.jar" + + def __init__( + self, + path_to_jar=None, + encoding="utf8", + options=None, + verbose=False, + java_options="-mx1000m", + ): + # Raise deprecation warning. + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.5.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.'" + ), + DeprecationWarning, + stacklevel=2, + ) + + self._stanford_jar = find_jar( + self._JAR, + path_to_jar, + env_vars=("STANFORD_POSTAGGER",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + + self._encoding = encoding + self.java_options = java_options + + options = {} if options is None else options + self._options_cmd = ",".join(f"{key}={val}" for key, val in options.items()) + + @staticmethod + def _parse_tokenized_output(s): + return s.splitlines() + + def tokenize(self, s): + """ + Use stanford tokenizer's PTBTokenizer to tokenize multiple sentences. + """ + cmd = ["edu.stanford.nlp.process.PTBTokenizer"] + return self._parse_tokenized_output(self._execute(cmd, s)) + + def _execute(self, cmd, input_, verbose=False): + encoding = self._encoding + cmd.extend(["-charset", encoding]) + _options_cmd = self._options_cmd + if _options_cmd: + cmd.extend(["-options", self._options_cmd]) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + # Windows is incompatible with NamedTemporaryFile() without passing in delete=False. + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file: + # Write the actual sentences to the temporary input file + if isinstance(input_, str) and encoding: + input_ = input_.encode(encoding) + input_file.write(input_) + input_file.flush() + + cmd.append(input_file.name) + + # Run the tagger and get the output. + stdout, stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stdout = stdout.decode(encoding) + + os.unlink(input_file.name) + + # Return java configurations to their default values. 
+ config_java(options=default_options, verbose=False) + + return stdout diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py new file mode 100644 index 0000000000000000000000000000000000000000..ff3f16621e3a3c38ee0265e817b04c655856dd70 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py @@ -0,0 +1,292 @@ +#!/usr/bin/env python +# Natural Language Toolkit: Interface to the Stanford Segmenter +# for Chinese and Arabic +# +# Copyright (C) 2001-2023 NLTK Project +# Author: 52nlp <52nlpcn@gmail.com> +# Casper Lehmann-Strøm +# Alex Constantin +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import ( + _java_options, + config_java, + find_dir, + find_file, + find_jar, + java, +) +from nltk.tokenize.api import TokenizerI + +_stanford_url = "https://nlp.stanford.edu/software" + + +class StanfordSegmenter(TokenizerI): + """Interface to the Stanford Segmenter + + If stanford-segmenter version is older than 2016-10-31, then path_to_slf4j + should be provieded, for example:: + + seg = StanfordSegmenter(path_to_slf4j='/YOUR_PATH/slf4j-api.jar') + + >>> from nltk.tokenize.stanford_segmenter import StanfordSegmenter + >>> seg = StanfordSegmenter() # doctest: +SKIP + >>> seg.default_config('zh') # doctest: +SKIP + >>> sent = u'这是斯坦福中文分词器测试' + >>> print(seg.segment(sent)) # doctest: +SKIP + \u8fd9 \u662f \u65af\u5766\u798f \u4e2d\u6587 \u5206\u8bcd\u5668 \u6d4b\u8bd5 + + >>> seg.default_config('ar') # doctest: +SKIP + >>> sent = u'هذا هو تصنيف ستانفورد العربي للكلمات' + >>> print(seg.segment(sent.split())) # doctest: +SKIP + \u0647\u0630\u0627 \u0647\u0648 \u062a\u0635\u0646\u064a\u0641 \u0633\u062a\u0627\u0646\u0641\u0648\u0631\u062f \u0627\u0644\u0639\u0631\u0628\u064a \u0644 \u0627\u0644\u0643\u0644\u0645\u0627\u062a + + """ + + _JAR = "stanford-segmenter.jar" + + def __init__( + self, + path_to_jar=None, + path_to_slf4j=None, + java_class=None, + path_to_model=None, + path_to_dict=None, + path_to_sihan_corpora_dict=None, + sihan_post_processing="false", + keep_whitespaces="false", + encoding="UTF-8", + options=None, + verbose=False, + java_options="-mx2g", + ): + # Raise deprecation warning. + warnings.simplefilter("always", DeprecationWarning) + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.5.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPTokenizer\033[0m instead.'" + ), + DeprecationWarning, + stacklevel=2, + ) + warnings.simplefilter("ignore", DeprecationWarning) + + stanford_segmenter = find_jar( + self._JAR, + path_to_jar, + env_vars=("STANFORD_SEGMENTER",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + if path_to_slf4j is not None: + slf4j = find_jar( + "slf4j-api.jar", + path_to_slf4j, + env_vars=("SLF4J", "STANFORD_SEGMENTER"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + else: + slf4j = None + + # This is passed to java as the -cp option, the old version of segmenter needs slf4j. 
+ # The new version of stanford-segmenter-2016-10-31 doesn't need slf4j + self._stanford_jar = os.pathsep.join( + _ for _ in [stanford_segmenter, slf4j] if _ is not None + ) + + self._java_class = java_class + self._model = path_to_model + self._sihan_corpora_dict = path_to_sihan_corpora_dict + self._sihan_post_processing = sihan_post_processing + self._keep_whitespaces = keep_whitespaces + self._dict = path_to_dict + + self._encoding = encoding + self.java_options = java_options + options = {} if options is None else options + self._options_cmd = ",".join( + f"{key}={json.dumps(val)}" for key, val in options.items() + ) + + def default_config(self, lang): + """ + Attempt to initialize Stanford Word Segmenter for the specified language + using the STANFORD_SEGMENTER and STANFORD_MODELS environment variables + """ + + search_path = () + if os.environ.get("STANFORD_SEGMENTER"): + search_path = {os.path.join(os.environ.get("STANFORD_SEGMENTER"), "data")} + + # init for Chinese-specific files + self._dict = None + self._sihan_corpora_dict = None + self._sihan_post_processing = "false" + + if lang == "ar": + self._java_class = ( + "edu.stanford.nlp.international.arabic.process.ArabicSegmenter" + ) + model = "arabic-segmenter-atb+bn+arztrain.ser.gz" + + elif lang == "zh": + self._java_class = "edu.stanford.nlp.ie.crf.CRFClassifier" + model = "pku.gz" + self._sihan_post_processing = "true" + + path_to_dict = "dict-chris6.ser.gz" + try: + self._dict = find_file( + path_to_dict, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS",), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. " + "variables STANFORD_MODELS and /data/)" + % path_to_dict + ) from e + + sihan_dir = "./data/" + try: + path_to_sihan_dir = find_dir( + sihan_dir, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_SEGMENTER",), + ) + self._sihan_corpora_dict = os.path.join(path_to_sihan_dir, sihan_dir) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using the " + "STANFORD_SEGMENTER environment variable)" % sihan_dir + ) from e + else: + raise LookupError(f"Unsupported language {lang}") + + try: + self._model = find_file( + model, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS", "STANFORD_SEGMENTER"), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. 
" + "variables STANFORD_MODELS and /data/)" % model + ) from e + + def tokenize(self, s): + super().tokenize(s) + + def segment_file(self, input_file_path): + """ """ + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + return stdout + + def segment(self, tokens): + return self.segment_sents([tokens]) + + def segment_sents(self, sentences): + """ """ + encoding = self._encoding + # Create a temporary input file + _input_fh, self._input_file_path = tempfile.mkstemp(text=True) + + # Write the actural sentences to the temporary input file + _input_fh = os.fdopen(_input_fh, "wb") + _input = "\n".join(" ".join(x) for x in sentences) + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + _input_fh.write(_input) + _input_fh.close() + + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + self._input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + # Delete the temporary file + os.unlink(self._input_file_path) + + return stdout + + def _execute(self, cmd, verbose=False): + encoding = self._encoding + cmd.extend(["-inputEncoding", encoding]) + _options_cmd = self._options_cmd + if _options_cmd: + cmd.extend(["-options", self._options_cmd]) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + stdout, _stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stdout = stdout.decode(encoding) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/toktok.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/toktok.py new file mode 100644 index 0000000000000000000000000000000000000000..4229a7327743ad9788449a82c8d2350b9c8db392 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/toktok.py @@ -0,0 +1,179 @@ +# Natural Language Toolkit: Python port of the tok-tok.pl tokenizer. +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Jon Dehdari +# Contributors: Liling Tan, Selcuk Ayguney, ikegami, Martijn Pieters +# +# URL: +# For license information, see LICENSE.TXT + +""" +The tok-tok tokenizer is a simple, general tokenizer, where the input has one +sentence per line; thus only final period is tokenized. + +Tok-tok has been tested on, and gives reasonably good results for English, +Persian, Russian, Czech, French, German, Vietnamese, Tajik, and a few others. +The input should be in UTF-8 encoding. + +Reference: +Jon Dehdari. 2014. A Neurophysiologically-Inspired Statistical Language +Model (Doctoral dissertation). Columbus, OH, USA: The Ohio State University. 
+""" + +import re + +from nltk.tokenize.api import TokenizerI + + +class ToktokTokenizer(TokenizerI): + """ + This is a Python port of the tok-tok.pl from + https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl + + >>> toktok = ToktokTokenizer() + >>> text = u'Is 9.5 or 525,600 my favorite number?' + >>> print(toktok.tokenize(text, return_str=True)) + Is 9.5 or 525,600 my favorite number ? + >>> text = u'The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things' + >>> print(toktok.tokenize(text, return_str=True)) + The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things + >>> text = u'\xa1This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf' + >>> expected = u'\xa1 This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf' + >>> assert toktok.tokenize(text, return_str=True) == expected + >>> toktok.tokenize(text) == [u'\xa1', u'This', u',', u'is', u'a', u'sentence', u'with', u'weird', u'\xbb', u'symbols', u'\u2026', u'appearing', u'everywhere', u'\xbf'] + True + """ + + # Replace non-breaking spaces with normal spaces. + NON_BREAKING = re.compile("\u00A0"), " " + + # Pad some funky punctuation. + FUNKY_PUNCT_1 = re.compile(r'([،;؛¿!"\])}»›”؟¡%٪°±©®।॥…])'), r" \1 " + # Pad more funky punctuation. + FUNKY_PUNCT_2 = re.compile(r"([({\[“‘„‚«‹「『])"), r" \1 " + # Pad En dash and em dash + EN_EM_DASHES = re.compile("([–—])"), r" \1 " + + # Replace problematic character with numeric character reference. + AMPERCENT = re.compile("& "), "& " + TAB = re.compile("\t"), " " + PIPE = re.compile(r"\|"), " | " + + # Pad numbers with commas to keep them from further tokenization. + COMMA_IN_NUM = re.compile(r"(? "something ..." + # "something." -> "something ." + FINAL_PERIOD_1 = re.compile(r"(? "... stuff ." + FINAL_PERIOD_2 = re.compile(r"""(? +# Michael Heilman (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed) +# Tom Aarsen <> (modifications) +# +# URL: +# For license information, see LICENSE.TXT + +r""" + +Penn Treebank Tokenizer + +The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. +This implementation is a port of the tokenizer sed script written by Robert McIntyre +and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed. +""" + +import re +import warnings +from typing import Iterator, List, Tuple + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.destructive import MacIntyreContractions +from nltk.tokenize.util import align_tokens + + +class TreebankWordTokenizer(TokenizerI): + r""" + The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. + + This tokenizer performs the following steps: + + - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` + - treat most punctuation characters as separate tokens + - split off commas and single quotes, when followed by whitespace + - separate periods that appear at the end of line + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.'] + >>> s = "They'll save and invest more." 
+ >>> TreebankWordTokenizer().tokenize(s) + ['They', "'ll", 'save', 'and', 'invest', 'more', '.'] + >>> s = "hi, my name can't hello," + >>> TreebankWordTokenizer().tokenize(s) + ['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ','] + """ + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"^\""), r"``"), + (re.compile(r"(``)"), r" \1 "), + (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([:,])([^\d])"), r" \1 \2"), + (re.compile(r"([:,])$"), r" \1 "), + (re.compile(r"\.\.\."), r" ... "), + (re.compile(r"[;@#$%&]"), r" \g<0> "), + ( + re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), + r"\1 \2\3 ", + ), # Handles the final period. + (re.compile(r"[?!]"), r" \g<0> "), + (re.compile(r"([^'])' "), r"\1 ' "), + ] + + # Pads parentheses + PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ") + + # Optionally: Convert parentheses, brackets and converts them to PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile(r"\("), "-LRB-"), + (re.compile(r"\)"), "-RRB-"), + (re.compile(r"\["), "-LSB-"), + (re.compile(r"\]"), "-RSB-"), + (re.compile(r"\{"), "-LCB-"), + (re.compile(r"\}"), "-RCB-"), + ] + + DOUBLE_DASHES = (re.compile(r"--"), r" -- ") + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"''"), " '' "), + (re.compile(r'"'), " '' "), + (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "), + (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "), + ] + + # List of contractions adapted from Robert MacIntyre's tokenizer. + _contractions = MacIntyreContractions() + CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2)) + CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3)) + + def tokenize( + self, text: str, convert_parentheses: bool = False, return_str: bool = False + ) -> List[str]: + r"""Return a tokenized copy of `text`. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36', + 'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + >>> TreebankWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36', + 'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + + :param text: A string with a sentence or sentences. + :type text: str + :param convert_parentheses: if True, replace parentheses to PTB symbols, + e.g. `(` to `-LRB-`. Defaults to False. + :type convert_parentheses: bool, optional + :param return_str: If True, return tokens as space-separated string, + defaults to False. + :type return_str: bool, optional + :return: List of tokens from `text`. + :rtype: List[str] + """ + if return_str is not False: + warnings.warn( + "Parameter 'return_str' has been deprecated and should no " + "longer be used.", + category=DeprecationWarning, + stacklevel=2, + ) + + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Handles parentheses. 
+ regexp, substitution = self.PARENS_BRACKETS + text = regexp.sub(substitution, text) + # Optionally convert parentheses + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Handles double dash. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + # add extra space to make things easier + text = " " + text + " " + + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r" \1 \2 ", text) + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r" \1 \2 ", text) + + # We are not using CONTRACTIONS4 since + # they are also commented out in the SED scripts + # for regexp in self._contractions.CONTRACTIONS4: + # text = regexp.sub(r' \1 \2 \3 ', text) + + return text.split() + + def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]: + r""" + Returns the spans of the tokens in ``text``. + Uses the post-hoc nltk.tokens.align_tokens to return the offset spans. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(TreebankWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in TreebankWordTokenizer().span_tokenize(s)] == expected + True + + :param text: A string with a sentence or sentences. + :type text: str + :yield: Tuple[int, int] + """ + raw_tokens = self.tokenize(text) + + # Convert converted quotes back to original double quotes + # Do this only if original text contains double quote(s) or double + # single-quotes (because '' might be transformed to `` if it is + # treated as starting quotes). + if ('"' in text) or ("''" in text): + # Find double quotes and converted quotes + matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)] + + # Replace converted quotes back to double quotes + tokens = [ + matched.pop(0) if tok in ['"', "``", "''"] else tok + for tok in raw_tokens + ] + else: + tokens = raw_tokens + + yield from align_tokens(tokens, text) + + +class TreebankWordDetokenizer(TokenizerI): + r""" + The Treebank detokenizer uses the reverse regex operations corresponding to + the Treebank tokenizer's regexes. + + Note: + + - There're additional assumption mades when undoing the padding of ``[;@#$%&]`` + punctuation symbols that isn't presupposed in the TreebankTokenizer. + - There're additional regexes added in reversing the parentheses tokenization, + such as the ``r'([\]\)\}\>])\s([:;,.])'``, which removes the additional right + padding added to the closing parentheses precedding ``[:;,.]``. + - It's not possible to return the original whitespaces as they were because + there wasn't explicit records of where `'\n'`, `'\t'` or `'\s'` were removed at + the text.split() operation. + + >>> from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer + >>> s = '''Good muffins cost $3.88\nin New York. 
Please buy me\ntwo of them.\nThanks.''' + >>> d = TreebankWordDetokenizer() + >>> t = TreebankWordTokenizer() + >>> toks = t.tokenize(s) + >>> d.detokenize(toks) + 'Good muffins cost $3.88 in New York. Please buy me two of them. Thanks.' + + The MXPOST parentheses substitution can be undone using the ``convert_parentheses`` + parameter: + + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected_tokens = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '-LRB-', 'York', '-RRB-', '.', 'Please', '-LRB-', 'buy', + ... '-RRB-', 'me', 'two', 'of', 'them.', '-LRB-', 'Thanks', '-RRB-', '.'] + >>> expected_tokens == t.tokenize(s, convert_parentheses=True) + True + >>> expected_detoken = 'Good muffins cost $3.88 in New (York). Please (buy) me two of them. (Thanks).' + >>> expected_detoken == d.detokenize(t.tokenize(s, convert_parentheses=True), convert_parentheses=True) + True + + During tokenization it's safe to add more spaces but during detokenization, + simply undoing the padding doesn't really help. + + - During tokenization, left and right pad is added to ``[!?]``, when + detokenizing, only left shift the ``[!?]`` is needed. + Thus ``(re.compile(r'\s([?!])'), r'\g<1>')``. + + - During tokenization ``[:,]`` are left and right padded but when detokenizing, + only left shift is necessary and we keep right pad after comma/colon + if the string after is a non-digit. + Thus ``(re.compile(r'\s([:,])\s([^\d])'), r'\1 \2')``. + + >>> from nltk.tokenize.treebank import TreebankWordDetokenizer + >>> toks = ['hello', ',', 'i', 'ca', "n't", 'feel', 'my', 'feet', '!', 'Help', '!', '!'] + >>> twd = TreebankWordDetokenizer() + >>> twd.detokenize(toks) + "hello, i can't feel my feet! Help!!" + + >>> toks = ['hello', ',', 'i', "can't", 'feel', ';', 'my', 'feet', '!', + ... 'Help', '!', '!', 'He', 'said', ':', 'Help', ',', 'help', '?', '!'] + >>> twd.detokenize(toks) + "hello, i can't feel; my feet! Help!! He said: Help, help?!" + """ + + _contractions = MacIntyreContractions() + CONTRACTIONS2 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS2 + ] + CONTRACTIONS3 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS3 + ] + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"([^' ])\s('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1\2 "), + (re.compile(r"([^' ])\s('[sS]|'[mM]|'[dD]|') "), r"\1\2 "), + (re.compile(r"(\S)\s(\'\')"), r"\1\2"), + ( + re.compile(r"(\'\')\s([.,:)\]>};%])"), + r"\1\2", + ), # Quotes followed by no-left-padded punctuations. + (re.compile(r"''"), '"'), + ] + + # Handles double dashes + DOUBLE_DASHES = (re.compile(r" -- "), r"--") + + # Optionally: Convert parentheses, brackets and converts them from PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile("-LRB-"), "("), + (re.compile("-RRB-"), ")"), + (re.compile("-LSB-"), "["), + (re.compile("-RSB-"), "]"), + (re.compile("-LCB-"), "{"), + (re.compile("-RCB-"), "}"), + ] + + # Undo padding on parentheses. + PARENS_BRACKETS = [ + (re.compile(r"([\[\(\{\<])\s"), r"\g<1>"), + (re.compile(r"\s([\]\)\}\>])"), r"\g<1>"), + (re.compile(r"([\]\)\}\>])\s([:;,.])"), r"\1\2"), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([^'])\s'\s"), r"\1' "), + (re.compile(r"\s([?!])"), r"\g<1>"), # Strip left pad for [?!] 
+ # (re.compile(r'\s([?!])\s'), r'\g<1>'), + (re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r"\1\2\3"), + # When tokenizing, [;@#$%&] are padded with whitespace regardless of + # whether there are spaces before or after them. + # But during detokenization, we need to distinguish between left/right + # pad, so we split this up. + (re.compile(r"([#$])\s"), r"\g<1>"), # Left pad. + (re.compile(r"\s([;%])"), r"\g<1>"), # Right pad. + # (re.compile(r"\s([&*])\s"), r" \g<1> "), # Unknown pad. + (re.compile(r"\s\.\.\.\s"), r"..."), + # (re.compile(r"\s([:,])\s$"), r"\1"), # .strip() takes care of it. + ( + re.compile(r"\s([:,])"), + r"\1", + ), # Just remove left padding. Punctuation in numbers won't be padded. + ] + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"([ (\[{<])\s``"), r"\1``"), + (re.compile(r"(``)\s"), r"\1"), + (re.compile(r"``"), r'"'), + ] + + def tokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """ + Treebank detokenizer, created by undoing the regexes from + the TreebankWordTokenizer.tokenize. + + :param tokens: A list of strings, i.e. tokenized text. + :type tokens: List[str] + :param convert_parentheses: if True, replace PTB symbols with parentheses, + e.g. `-LRB-` to `(`. Defaults to False. + :type convert_parentheses: bool, optional + :return: str + """ + text = " ".join(tokens) + + # Add extra space to make things easier + text = " " + text + " " + + # Reverse the contractions regexes. + # Note: CONTRACTIONS4 are not used in tokenization. + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r"\1\2", text) + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r"\1\2", text) + + # Reverse the regexes applied for ending quotes. + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + # Undo the space padding. + text = text.strip() + + # Reverse the padding on double dashes. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Reverse the padding regexes applied for parenthesis/brackets. + for regexp, substitution in self.PARENS_BRACKETS: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for punctuations. + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for starting quotes. + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + return text.strip() + + def detokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """Duck-typing the abstract *tokenize()*.""" + return self.tokenize(tokens, convert_parentheses) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e496e0169aa89569b8f0428096b972d4776a0b2e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tokenize/util.py @@ -0,0 +1,295 @@ +# Natural Language Toolkit: Tokenizer Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from re import finditer +from xml.sax.saxutils import escape, unescape + + +def string_span_tokenize(s, sep): + r""" + Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` + tuples, by splitting the string at each occurrence of *sep*. 
+ + >>> from nltk.tokenize.util import string_span_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> list(string_span_tokenize(s, " ")) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 26), (27, 30), (31, 36), (37, 37), + (38, 44), (45, 48), (49, 55), (56, 58), (59, 73)] + + :param s: the string to be tokenized + :type s: str + :param sep: the token separator + :type sep: str + :rtype: iter(tuple(int, int)) + """ + if len(sep) == 0: + raise ValueError("Token delimiter must not be empty") + left = 0 + while True: + try: + right = s.index(sep, left) + if right != 0: + yield left, right + except ValueError: + if left != len(s): + yield left, len(s) + break + + left = right + len(sep) + + +def regexp_span_tokenize(s, regexp): + r""" + Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` + tuples, by splitting the string at each successive match of *regexp*. + + >>> from nltk.tokenize.util import regexp_span_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> list(regexp_span_tokenize(s, r'\s')) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), + (38, 44), (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)] + + :param s: the string to be tokenized + :type s: str + :param regexp: regular expression that matches token separators (must not be empty) + :type regexp: str + :rtype: iter(tuple(int, int)) + """ + left = 0 + for m in finditer(regexp, s): + right, next = m.span() + if right != left: + yield left, right + left = next + yield left, len(s) + + +def spans_to_relative(spans): + r""" + Return a sequence of relative spans, given a sequence of spans. + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> from nltk.tokenize.util import spans_to_relative + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... 
two of them.\n\nThanks.''' + >>> list(spans_to_relative(WhitespaceTokenizer().span_tokenize(s))) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (1, 7), (1, 4), (1, 5), (1, 2), (1, 3), (1, 5), (2, 6), + (1, 3), (1, 2), (1, 3), (1, 2), (1, 5), (2, 7)] + + :param spans: a sequence of (start, end) offsets of the tokens + :type spans: iter(tuple(int, int)) + :rtype: iter(tuple(int, int)) + """ + prev = 0 + for left, right in spans: + yield left - prev, right - left + prev = right + + +class CJKChars: + """ + An object that enumerates the code points of the CJK characters as listed on + https://en.wikipedia.org/wiki/Basic_Multilingual_Plane#Basic_Multilingual_Plane + + This is a Python port of the CJK code point enumerations of Moses tokenizer: + https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/detokenizer.perl#L309 + """ + + # Hangul Jamo (1100–11FF) + Hangul_Jamo = (4352, 4607) # (ord(u"\u1100"), ord(u"\u11ff")) + + # CJK Radicals Supplement (2E80–2EFF) + # Kangxi Radicals (2F00–2FDF) + # Ideographic Description Characters (2FF0–2FFF) + # CJK Symbols and Punctuation (3000–303F) + # Hiragana (3040–309F) + # Katakana (30A0–30FF) + # Bopomofo (3100–312F) + # Hangul Compatibility Jamo (3130–318F) + # Kanbun (3190–319F) + # Bopomofo Extended (31A0–31BF) + # CJK Strokes (31C0–31EF) + # Katakana Phonetic Extensions (31F0–31FF) + # Enclosed CJK Letters and Months (3200–32FF) + # CJK Compatibility (3300–33FF) + # CJK Unified Ideographs Extension A (3400–4DBF) + # Yijing Hexagram Symbols (4DC0–4DFF) + # CJK Unified Ideographs (4E00–9FFF) + # Yi Syllables (A000–A48F) + # Yi Radicals (A490–A4CF) + CJK_Radicals = (11904, 42191) # (ord(u"\u2e80"), ord(u"\ua4cf")) + + # Phags-pa (A840–A87F) + Phags_Pa = (43072, 43135) # (ord(u"\ua840"), ord(u"\ua87f")) + + # Hangul Syllables (AC00–D7AF) + Hangul_Syllables = (44032, 55215) # (ord(u"\uAC00"), ord(u"\uD7AF")) + + # CJK Compatibility Ideographs (F900–FAFF) + CJK_Compatibility_Ideographs = (63744, 64255) # (ord(u"\uF900"), ord(u"\uFAFF")) + + # CJK Compatibility Forms (FE30–FE4F) + CJK_Compatibility_Forms = (65072, 65103) # (ord(u"\uFE30"), ord(u"\uFE4F")) + + # Range U+FF65–FFDC encodes halfwidth forms, of Katakana and Hangul characters + Katakana_Hangul_Halfwidth = (65381, 65500) # (ord(u"\uFF65"), ord(u"\uFFDC")) + + # Supplementary Ideographic Plane 20000–2FFFF + Supplementary_Ideographic_Plane = ( + 131072, + 196607, + ) # (ord(u"\U00020000"), ord(u"\U0002FFFF")) + + ranges = [ + Hangul_Jamo, + CJK_Radicals, + Phags_Pa, + Hangul_Syllables, + CJK_Compatibility_Ideographs, + CJK_Compatibility_Forms, + Katakana_Hangul_Halfwidth, + Supplementary_Ideographic_Plane, + ] + + +def is_cjk(character): + """ + Python port of Moses' code to check for CJK character. + + >>> CJKChars().ranges + [(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215), (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)] + >>> is_cjk(u'\u33fe') + True + >>> is_cjk(u'\uFE5F') + False + + :param character: The character that needs to be checked. + :type character: char + :return: bool + """ + return any( + [ + start <= ord(character) <= end + for start, end in [ + (4352, 4607), + (11904, 42191), + (43072, 43135), + (44032, 55215), + (63744, 64255), + (65072, 65103), + (65381, 65500), + (131072, 196607), + ] + ] + ) + + +def xml_escape(text): + """ + This function transforms the input text into an "escaped" version suitable + for well-formed XML formatting. 
+ + Note that the default xml.sax.saxutils.escape() function don't escape + some characters that Moses does so we have to manually add them to the + entities dictionary. + + >>> input_str = ''')| & < > ' " ] [''' + >>> expected_output = ''')| & < > ' " ] [''' + >>> escape(input_str) == expected_output + True + >>> xml_escape(input_str) + ')| & < > ' " ] [' + + :param text: The text that needs to be escaped. + :type text: str + :rtype: str + """ + return escape( + text, + entities={ + r"'": r"'", + r'"': r""", + r"|": r"|", + r"[": r"[", + r"]": r"]", + }, + ) + + +def xml_unescape(text): + """ + This function transforms the "escaped" version suitable + for well-formed XML formatting into humanly-readable string. + + Note that the default xml.sax.saxutils.unescape() function don't unescape + some characters that Moses does so we have to manually add them to the + entities dictionary. + + >>> from xml.sax.saxutils import unescape + >>> s = ')| & < > ' " ] [' + >>> expected = ''')| & < > \' " ] [''' + >>> xml_unescape(s) == expected + True + + :param text: The text that needs to be unescaped. + :type text: str + :rtype: str + """ + return unescape( + text, + entities={ + r"'": r"'", + r""": r'"', + r"|": r"|", + r"[": r"[", + r"]": r"]", + }, + ) + + +def align_tokens(tokens, sentence): + """ + This module attempt to find the offsets of the tokens in *s*, as a sequence + of ``(start, end)`` tuples, given the tokens and also the source string. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> from nltk.tokenize.util import align_tokens + >>> s = str("The plane, bound for St Petersburg, crashed in Egypt's " + ... "Sinai desert just 23 minutes after take-off from Sharm el-Sheikh " + ... "on Saturday.") + >>> tokens = TreebankWordTokenizer().tokenize(s) + >>> expected = [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23), + ... (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54), + ... (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89), + ... (90, 98), (99, 103), (104, 109), (110, 119), (120, 122), + ... (123, 131), (131, 132)] + >>> output = list(align_tokens(tokens, s)) + >>> len(tokens) == len(expected) == len(output) # Check that length of tokens and tuples are the same. + True + >>> expected == list(align_tokens(tokens, s)) # Check that the output is as expected. + True + >>> tokens == [s[start:end] for start, end in output] # Check that the slices of the string corresponds to the tokens. + True + + :param tokens: The list of strings that are the result of tokenization + :type tokens: list(str) + :param sentence: The original string + :type sentence: str + :rtype: list(tuple(int,int)) + """ + point = 0 + offsets = [] + for token in tokens: + try: + start = sentence.index(token, point) + except ValueError as e: + raise ValueError(f'substring "{token}" not found in "{sentence}"') from e + point = start + len(token) + offsets.append((start, point)) + return offsets diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/tree.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e76aac6e877faa0e50631d3c447ad0133ef97f7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tree/__pycache__/tree.cpython-310.pyc differ
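Illustrative usage sketch for the tokenizer modules added in the hunks above: it is not part of the diff, and it assumes the nltk build produced by this patch is importable from the active environment. It exercises TreebankWordTokenizer, TreebankWordDetokenizer, and the align_tokens utility from nltk/tokenize/util.py, all of which are defined in this patch.

# Minimal sketch (illustrative only; assumes this nltk build is on the import path).
from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer
from nltk.tokenize.util import align_tokens

text = "Good muffins cost $3.88 in New York."

tokenizer = TreebankWordTokenizer()
detokenizer = TreebankWordDetokenizer()

# Tokenize, then map each token back to its (start, end) character span in `text`.
tokens = tokenizer.tokenize(text)
# ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.']
spans = align_tokens(tokens, text)
# [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), (24, 26), (27, 30), (31, 35), (35, 36)]
assert all(text[start:end] == tok for (start, end), tok in zip(spans, tokens))

# Detokenize back to a plain string; whitespace is normalised, not restored verbatim.
print(detokenizer.detokenize(tokens))
# Good muffins cost $3.88 in New York.

Because this sentence contains no quotes or contractions, every token is a verbatim substring of the input, which is the precondition align_tokens relies on; span_tokenize in treebank.py handles the quote-conversion cases before delegating to the same helper.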